Mon, 24 Nov 2014 07:29:03 -0800
8058148: MaxNodeLimit and LiveNodeCountInliningCutoff
Reviewed-by: kvn, roland
/*
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_LOOPNODE_HPP
#define SHARE_VM_OPTO_LOOPNODE_HPP

#include "opto/cfgnode.hpp"
#include "opto/multnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"

class CmpNode;
class CountedLoopEndNode;
class CountedLoopNode;
class IdealLoopTree;
class LoopNode;
class Node;
class PhaseIdealLoop;
class VectorSet;
class Invariance;
struct small_cache;

//
//                  I D E A L I Z E D   L O O P S
//
// Idealized loops are the set of loops I perform more interesting
// transformations on, beyond simple hoisting.

//------------------------------LoopNode---------------------------------------
// Simple loop header. Fall in path on left, loop-back path on right.
class LoopNode : public RegionNode {
  // Size is bigger to hold the flags. However, the flags do not change
  // the semantics so it does not appear in the hash & cmp functions.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  short _loop_flags;
  // Names for flag bitfields
  enum { Normal=0, Pre=1, Main=2, Post=3, PreMainPostFlagsMask=3,
         MainHasNoPreLoop=4,
         HasExactTripCount=8,
         InnerLoop=16,
         PartialPeelLoop=32,
         PartialPeelFailed=64 };
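  // Example (illustrative): the low two bits encode the pre/main/post role,
  // so a main loop on which partial peeling has failed would carry
  // _loop_flags == (Main | PartialPeelFailed) == 2 | 64 == 66.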
  char _unswitch_count;
  enum { _unswitch_max=3 };

public:
  // Names for edge indices
  enum { Self=0, EntryControl, LoopBackControl };

  int is_inner_loop() const { return _loop_flags & InnerLoop; }
  void set_inner_loop() { _loop_flags |= InnerLoop; }

  int is_partial_peel_loop() const { return _loop_flags & PartialPeelLoop; }
  void set_partial_peel_loop() { _loop_flags |= PartialPeelLoop; }
  int partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; }
  void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; }

  int unswitch_max() { return _unswitch_max; }
  int unswitch_count() { return _unswitch_count; }
  void set_unswitch_count(int val) {
    assert (val <= unswitch_max(), "too many unswitches");
    _unswitch_count = val;
  }

  LoopNode( Node *entry, Node *backedge ) : RegionNode(3), _loop_flags(0), _unswitch_count(0) {
    init_class_id(Class_Loop);
    init_req(EntryControl, entry);
    init_req(LoopBackControl, backedge);
  }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int Opcode() const;
  bool can_be_counted_loop(PhaseTransform* phase) const {
    return req() == 3 && in(0) != NULL &&
      in(1) != NULL && phase->type(in(1)) != Type::TOP &&
      in(2) != NULL && phase->type(in(2)) != Type::TOP;
  }
  bool is_valid_counted_loop() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------Counted Loops----------------------------------
// Counted loops are all trip-counted loops, with exactly 1 trip-counter exit
// path (and maybe some other exit paths). The trip-counter exit is always
// last in the loop. The trip-counter has to stride by a constant;
// the exit value is also loop invariant.

// CountedLoopNodes and CountedLoopEndNodes come in matched pairs. The
// CountedLoopNode has the incoming loop control and the loop-back-control
// which is always the IfTrue before the matching CountedLoopEndNode. The
// CountedLoopEndNode has an incoming control (possibly not the
// CountedLoopNode if there is control flow in the loop), the post-increment
// trip-counter value, and the limit. The trip-counter value is always of
// the form (Op old-trip-counter stride). The old-trip-counter is produced
// by a Phi connected to the CountedLoopNode. The stride is constant.
// The Op is any commutative opcode, including Add, Mul, Xor. The
// CountedLoopEndNode also takes in the loop-invariant limit value.

// From a CountedLoopNode I can reach the matching CountedLoopEndNode via the
// loop-back control. From CountedLoopEndNodes I can reach CountedLoopNodes
// via the old-trip-counter from the Op node.

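// Illustrative sketch (assumed shape, following the description above) for a
// source loop of the form
//   for (int i = init; i < limit; i += stride) { ... }
//
//   CountedLoop(entry, backedge)    loop header; backedge is the IfTrue below
//     Phi(CountedLoop, init, AddI)  the old trip counter 'i'
//     AddI(Phi, stride)             post-increment trip counter (Op i stride)
//     CmpI(AddI, limit)             compare against the loop-invariant limit
//     Bool[lt](CmpI)                loop-back test
//     CountedLoopEnd(ctrl, Bool)    the unique trip-counter exit
//       IfTrue  --> CountedLoop     the loop-back control
//       IfFalse --> loop exit
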
//------------------------------CountedLoopNode--------------------------------
// CountedLoopNodes head simple counted loops. CountedLoopNodes have as
// inputs the incoming loop-start control and the loop-back control, so they
// act like RegionNodes. They also take in the initial trip counter, the
// loop-invariant stride and the loop-invariant limit value. CountedLoopNodes
// produce a loop-body control and the trip counter value. Since
// CountedLoopNodes behave like RegionNodes I still have a standard CFG model.

class CountedLoopNode : public LoopNode {
  // Size is bigger to hold _main_idx. However, _main_idx does not change
  // the semantics so it does not appear in the hash & cmp functions.
  virtual uint size_of() const { return sizeof(*this); }

  // For Pre- and Post-loops during debugging ONLY, this holds the index of
  // the Main CountedLoop. Used to assert that we understand the graph shape.
  node_idx_t _main_idx;

  // Known trip count calculated by compute_exact_trip_count()
  uint _trip_count;

  // Expected trip count from profile data
  float _profile_trip_cnt;

  // Log2 of original loop bodies in unrolled loop
  int _unrolled_count_log2;

  // Node count prior to last unrolling - used to decide if
  // unroll,optimize,unroll,optimize,... is making progress
  int _node_count_before_unroll;

public:
  CountedLoopNode( Node *entry, Node *backedge )
    : LoopNode(entry, backedge), _main_idx(0), _trip_count(max_juint),
      _profile_trip_cnt(COUNT_UNKNOWN), _unrolled_count_log2(0),
      _node_count_before_unroll(0) {
    init_class_id(Class_CountedLoop);
    // Initialize _trip_count to the largest possible value.
    // Will be reset (lower) if the loop's trip count is known.
  }

  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  Node *init_control() const { return in(EntryControl); }
  Node *back_control() const { return in(LoopBackControl); }
  CountedLoopEndNode *loopexit() const;
  Node *init_trip() const;
  Node *stride() const;
  int stride_con() const;
  bool stride_is_con() const;
  Node *limit() const;
  Node *incr() const;
  Node *phi() const;

  // Match increment with optional truncation
  static Node* match_incr_with_optional_truncation(Node* expr, Node** trunc1, Node** trunc2, const TypeInt** trunc_type);

  // A 'main' loop has a pre-loop and a post-loop. The 'main' loop
  // can run short a few iterations and may start a few iterations in.
  // It will be RCE'd and unrolled and aligned.

  // A following 'post' loop will run any remaining iterations. Used
  // during Range Check Elimination, the 'post' loop will do any final
  // iterations with full checks. Also used by Loop Unrolling, where
  // the 'post' loop will do any epilog iterations needed. Basically,
  // a 'post' loop cannot profitably be further unrolled or RCE'd.

  // A preceding 'pre' loop will run at least 1 iteration (to do peeling),
  // it may do under-flow checks for RCE and may do alignment iterations
  // so the following main loop 'knows' that it is striding down cache
  // lines.

  // A 'main' loop that is ONLY unrolled or peeled, never RCE'd or
  // Aligned, may be missing its pre-loop.
  int is_normal_loop() const { return (_loop_flags&PreMainPostFlagsMask) == Normal; }
  int is_pre_loop   () const { return (_loop_flags&PreMainPostFlagsMask) == Pre;    }
  int is_main_loop  () const { return (_loop_flags&PreMainPostFlagsMask) == Main;   }
  int is_post_loop  () const { return (_loop_flags&PreMainPostFlagsMask) == Post;   }
  int is_main_no_pre_loop() const { return _loop_flags & MainHasNoPreLoop; }
  void set_main_no_pre_loop() { _loop_flags |= MainHasNoPreLoop; }

  int main_idx() const { return _main_idx; }


  void set_pre_loop  (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Pre ; _main_idx = main->_idx; }
  void set_main_loop (                     ) { assert(is_normal_loop(),""); _loop_flags |= Main;                         }
  void set_post_loop (CountedLoopNode *main) { assert(is_normal_loop(),""); _loop_flags |= Post; _main_idx = main->_idx; }
  void set_normal_loop(                    ) { _loop_flags &= ~PreMainPostFlagsMask; }

  void set_trip_count(uint tc) { _trip_count = tc; }
  uint trip_count()            { return _trip_count; }

  bool has_exact_trip_count() const { return (_loop_flags & HasExactTripCount) != 0; }
  void set_exact_trip_count(uint tc) {
    _trip_count = tc;
    _loop_flags |= HasExactTripCount;
  }
  void set_nonexact_trip_count() {
    _loop_flags &= ~HasExactTripCount;
  }

  void set_profile_trip_cnt(float ptc) { _profile_trip_cnt = ptc; }
  float profile_trip_cnt()             { return _profile_trip_cnt; }

  void double_unrolled_count() { _unrolled_count_log2++; }
  int  unrolled_count()        { return 1 << MIN2(_unrolled_count_log2, BitsPerInt-3); }
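  // For example (illustrative): after three calls to double_unrolled_count(),
  // unrolled_count() returns 1 << 3 == 8; the MIN2 clamp merely keeps the
  // shift amount in range if the count has been doubled very many times.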

  void set_node_count_before_unroll(int ct) { _node_count_before_unroll = ct; }
  int  node_count_before_unroll()           { return _node_count_before_unroll; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CountedLoopEndNode-----------------------------
// CountedLoopEndNodes end simple trip counted loops. They act much like
// IfNodes.
class CountedLoopEndNode : public IfNode {
public:
  enum { TestControl, TestValue };

  CountedLoopEndNode( Node *control, Node *test, float prob, float cnt )
    : IfNode( control, test, prob, cnt) {
    init_class_id(Class_CountedLoopEnd);
  }
  virtual int Opcode() const;

  Node *cmp_node() const  { return (in(TestValue)->req() >= 2) ? in(TestValue)->in(1) : NULL; }
  Node *incr() const      { Node *tmp = cmp_node(); return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; }
  Node *limit() const     { Node *tmp = cmp_node(); return (tmp && tmp->req()==3) ? tmp->in(2) : NULL; }
  Node *stride() const    { Node *tmp = incr    (); return (tmp && tmp->req()==3) ? tmp->in(2) : NULL; }
  Node *phi() const       { Node *tmp = incr    (); return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; }
  Node *init_trip() const { Node *tmp = phi     (); return (tmp && tmp->req()==3) ? tmp->in(1) : NULL; }
  int stride_con() const;
  bool stride_is_con() const { Node *tmp = stride(); return (tmp != NULL && tmp->is_Con()); }
  BoolTest::mask test_trip() const { return in(TestValue)->as_Bool()->_test._test; }
  CountedLoopNode *loopnode() const {
    // The CountedLoopNode that goes with this CountedLoopEndNode may
    // have been optimized out by the IGVN so be cautious with the
    // pattern matching on the graph
    if (phi() == NULL) {
      return NULL;
    }
    Node *ln = phi()->in(0);
    if (ln->is_CountedLoop() && ln->as_CountedLoop()->loopexit() == this) {
      return (CountedLoopNode*)ln;
    }
    return NULL;
  }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


inline CountedLoopEndNode *CountedLoopNode::loopexit() const {
  Node *bc = back_control();
  if( bc == NULL ) return NULL;
  Node *le = bc->in(0);
  if( le->Opcode() != Op_CountedLoopEnd )
    return NULL;
  return (CountedLoopEndNode*)le;
}
inline Node *CountedLoopNode::init_trip()    const { return loopexit() ? loopexit()->init_trip()  : NULL; }
inline Node *CountedLoopNode::stride()       const { return loopexit() ? loopexit()->stride()     : NULL; }
inline int  CountedLoopNode::stride_con()    const { return loopexit() ? loopexit()->stride_con() : 0;    }
inline bool CountedLoopNode::stride_is_con() const { return loopexit() && loopexit()->stride_is_con();    }
inline Node *CountedLoopNode::limit()        const { return loopexit() ? loopexit()->limit()      : NULL; }
inline Node *CountedLoopNode::incr()         const { return loopexit() ? loopexit()->incr()       : NULL; }
inline Node *CountedLoopNode::phi()          const { return loopexit() ? loopexit()->phi()        : NULL; }

//------------------------------LoopLimitNode-----------------------------
// Counted Loop limit node which represents the exact final iterator value:
//   trip_count  = (limit - init_trip + stride - 1)/stride
//   final_value = trip_count * stride + init_trip
// Use HW instructions to calculate it when the computation can overflow an
// integer. Note that final_value should fit into an integer since the
// counted loop has the limit check: limit <= max_int-stride.
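// Worked example (illustrative): init_trip=0, limit=10, stride=3 gives
//   trip_count  = (10 - 0 + 3 - 1)/3 = 4
//   final_value = 4*3 + 0 = 12
// i.e. the loop runs i = 0, 3, 6, 9 and exits with i == 12.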
class LoopLimitNode : public Node {
  enum { Init=1, Limit=2, Stride=3 };
public:
  LoopLimitNode( Compile* C, Node *init, Node *limit, Node *stride ) : Node(0,init,limit,stride) {
    // Put it on the Macro nodes list to optimize during macro nodes expansion.
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return TypeInt::INT; }
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase );
};

// -----------------------------IdealLoopTree----------------------------------
class IdealLoopTree : public ResourceObj {
public:
  IdealLoopTree *_parent;  // Parent in loop tree
  IdealLoopTree *_next;    // Next sibling in loop tree
  IdealLoopTree *_child;   // First child in loop tree

  // The head-tail backedge defines the loop.
  // If tail is NULL then this loop has multiple backedges as part of the
  // same loop. During cleanup I'll peel off the multiple backedges; merge
  // them at the loop bottom and flow 1 real backedge into the loop.
  Node *_head;             // Head of loop
  Node *_tail;             // Tail of loop
  inline Node *tail();     // Handle lazy update of _tail field
  PhaseIdealLoop* _phase;

  Node_List _body;         // Loop body for inner loops

  uint8 _nest;             // Nesting depth
  uint8 _irreducible:1,    // True if irreducible
        _has_call:1,       // True if has call safepoint
        _has_sfpt:1,       // True if has non-call safepoint
        _rce_candidate:1;  // True if candidate for range check elimination

  Node_List* _safepts;     // List of safepoints in this loop
  Node_List* _required_safept;  // An inner loop cannot delete these safepts;
  bool _allow_optimizations;    // Allow loop optimizations

  IdealLoopTree( PhaseIdealLoop* phase, Node *head, Node *tail )
    : _parent(0), _next(0), _child(0),
      _head(head), _tail(tail),
      _phase(phase),
      _safepts(NULL),
      _required_safept(NULL),
      _allow_optimizations(true),
      _nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0)
  { }

  // Is 'l' a member of 'this'?
  int is_member( const IdealLoopTree *l ) const; // Test for nested membership

  // Set loop nesting depth. Accumulate has_call bits.
  int set_nest( uint depth );

  // Split out multiple fall-in edges from the loop header. Move them to a
  // private RegionNode before the loop. This becomes the loop landing pad.
  void split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt );

  // Split out the outermost loop from this shared header.
  void split_outer_loop( PhaseIdealLoop *phase );

  // Merge all the backedges from the shared header into a private Region.
  // Feed that region as the one backedge to this loop.
  void merge_many_backedges( PhaseIdealLoop *phase );

  // Split shared headers and insert loop landing pads.
  // Insert a LoopNode to replace the RegionNode.
  // Returns TRUE if loop tree is structurally changed.
  bool beautify_loops( PhaseIdealLoop *phase );

  // Perform optimization to use the loop predicates for null checks and range checks.
  // Applies to any loop level (not just the innermost one)
  bool loop_predication( PhaseIdealLoop *phase);

  // Perform iteration-splitting on inner loops. Split iterations to
  // avoid range checks or one-shot null checks. Returns false if the
  // current round of loop opts should stop.
  bool iteration_split( PhaseIdealLoop *phase, Node_List &old_new );

  // Driver for various flavors of iteration splitting. Returns false
  // if the current round of loop opts should stop.
  bool iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new );

  // Given dominators, try to find loops with calls that must always be
  // executed (call dominates loop tail). These loops do not need non-call
  // safepoints (ncsfpt).
  void check_safepts(VectorSet &visited, Node_List &stack);

  // All-paths backwards scan from the loop tail, terminating each path at the
  // first safepoint encountered.
  void allpaths_check_safepts(VectorSet &visited, Node_List &stack);

  // Convert to counted loops where possible
  void counted_loop( PhaseIdealLoop *phase );

  // Check for Node being a loop-breaking test
  Node *is_loop_exit(Node *iff) const;

  // Returns true if ctrl is executed on every complete iteration
  bool dominates_backedge(Node* ctrl);

  // Remove simplistic dead code from loop body
  void DCE_loop_body();

  // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
  // Replace with a 1-in-10 exit guess.
  void adjust_loop_exit_prob( PhaseIdealLoop *phase );

  // Return TRUE or FALSE if the loop should never be RCE'd or aligned.
  // Useful for unrolling loops with NO array accesses.
  bool policy_peel_only( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be unswitched -- clone
  // loop with an invariant test
  bool policy_unswitching( PhaseIdealLoop *phase ) const;

  // Micro-benchmark spamming. Remove empty loops.
  bool policy_do_remove_empty_loop( PhaseIdealLoop *phase );

  // Convert a one-iteration loop into normal code.
  bool policy_do_one_iteration_loop( PhaseIdealLoop *phase );

  // Return TRUE or FALSE if the loop should be peeled or not. Peel if we can
  // make some loop-invariant test (usually a null-check) happen before the
  // loop.
  bool policy_peeling( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be maximally unrolled. Stash any
  // known trip count in the counted loop node.
  bool policy_maximally_unroll( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be unrolled or not. Unroll if
  // the loop is a CountedLoop and the body is small enough.
  bool policy_unroll( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be range-check-eliminated.
  // Gather a list of IF tests that are dominated by iteration splitting;
  // also gather the end of the first split and the start of the 2nd split.
  bool policy_range_check( PhaseIdealLoop *phase ) const;

  // Return TRUE or FALSE if the loop should be cache-line aligned.
  // Gather the expression that does the alignment. Note that only
  // one array base can be aligned in a loop (unless the VM guarantees
  // mutual alignment). Note that if we vectorize short memory ops
  // into longer memory ops, we may want to increase alignment.
  bool policy_align( PhaseIdealLoop *phase ) const;

  // Return TRUE if "iff" is a range check.
  bool is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const;

  // Compute the loop's exact trip count, if possible
  void compute_exact_trip_count( PhaseIdealLoop *phase );

  // Compute loop trip count from profile data
  void compute_profile_trip_cnt( PhaseIdealLoop *phase );

  // Reassociate invariant expressions.
  void reassociate_invariants(PhaseIdealLoop *phase);
  // Reassociate invariant add and subtract expressions.
  Node* reassociate_add_sub(Node* n1, PhaseIdealLoop *phase);
  // Return nonzero index of invariant operand if invariant and variant
  // are combined with an Add or Sub. Helper for reassociate_invariants.
  int is_invariant_addition(Node* n, PhaseIdealLoop *phase);

  // Return true if n is invariant
  bool is_invariant(Node* n) const;

  // Put loop body on igvn work list
  void record_for_igvn();

  bool is_loop()    { return !_irreducible && _tail && !_tail->is_top(); }
  bool is_inner()   { return is_loop() && _child == NULL; }
  bool is_counted() { return is_loop() && _head != NULL && _head->is_CountedLoop(); }

#ifndef PRODUCT
  void dump_head( ) const;  // Dump loop head only
  void dump() const;        // Dump this loop recursively
  void verify_tree(IdealLoopTree *loop, const IdealLoopTree *parent) const;
#endif

};

// -----------------------------PhaseIdealLoop---------------------------------
// Computes the mapping from Nodes to IdealLoopTrees. Organizes IdealLoopTrees
// into a loop tree. Drives the loop-based transformations on the ideal graph.
class PhaseIdealLoop : public PhaseTransform {
  friend class IdealLoopTree;
  friend class SuperWord;
  // Pre-computed def-use info
  PhaseIterGVN &_igvn;

  // Head of loop tree
  IdealLoopTree *_ltree_root;

  // Array of pre-order numbers, plus post-visited bit.
  // ZERO for not pre-visited. EVEN for pre-visited but not post-visited.
  // ODD for post-visited. Other bits are the pre-order number.
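  // Example (illustrative): a node assigned pre-order number 7 is stored as
  // 7<<1 == 14 (even: pre- but not post-visited); set_postvisited() then sets
  // the low bit, giving 15 (odd), while get_preorder() still returns 7.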
  uint *_preorders;
  uint _max_preorder;

  const PhaseIdealLoop* _verify_me;
  bool _verify_only;

  // Allocate _preorders[] array
  void allocate_preorders() {
    _max_preorder = C->unique()+8;
    _preorders = NEW_RESOURCE_ARRAY(uint, _max_preorder);
    memset(_preorders, 0, sizeof(uint) * _max_preorder);
  }

  // Reallocate (and clear) the _preorders[] array
  void reallocate_preorders() {
    if ( _max_preorder < C->unique() ) {
      _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, C->unique());
      _max_preorder = C->unique();
    }
    memset(_preorders, 0, sizeof(uint) * _max_preorder);
  }

  // Check to grow _preorders[] array for the case when build_loop_tree_impl()
  // adds new nodes.
  void check_grow_preorders( ) {
    if ( _max_preorder < C->unique() ) {
      uint newsize = _max_preorder<<1;  // double size of array
      _preorders = REALLOC_RESOURCE_ARRAY(uint, _preorders, _max_preorder, newsize);
      memset(&_preorders[_max_preorder],0,sizeof(uint)*(newsize-_max_preorder));
      _max_preorder = newsize;
    }
  }
  // Check for pre-visited. Zero for NOT visited; non-zero for visited.
  int is_visited( Node *n ) const { return _preorders[n->_idx]; }
  // Pre-order numbers are written to the _preorders array shifted left by one
  // bit; the low bit is reserved for the post-visited flag.
  void set_preorder_visited( Node *n, int pre_order ) {
    assert( !is_visited( n ), "already set" );
    _preorders[n->_idx] = (pre_order<<1);
  };
  // Return pre-order number.
  int get_preorder( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]>>1; }

  // Check for being post-visited.
  // Should be previsited already (checked with assert(is_visited(n))).
  int is_postvisited( Node *n ) const { assert( is_visited(n), "" ); return _preorders[n->_idx]&1; }

  // Mark as post visited
  void set_postvisited( Node *n ) { assert( !is_postvisited( n ), "" ); _preorders[n->_idx] |= 1; }

  // Set/get control node out. Set lower bit to distinguish from IdealLoopTree
  // Returns true if "n" is a data node, false if it's a control node.
  bool has_ctrl( Node *n ) const { return ((intptr_t)_nodes[n->_idx]) & 1; }
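  // Sketch of the encoding (as implied by set_ctrl() and set_loop() below):
  // _nodes[n->_idx] holds either
  //   (Node*)((intptr_t)ctrl + 1) -- low bit set:   n is a data node and
  //                                  ctrl is its control, or
  //   (Node*)loop                 -- low bit clear: n is a control node and
  //                                  loop is its IdealLoopTree.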

  // clear out dead code after build_loop_late
  Node_List _deadlist;

  // Support for faster execution of get_late_ctrl()/dom_lca()
  // when a node has many uses and dominator depth is deep.
  Node_Array _dom_lca_tags;
  void init_dom_lca_tags();
  void clear_dom_lca_tags();

  // Helper for debugging bad dominance relationships
  bool verify_dominance(Node* n, Node* use, Node* LCA, Node* early);

  Node* compute_lca_of_uses(Node* n, Node* early, bool verify = false);

  // Inline wrapper for frequent cases:
  // 1) only one use
  // 2) a use is the same as the current LCA passed as 'n1'
  Node *dom_lca_for_get_late_ctrl( Node *lca, Node *n, Node *tag ) {
    assert( n->is_CFG(), "" );
    // Fast-path NULL lca
    if( lca != NULL && lca != n ) {
      assert( lca->is_CFG(), "" );
      // find LCA of all uses
      n = dom_lca_for_get_late_ctrl_internal( lca, n, tag );
    }
    return find_non_split_ctrl(n);
  }
  Node *dom_lca_for_get_late_ctrl_internal( Node *lca, Node *n, Node *tag );

  // Helper function for directing control inputs away from CFG split
  // points.
  Node *find_non_split_ctrl( Node *ctrl ) const {
    if (ctrl != NULL) {
      if (ctrl->is_MultiBranch()) {
        ctrl = ctrl->in(0);
      }
      assert(ctrl->is_CFG(), "CFG");
    }
    return ctrl;
  }

public:
  bool has_node( Node* n ) const {
    guarantee(n != NULL, "No Node.");
    return _nodes[n->_idx] != NULL;
  }
  // check if transform created new nodes that need _ctrl recorded
  Node *get_late_ctrl( Node *n, Node *early );
  Node *get_early_ctrl( Node *n );
  Node *get_early_ctrl_for_expensive(Node *n, Node* earliest);
  void set_early_ctrl( Node *n );
  void set_subtree_ctrl( Node *root );
  void set_ctrl( Node *n, Node *ctrl ) {
    assert( !has_node(n) || has_ctrl(n), "" );
    assert( ctrl->in(0), "cannot set dead control node" );
    assert( ctrl == find_non_split_ctrl(ctrl), "must set legal ctrl" );
    _nodes.map( n->_idx, (Node*)((intptr_t)ctrl + 1) );
  }
  // Set control and update loop membership
  void set_ctrl_and_loop(Node* n, Node* ctrl) {
    IdealLoopTree* old_loop = get_loop(get_ctrl(n));
    IdealLoopTree* new_loop = get_loop(ctrl);
    if (old_loop != new_loop) {
      if (old_loop->_child == NULL) old_loop->_body.yank(n);
      if (new_loop->_child == NULL) new_loop->_body.push(n);
    }
    set_ctrl(n, ctrl);
  }
  // Control nodes can be replaced or subsumed. During this pass they
  // get their replacement Node in slot 1. Instead of updating the block
  // location of all Nodes in the subsumed block, we lazily do it. As we
  // pull such a subsumed block out of the array, we write back the final
  // correct block.
  Node *get_ctrl( Node *i ) {
    assert(has_node(i), "");
    Node *n = get_ctrl_no_update(i);
    _nodes.map( i->_idx, (Node*)((intptr_t)n + 1) );
    assert(has_node(i) && has_ctrl(i), "");
    assert(n == find_non_split_ctrl(n), "must return legal ctrl" );
    return n;
  }
  // true if CFG node d dominates CFG node n
  bool is_dominator(Node *d, Node *n);
  // return get_ctrl for a data node and self(n) for a CFG node
  Node* ctrl_or_self(Node* n) {
    if (has_ctrl(n))
      return get_ctrl(n);
    else {
      assert (n->is_CFG(), "must be a CFG node");
      return n;
    }
  }

private:
  Node *get_ctrl_no_update( Node *i ) const {
    assert( has_ctrl(i), "" );
    Node *n = (Node*)(((intptr_t)_nodes[i->_idx]) & ~1);
    if (!n->in(0)) {
      // Skip dead CFG nodes
      do {
        n = (Node*)(((intptr_t)_nodes[n->_idx]) & ~1);
      } while (!n->in(0));
      n = find_non_split_ctrl(n);
    }
    return n;
  }

  // Check for loop being set
  // "n" must be a control node. Returns true if "n" is known to be in a loop.
  bool has_loop( Node *n ) const {
    assert(!has_node(n) || !has_ctrl(n), "");
    return has_node(n);
  }
  // Set loop
  void set_loop( Node *n, IdealLoopTree *loop ) {
    _nodes.map(n->_idx, (Node*)loop);
  }
  // Lazy-dazy update of 'get_ctrl' and 'idom_at' mechanisms. Replace
  // the 'old_node' with 'new_node'. Kill old-node. Add a reference
  // from old_node to new_node to support the lazy update. The reference
  // replaces the loop reference, since that is not needed for a dead node.
public:
  void lazy_update( Node *old_node, Node *new_node ) {
    assert( old_node != new_node, "no cycles please" );
    //old_node->set_req( 1, new_node /*NO DU INFO*/ );
    // Nodes always have DU info now, so re-use the side array slot
    // for this node to provide the forwarding pointer.
    _nodes.map( old_node->_idx, (Node*)((intptr_t)new_node + 1) );
  }
  void lazy_replace( Node *old_node, Node *new_node ) {
    _igvn.replace_node( old_node, new_node );
    lazy_update( old_node, new_node );
  }
  void lazy_replace_proj( Node *old_node, Node *new_node ) {
    assert( old_node->req() == 1, "use this for Projs" );
    _igvn.hash_delete(old_node); // Must hash-delete before hacking edges
    old_node->add_req( NULL );
    lazy_replace( old_node, new_node );
  }

private:

  // Place 'n' in some loop nest, where 'n' is a CFG node
  void build_loop_tree();
  int build_loop_tree_impl( Node *n, int pre_order );
  // Insert loop into the existing loop tree. 'innermost' is a leaf of the
  // loop tree, not the root.
  IdealLoopTree *sort( IdealLoopTree *loop, IdealLoopTree *innermost );

  // Place Data nodes in some loop nest
  void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
  void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
  void build_loop_late_post ( Node* n );

  // Array of immediate dominance info for each CFG node indexed by node idx
private:
  uint _idom_size;
  Node **_idom;                  // Array of immediate dominators
  uint *_dom_depth;              // Used for fast LCA test
  GrowableArray<uint>* _dom_stk; // For recomputation of dom depth

  Node* idom_no_update(Node* d) const {
    assert(d->_idx < _idom_size, "oob");
    Node* n = _idom[d->_idx];
    assert(n != NULL,"Bad immediate dominator info.");
    while (n->in(0) == NULL) { // Skip dead CFG nodes
      //n = n->in(1);
      n = (Node*)(((intptr_t)_nodes[n->_idx]) & ~1);
      assert(n != NULL,"Bad immediate dominator info.");
    }
    return n;
  }
  Node *idom(Node* d) const {
    uint didx = d->_idx;
    Node *n = idom_no_update(d);
    _idom[didx] = n; // Lazily remove dead CFG nodes from table.
    return n;
  }
  uint dom_depth(Node* d) const {
    guarantee(d != NULL, "Null dominator info.");
    guarantee(d->_idx < _idom_size, "");
    return _dom_depth[d->_idx];
  }
  void set_idom(Node* d, Node* n, uint dom_depth);
  // Locally compute IDOM using dom_lca call
  Node *compute_idom( Node *region ) const;
  // Recompute dom_depth
  void recompute_dom_depth();

  // Is safept not required by an outer loop?
  bool is_deleteable_safept(Node* sfpt);

  // Replace parallel induction variable (parallel to trip counter)
  void replace_parallel_iv(IdealLoopTree *loop);

  // Perform verification that the graph is valid.
  PhaseIdealLoop( PhaseIterGVN &igvn) :
    PhaseTransform(Ideal_Loop),
    _igvn(igvn),
    _dom_lca_tags(arena()), // Thread::resource_area
    _verify_me(NULL),
    _verify_only(true) {
    build_and_optimize(false, false);
  }

  // build the loop tree and perform any requested optimizations
  void build_and_optimize(bool do_split_if, bool skip_loop_opts);

public:
  // Dominators for the sea of nodes
  void Dominators();
  Node *dom_lca( Node *n1, Node *n2 ) const {
    return find_non_split_ctrl(dom_lca_internal(n1, n2));
  }
  Node *dom_lca_internal( Node *n1, Node *n2 ) const;

  // Compute the Ideal Node to Loop mapping
  PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs, bool skip_loop_opts = false) :
    PhaseTransform(Ideal_Loop),
    _igvn(igvn),
    _dom_lca_tags(arena()), // Thread::resource_area
    _verify_me(NULL),
    _verify_only(false) {
    build_and_optimize(do_split_ifs, skip_loop_opts);
  }

  // Verify that verify_me made the same decisions as a fresh run.
  PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me) :
    PhaseTransform(Ideal_Loop),
    _igvn(igvn),
    _dom_lca_tags(arena()), // Thread::resource_area
    _verify_me(verify_me),
    _verify_only(false) {
    build_and_optimize(false, false);
  }

  // Build and verify the loop tree without modifying the graph. This
  // is useful to verify that all inputs properly dominate their uses.
  static void verify(PhaseIterGVN& igvn) {
#ifdef ASSERT
    PhaseIdealLoop v(igvn);
#endif
  }

  // True if the method has at least 1 irreducible loop
  bool _has_irreducible_loops;

  // Per-Node transform
  virtual Node *transform( Node *a_node ) { return 0; }

  bool is_counted_loop( Node *x, IdealLoopTree *loop );

  Node* exact_limit( IdealLoopTree *loop );

  // Return a post-walked LoopNode
  IdealLoopTree *get_loop( Node *n ) const {
    // Dead nodes have no loop, so return the top level loop instead
    if (!has_node(n)) return _ltree_root;
    assert(!has_ctrl(n), "");
    return (IdealLoopTree*)_nodes[n->_idx];
  }

  // Is 'n' a (nested) member of 'loop'?
  int is_member( const IdealLoopTree *loop, Node *n ) const {
    return loop->is_member(get_loop(n));
  }

  // This is the basic building block of the loop optimizations. It clones an
  // entire loop body. It makes an old_new loop body mapping; with this
  // mapping you can find the new-loop equivalent to an old-loop node. All
  // new-loop nodes are exactly equal to their old-loop counterparts, all
  // edges are the same. All exits from the old-loop now have a RegionNode
  // that merges the equivalent new-loop path. This is true even for the
  // normal "loop-exit" condition. All uses of loop-invariant old-loop values
  // now come from (one or more) Phis that merge their new-loop equivalents.
  // Parameter side_by_side_idom:
  // When side_by_side_idom is NULL, the dominator tree is constructed for
  // the clone loop to dominate the original. Used in construction of
  // pre-main-post loop sequence.
  // When nonnull, the clone and original are side-by-side, both are
  // dominated by the passed in side_by_side_idom node. Used in
  // construction of unswitched loops.
  void clone_loop( IdealLoopTree *loop, Node_List &old_new, int dom_depth,
                   Node* side_by_side_idom = NULL);
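  // Usage sketch (assumed, based on the description above): after a call
  //   clone_loop(loop, old_new, dom_depth(loop->_head));
  // the clone of an old-loop node 'n' can be looked up as old_new[n->_idx].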

  // If we got the effect of peeling, either by actually peeling or by
  // making a pre-loop which must execute at least once, we can remove
  // all loop-invariant dominated tests in the main body.
  void peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new );

  // Generate code to do a loop peel for the given loop (and body).
  // old_new is a temp array.
  void do_peeling( IdealLoopTree *loop, Node_List &old_new );

  // Add pre and post loops around the given loop. These loops are used
  // during RCE, unrolling and aligning loops.
  void insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only );
  // If Node n lives in the back_ctrl block, we clone a private version of n
  // in preheader_ctrl block and return that, otherwise return n.
  Node *clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones );

  // Take steps to maximally unroll the loop. Peel any odd iterations, then
  // unroll to do double iterations. The next round of major loop transforms
  // will repeat till the doubled loop body does all remaining iterations in 1
  // pass.
  void do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new );

  // Unroll the loop body one step - make each trip do 2 iterations.
  void do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip );

  // Return true if exp is a constant times an induction var
  bool is_scaled_iv(Node* exp, Node* iv, int* p_scale);

  // Return true if exp is a scaled induction var plus (or minus) constant
  bool is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth = 0);

  // Create a new if above the uncommon_trap_if_pattern for the predicate to be promoted
  ProjNode* create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
                                        Deoptimization::DeoptReason reason);
  void register_control(Node* n, IdealLoopTree *loop, Node* pred);

  // Clone loop predicates to cloned loops (peeled, unswitched)
  static ProjNode* clone_predicate(ProjNode* predicate_proj, Node* new_entry,
                                   Deoptimization::DeoptReason reason,
                                   PhaseIdealLoop* loop_phase,
                                   PhaseIterGVN* igvn);

  static Node* clone_loop_predicates(Node* old_entry, Node* new_entry,
                                     bool clone_limit_check,
                                     PhaseIdealLoop* loop_phase,
                                     PhaseIterGVN* igvn);
  Node* clone_loop_predicates(Node* old_entry, Node* new_entry, bool clone_limit_check);

  static Node* skip_loop_predicates(Node* entry);

  // Find a good location to insert a predicate
  static ProjNode* find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason);
  // Find a predicate
  static Node* find_predicate(Node* entry);
  // Construct a range check for a predicate if
  BoolNode* rc_predicate(IdealLoopTree *loop, Node* ctrl,
                         int scale, Node* offset,
                         Node* init, Node* limit, Node* stride,
                         Node* range, bool upper);

  // Implementation of the loop predication to promote checks outside the loop
  bool loop_predication_impl(IdealLoopTree *loop);

  // Helper functions to collect predicates, used when eliminating the useless ones
  void collect_potentially_useful_predicates(IdealLoopTree *loop, Unique_Node_List &predicate_opaque1);
  void eliminate_useless_predicates();

  // Change the control input of expensive nodes to allow commoning by
  // IGVN when it is guaranteed to not result in a more frequent
  // execution of the expensive node. Return true if progress.
  bool process_expensive_nodes();

  // Check whether node has become unreachable
  bool is_node_unreachable(Node *n) const {
    return !has_node(n) || n->is_unreachable(_igvn);
  }

  // Eliminate range-checks and other trip-counter vs loop-invariant tests.
  void do_range_check( IdealLoopTree *loop, Node_List &old_new );

  // Create a slow version of the loop by cloning the loop
  // and inserting an if to select fast-slow versions.
  ProjNode* create_slow_version_of_loop(IdealLoopTree *loop,
                                        Node_List &old_new);

  // Clone loop with an invariant test (that does not exit) and
  // insert a clone of the test that selects which version to
  // execute.
  void do_unswitching (IdealLoopTree *loop, Node_List &old_new);

  // Find candidate "if" for unswitching
  IfNode* find_unswitching_candidate(const IdealLoopTree *loop) const;

  // Range Check Elimination uses this function!
  // Constrain the main loop iterations so the affine function:
  //   low_limit <= scale_con * I + offset < upper_limit
  // always holds true. That is, either increase the number of iterations in
  // the pre-loop or the post-loop until the condition holds true in the main
  // loop. Scale_con, offset and limit are all loop invariant.
  void add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit );
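  // Illustrative example (assumed numbers): for an access a[2*i + 3] guarded
  // by 0 <= 2*i + 3 < a.length, add_constraint() would be called with
  // scale_con=2, offset=3, low_limit=0 and upper_limit=a.length, and it
  // adjusts *pre_limit and/or *main_limit so that every main-loop iteration
  // provably satisfies both bounds.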
kvn@2915 | 948 | // Helper function for add_constraint(). |
kvn@2915 | 949 | Node* adjust_limit( int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl ); |
duke@435 | 950 | |
duke@435 | 951 | // Partially peel loop up through last_peel node. |
duke@435 | 952 | bool partial_peel( IdealLoopTree *loop, Node_List &old_new ); |
duke@435 | 953 | |
duke@435 | 954 | // Create a scheduled list of nodes control dependent on ctrl set. |
duke@435 | 955 | void scheduled_nodelist( IdealLoopTree *loop, VectorSet& ctrl, Node_List &sched ); |
duke@435 | 956 | // Has a use in the vector set |
duke@435 | 957 | bool has_use_in_set( Node* n, VectorSet& vset ); |
duke@435 | 958 | // Has use internal to the vector set (ie. not in a phi at the loop head) |
duke@435 | 959 | bool has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop ); |
duke@435 | 960 | // clone "n" for uses that are outside of loop |
kvn@5154 | 961 | int clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ); |
duke@435 | 962 | // clone "n" for special uses that are in the not_peeled region |
duke@435 | 963 | void clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n, |
duke@435 | 964 | VectorSet& not_peel, Node_List& sink_list, Node_List& worklist ); |
duke@435 | 965 | // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist |
duke@435 | 966 | void insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp ); |
duke@435 | 967 | #ifdef ASSERT |
duke@435 | 968 | // Validate the loop partition sets: peel and not_peel |
duke@435 | 969 | bool is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list, VectorSet& not_peel ); |
duke@435 | 970 | // Ensure that uses outside of loop are of the right form |
duke@435 | 971 | bool is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list, |
duke@435 | 972 | uint orig_exit_idx, uint clone_exit_idx); |
duke@435 | 973 | bool is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx); |
duke@435 | 974 | #endif |
duke@435 | 975 | |
duke@435 | 976 | // Returns the nonzero constant stride if the if-node is a possible iv test (otherwise returns zero). |
duke@435 | 977 | int stride_of_possible_iv( Node* iff ); |
duke@435 | 978 | bool is_possible_iv_test( Node* iff ) { return stride_of_possible_iv(iff) != 0; } |
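// Illustrative case (names hypothetical): for an exit test "if (i < n)"
// where i is a Phi whose back-edge input is AddI(i, 8), the returned
// stride is 8.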
duke@435 | 979 | // Return the (unique) control output node that's in the loop (if it exists). |
duke@435 | 980 | Node* stay_in_loop( Node* n, IdealLoopTree *loop); |
duke@435 | 981 | // Insert a signed compare loop exit cloned from an unsigned compare. |
duke@435 | 982 | IfNode* insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop); |
duke@435 | 983 | void remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop); |
duke@435 | 984 | // Utility to register node "n" with PhaseIdealLoop |
duke@435 | 985 | void register_node(Node* n, IdealLoopTree *loop, Node* pred, int ddepth); |
duke@435 | 986 | // Utility to create an if-projection |
duke@435 | 987 | ProjNode* proj_clone(ProjNode* p, IfNode* iff); |
duke@435 | 988 | // Force the iff control output to be the live_proj |
duke@435 | 989 | Node* short_circuit_if(IfNode* iff, ProjNode* live_proj); |
duke@435 | 990 | // Insert a region before an if projection |
duke@435 | 991 | RegionNode* insert_region_before_proj(ProjNode* proj); |
duke@435 | 992 | // Insert a new if before an if projection |
duke@435 | 993 | ProjNode* insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj); |
duke@435 | 994 | |
duke@435 | 995 | // Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps. |
duke@435 | 996 | // "Nearly" because all Nodes have been cloned from the original in the loop, |
duke@435 | 997 | // but the fall-in edges to the Cmp are different. Clone bool/Cmp pairs |
duke@435 | 998 | // through the Phi recursively, and return a Bool. |
duke@435 | 999 | BoolNode *clone_iff( PhiNode *phi, IdealLoopTree *loop ); |
duke@435 | 1000 | CmpNode *clone_bool( PhiNode *phi, IdealLoopTree *loop ); |
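// Illustrative shape (names hypothetical): given
//   Phi(region, Bool(Cmp(a, x)), Bool(Cmp(b, x)))
// clone_iff pushes the Bool/Cmp pairs up through the Phi, yielding
//   Bool(Cmp(Phi(region, a, b), x))
// so a single shared test remains once the region is split.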
duke@435 | 1001 | |
duke@435 | 1002 | |
duke@435 | 1003 | // Rework addressing expressions to get the most loop-invariant stuff |
duke@435 | 1004 | // moved out. We'd like to do all associative operators, but it's especially |
duke@435 | 1005 | // important (common) to do address expressions. |
duke@435 | 1006 | Node *remix_address_expressions( Node *n ); |
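// Illustrative reassociation (values hypothetical): with inv1 and inv2
// loop-invariant, (i + inv1) + inv2 is reshaped to i + (inv1 + inv2) so the
// invariant sum is computed once outside the loop rather than on every
// iteration; the same idea applies to address expressions.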
duke@435 | 1007 | |
duke@435 | 1008 | // Attempt to use a conditional move instead of a phi/branch |
duke@435 | 1009 | Node *conditional_move( Node *n ); |
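// Illustrative shape: the diamond
//   if (tst) x = a; else x = b;      // i.e. x = Phi(region, a, b)
// can collapse to x = CMove(tst, a, b), trading the branch for a
// conditional move when the heuristics judge it profitable.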
duke@435 | 1010 | |
duke@435 | 1011 | // Reorganize offset computations to lower register pressure. |
duke@435 | 1012 | // Mostly prevent loop-fallout uses of the pre-incremented trip counter |
duke@435 | 1013 | // (which would otherwise be live alongside the post-incremented trip |
duke@435 | 1014 | // counter, forcing an extra register move). |
duke@435 | 1015 | void reorg_offsets( IdealLoopTree *loop ); |
duke@435 | 1016 | |
duke@435 | 1017 | // Check for aggressive application of 'split-if' optimization, |
duke@435 | 1018 | // using basic block level info. |
duke@435 | 1019 | void split_if_with_blocks ( VectorSet &visited, Node_Stack &nstack ); |
duke@435 | 1020 | Node *split_if_with_blocks_pre ( Node *n ); |
duke@435 | 1021 | void split_if_with_blocks_post( Node *n ); |
duke@435 | 1022 | Node *has_local_phi_input( Node *n ); |
duke@435 | 1023 | // Mark an IfNode as being dominated by a prior test, |
duke@435 | 1024 | // without actually altering the CFG (and hence IDOM info). |
kvn@3038 | 1025 | void dominated_by( Node *prevdom, Node *iff, bool flip = false, bool exclude_loop_predicate = false ); |
duke@435 | 1026 | |
duke@435 | 1027 | // Split Node 'n' through merge point |
duke@435 | 1028 | Node *split_thru_region( Node *n, Node *region ); |
duke@435 | 1029 | // Split Node 'n' through merge point if there is enough win. |
duke@435 | 1030 | Node *split_thru_phi( Node *n, Node *region, int policy ); |
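// Illustrative win (names hypothetical):
//   AddI(Phi(region, x, y), c)  ==>  Phi(region, AddI(x, c), AddI(y, c))
// is worthwhile when the cloned nodes fold away (e.g. x and c are
// constants); "policy" sets the required amount of win.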
duke@435 | 1031 | // Found an If getting its condition-code input from a Phi in the |
duke@435 | 1032 | // same block. Split thru the Region. |
duke@435 | 1033 | void do_split_if( Node *iff ); |
duke@435 | 1034 | |
never@2118 | 1035 | // Conversion of fill/copy patterns into intrinsic versions |
never@2118 | 1036 | bool do_intrinsify_fill(); |
never@2118 | 1037 | bool intrinsify_fill(IdealLoopTree* lpt); |
never@2118 | 1038 | bool match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value, |
never@2118 | 1039 | Node*& shift, Node*& offset); |
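// Illustrative pattern (shape only): a counted loop such as
//   for (int i = off; i < limit; i++) a[i] = v;   // v loop-invariant
// can be matched and replaced by a single call to an array-fill stub.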
never@2118 | 1040 | |
duke@435 | 1041 | private: |
duke@435 | 1042 | // Return a type based on condition control flow |
duke@435 | 1043 | const TypeInt* filtered_type( Node *n, Node* n_ctrl); |
duke@435 | 1044 | const TypeInt* filtered_type( Node *n ) { return filtered_type(n, NULL); } |
duke@435 | 1045 | // Helpers for filtered type |
duke@435 | 1046 | const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl); |
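// Illustrative narrowing (notation approximate): on the taken branch of
//   if (x > 0) { ... }
// the dominating test lets filtered_type report x as int in [1..maxint]
// rather than the full integer range.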
duke@435 | 1047 | |
duke@435 | 1048 | // Helper functions |
duke@435 | 1049 | Node *spinup( Node *iff, Node *new_false, Node *new_true, Node *region, Node *phi, small_cache *cache ); |
duke@435 | 1050 | Node *find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true ); |
duke@435 | 1051 | void handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true ); |
duke@435 | 1052 | bool split_up( Node *n, Node *blk1, Node *blk2 ); |
duke@435 | 1053 | void sink_use( Node *use, Node *post_loop ); |
duke@435 | 1054 | Node *place_near_use( Node *useblock ) const; |
duke@435 | 1055 | |
duke@435 | 1056 | bool _created_loop_node; |
duke@435 | 1057 | public: |
duke@435 | 1058 | void set_created_loop_node() { _created_loop_node = true; } |
duke@435 | 1059 | bool created_loop_node() { return _created_loop_node; } |
cfang@1607 | 1060 | void register_new_node( Node *n, Node *blk ); |
duke@435 | 1061 | |
kvn@3408 | 1062 | #ifdef ASSERT |
roland@4589 | 1063 | void dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA); |
kvn@3408 | 1064 | #endif |
kvn@3408 | 1065 | |
duke@435 | 1066 | #ifndef PRODUCT |
duke@435 | 1067 | void dump( ) const; |
duke@435 | 1068 | void dump( IdealLoopTree *loop, uint rpo_idx, Node_List &rpo_list ) const; |
duke@435 | 1069 | void rpo( Node *start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list ) const; |
duke@435 | 1070 | void verify() const; // Major slow :-) |
duke@435 | 1071 | void verify_compare( Node *n, const PhaseIdealLoop *loop_verify, VectorSet &visited ) const; |
duke@435 | 1072 | IdealLoopTree *get_loop_idx(Node* n) const { |
duke@435 | 1073 | // Dead nodes have no loop, so return the top level loop instead |
duke@435 | 1074 | return _nodes[n->_idx] ? (IdealLoopTree*)_nodes[n->_idx] : _ltree_root; |
duke@435 | 1075 | } |
duke@435 | 1076 | // Print some stats |
duke@435 | 1077 | static void print_statistics(); |
duke@435 | 1078 | static int _loop_invokes; // Count of PhaseIdealLoop invokes |
duke@435 | 1079 | static int _loop_work; // Sum of PhaseIdealLoop x _unique |
duke@435 | 1080 | #endif |
duke@435 | 1081 | }; |
duke@435 | 1082 | |
duke@435 | 1083 | inline Node* IdealLoopTree::tail() { |
duke@435 | 1084 | // Handle lazy update of _tail field |
duke@435 | 1085 | Node *n = _tail; |
duke@435 | 1086 | // If _tail is dead (no control input), use its live controlling node. |
duke@435 | 1088 | if (n->in(0) == NULL) |
duke@435 | 1089 | n = _phase->get_ctrl(n); |
duke@435 | 1090 | _tail = n; |
duke@435 | 1091 | return n; |
duke@435 | 1092 | } |
duke@435 | 1093 | |
duke@435 | 1094 | |
duke@435 | 1095 | // Iterate over the loop tree using a preorder, left-to-right traversal. |
duke@435 | 1096 | // |
duke@435 | 1097 | // Example that visits all counted loops from within PhaseIdealLoop |
duke@435 | 1098 | // |
duke@435 | 1099 | // for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) { |
duke@435 | 1100 | // IdealLoopTree* lpt = iter.current(); |
duke@435 | 1101 | // if (!lpt->is_counted()) continue; |
duke@435 | 1102 | //   ... |
duke@435 | 1102 | // } |
duke@435 | 1103 | class LoopTreeIterator : public StackObj { |
duke@435 | 1104 | private: |
duke@435 | 1105 | IdealLoopTree* _root; |
duke@435 | 1106 | IdealLoopTree* _curnt; |
duke@435 | 1107 | |
duke@435 | 1108 | public: |
duke@435 | 1109 | LoopTreeIterator(IdealLoopTree* root) : _root(root), _curnt(root) {} |
duke@435 | 1110 | |
duke@435 | 1111 | bool done() { return _curnt == NULL; } // Finished iterating? |
duke@435 | 1112 | |
duke@435 | 1113 | void next(); // Advance to next loop tree |
duke@435 | 1114 | |
duke@435 | 1115 | IdealLoopTree* current() { return _curnt; } // Return current value of iterator. |
duke@435 | 1116 | }; |
stefank@2314 | 1117 | |
stefank@2314 | 1118 | #endif // SHARE_VM_OPTO_LOOPNODE_HPP |