Wed, 09 Nov 2011 06:14:32 -0800
7109887: java/util/Arrays/CopyMethods.java fails with -XX:+DeoptimizeALot
Summary: zero array when compiled code is deoptimized.
Reviewed-by: never, twisti
duke@435 | 1 | /* |
trims@2708 | 2 | * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "compiler/compileLog.hpp" |
stefank@2314 | 27 | #include "memory/allocation.inline.hpp" |
stefank@2314 | 28 | #include "opto/addnode.hpp" |
stefank@2314 | 29 | #include "opto/callnode.hpp" |
stefank@2314 | 30 | #include "opto/connode.hpp" |
stefank@2314 | 31 | #include "opto/divnode.hpp" |
stefank@2314 | 32 | #include "opto/loopnode.hpp" |
stefank@2314 | 33 | #include "opto/mulnode.hpp" |
stefank@2314 | 34 | #include "opto/rootnode.hpp" |
stefank@2314 | 35 | #include "opto/runtime.hpp" |
stefank@2314 | 36 | #include "opto/subnode.hpp" |
duke@435 | 37 | |
duke@435 | 38 | //------------------------------is_loop_exit----------------------------------- |
duke@435 | 39 | // Given an IfNode, return the loop-exiting projection or NULL if both |
duke@435 | 40 | // arms remain in the loop. |
duke@435 | 41 | Node *IdealLoopTree::is_loop_exit(Node *iff) const { |
duke@435 | 42 | if( iff->outcnt() != 2 ) return NULL; // Ignore partially dead tests |
duke@435 | 43 | PhaseIdealLoop *phase = _phase; |
duke@435 | 44 | // Test is an IfNode, has 2 projections. If BOTH are in the loop |
duke@435 | 45 | // we need loop unswitching instead of peeling. |
duke@435 | 46 | if( !is_member(phase->get_loop( iff->raw_out(0) )) ) |
duke@435 | 47 | return iff->raw_out(0); |
duke@435 | 48 | if( !is_member(phase->get_loop( iff->raw_out(1) )) ) |
duke@435 | 49 | return iff->raw_out(1); |
duke@435 | 50 | return NULL; |
duke@435 | 51 | } |
duke@435 | 52 | |
duke@435 | 53 | |
duke@435 | 54 | //============================================================================= |
duke@435 | 55 | |
duke@435 | 56 | |
duke@435 | 57 | //------------------------------record_for_igvn---------------------------- |
duke@435 | 58 | // Put loop body on igvn work list |
duke@435 | 59 | void IdealLoopTree::record_for_igvn() { |
duke@435 | 60 | for( uint i = 0; i < _body.size(); i++ ) { |
duke@435 | 61 | Node *n = _body.at(i); |
duke@435 | 62 | _phase->_igvn._worklist.push(n); |
duke@435 | 63 | } |
duke@435 | 64 | } |
duke@435 | 65 | |
kvn@2747 | 66 | //------------------------------compute_exact_trip_count----------------------- |
kvn@2747 | 67 | // Compute loop exact trip count if possible. Do not recalculate trip count for |
kvn@2747 | 68 | // split loops (pre-main-post) which have their limits and inits behind Opaque node. |
kvn@2747 | 69 | void IdealLoopTree::compute_exact_trip_count( PhaseIdealLoop *phase ) { |
kvn@2747 | 70 | if (!_head->as_Loop()->is_valid_counted_loop()) { |
kvn@2747 | 71 | return; |
kvn@2747 | 72 | } |
kvn@2747 | 73 | CountedLoopNode* cl = _head->as_CountedLoop(); |
kvn@2747 | 74 | // Trip count may become nonexact for iteration split loops since |
kvn@2747 | 75 | // RCE modifies limits. Note, _trip_count value is not reset since |
kvn@2747 | 76 | // it is used to limit unrolling of main loop. |
kvn@2747 | 77 | cl->set_nonexact_trip_count(); |
kvn@2747 | 78 | |
kvn@2747 | 79 | // Loop's test should be part of loop. |
kvn@2747 | 80 | if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)))) |
kvn@2747 | 81 | return; // Infinite loop |
kvn@2747 | 82 | |
kvn@2747 | 83 | #ifdef ASSERT |
kvn@2747 | 84 | BoolTest::mask bt = cl->loopexit()->test_trip(); |
kvn@2747 | 85 | assert(bt == BoolTest::lt || bt == BoolTest::gt || |
kvn@2979 | 86 | bt == BoolTest::ne, "canonical test is expected"); |
kvn@2747 | 87 | #endif |
kvn@2747 | 88 | |
kvn@2747 | 89 | Node* init_n = cl->init_trip(); |
kvn@2747 | 90 | Node* limit_n = cl->limit(); |
kvn@2747 | 91 | if (init_n != NULL && init_n->is_Con() && |
kvn@2747 | 92 | limit_n != NULL && limit_n->is_Con()) { |
kvn@2747 | 93 | // Use longs to avoid integer overflow. |
kvn@2747 | 94 | int stride_con = cl->stride_con(); |
kvn@2747 | 95 | long init_con = cl->init_trip()->get_int(); |
kvn@2747 | 96 | long limit_con = cl->limit()->get_int(); |
kvn@2747 | 97 | int stride_m = stride_con - (stride_con > 0 ? 1 : -1); |
kvn@2747 | 98 | long trip_count = (limit_con - init_con + stride_m)/stride_con; |
kvn@2747 | 99 | if (trip_count > 0 && (julong)trip_count < (julong)max_juint) { |
kvn@2747 | 100 | // Set exact trip count. |
kvn@2747 | 101 | cl->set_exact_trip_count((uint)trip_count); |
kvn@2747 | 102 | } |
kvn@2747 | 103 | } |
kvn@2747 | 104 | } |
kvn@2747 | 105 | |
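The constant-bounds arithmetic above can be exercised on its own. The following standalone sketch (illustrative only, not part of loopTransform.cpp; names such as exact_trip_count are made up) mirrors it: widen to 64 bits, bias the limit by stride-1 (or stride+1 for a negative stride), and accept the result only when it fits an unsigned 32-bit trip count.

#include <cstdint>
#include <cstdio>

// Illustrative sketch of the exact-trip-count computation for a counted
// loop with constant init, limit and stride (assumes the canonical
// lt/gt/ne exit test asserted above).
static bool exact_trip_count(int init_con, int limit_con, int stride_con,
                             uint32_t* trip_out) {
  // Use 64-bit math to avoid integer overflow, as the real code does.
  int64_t init  = init_con;
  int64_t limit = limit_con;
  int     stride_m = stride_con - (stride_con > 0 ? 1 : -1);
  int64_t trip = (limit - init + stride_m) / stride_con;
  if (trip > 0 && (uint64_t)trip < (uint64_t)UINT32_MAX) {
    *trip_out = (uint32_t)trip;
    return true;   // exact trip count known
  }
  return false;    // empty, effectively infinite, or too large to represent
}

int main() {
  uint32_t trips;
  if (exact_trip_count(0, 10, 3, &trips))   // for (i = 0; i < 10; i += 3)
    printf("trip count = %u\n", trips);     // prints 4
  return 0;
}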
duke@435 | 106 | //------------------------------compute_profile_trip_cnt---------------------------- |
duke@435 | 107 | // Compute loop trip count from profile data as |
duke@435 | 108 | // (backedge_count + loop_exit_count) / loop_exit_count |
duke@435 | 109 | void IdealLoopTree::compute_profile_trip_cnt( PhaseIdealLoop *phase ) { |
duke@435 | 110 | if (!_head->is_CountedLoop()) { |
duke@435 | 111 | return; |
duke@435 | 112 | } |
duke@435 | 113 | CountedLoopNode* head = _head->as_CountedLoop(); |
duke@435 | 114 | if (head->profile_trip_cnt() != COUNT_UNKNOWN) { |
duke@435 | 115 | return; // Already computed |
duke@435 | 116 | } |
duke@435 | 117 | float trip_cnt = (float)max_jint; // default is big |
duke@435 | 118 | |
duke@435 | 119 | Node* back = head->in(LoopNode::LoopBackControl); |
duke@435 | 120 | while (back != head) { |
duke@435 | 121 | if ((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) && |
duke@435 | 122 | back->in(0) && |
duke@435 | 123 | back->in(0)->is_If() && |
duke@435 | 124 | back->in(0)->as_If()->_fcnt != COUNT_UNKNOWN && |
duke@435 | 125 | back->in(0)->as_If()->_prob != PROB_UNKNOWN) { |
duke@435 | 126 | break; |
duke@435 | 127 | } |
duke@435 | 128 | back = phase->idom(back); |
duke@435 | 129 | } |
duke@435 | 130 | if (back != head) { |
duke@435 | 131 | assert((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) && |
duke@435 | 132 | back->in(0), "if-projection exists"); |
duke@435 | 133 | IfNode* back_if = back->in(0)->as_If(); |
duke@435 | 134 | float loop_back_cnt = back_if->_fcnt * back_if->_prob; |
duke@435 | 135 | |
duke@435 | 136 | // Now compute a loop exit count |
duke@435 | 137 | float loop_exit_cnt = 0.0f; |
duke@435 | 138 | for( uint i = 0; i < _body.size(); i++ ) { |
duke@435 | 139 | Node *n = _body[i]; |
duke@435 | 140 | if( n->is_If() ) { |
duke@435 | 141 | IfNode *iff = n->as_If(); |
duke@435 | 142 | if( iff->_fcnt != COUNT_UNKNOWN && iff->_prob != PROB_UNKNOWN ) { |
duke@435 | 143 | Node *exit = is_loop_exit(iff); |
duke@435 | 144 | if( exit ) { |
duke@435 | 145 | float exit_prob = iff->_prob; |
duke@435 | 146 | if (exit->Opcode() == Op_IfFalse) exit_prob = 1.0 - exit_prob; |
duke@435 | 147 | if (exit_prob > PROB_MIN) { |
duke@435 | 148 | float exit_cnt = iff->_fcnt * exit_prob; |
duke@435 | 149 | loop_exit_cnt += exit_cnt; |
duke@435 | 150 | } |
duke@435 | 151 | } |
duke@435 | 152 | } |
duke@435 | 153 | } |
duke@435 | 154 | } |
duke@435 | 155 | if (loop_exit_cnt > 0.0f) { |
duke@435 | 156 | trip_cnt = (loop_back_cnt + loop_exit_cnt) / loop_exit_cnt; |
duke@435 | 157 | } else { |
duke@435 | 158 | // No exit count, so use the backedge count |
duke@435 | 159 | trip_cnt = loop_back_cnt; |
duke@435 | 160 | } |
duke@435 | 161 | } |
duke@435 | 162 | #ifndef PRODUCT |
duke@435 | 163 | if (TraceProfileTripCount) { |
duke@435 | 164 | tty->print_cr("compute_profile_trip_cnt lp: %d cnt: %f\n", head->_idx, trip_cnt); |
duke@435 | 165 | } |
duke@435 | 166 | #endif |
duke@435 | 167 | head->set_profile_trip_cnt(trip_cnt); |
duke@435 | 168 | } |
duke@435 | 169 | |
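As a rough worked instance of the formula in the header comment, assuming profile counts are available (the values below are made up):

#include <cstdio>

// Illustrative only: estimate the average trip count from profile data as
// (backedge_count + loop_exit_count) / loop_exit_count, falling back to the
// backedge count when no exits were profiled (mirrors the branch above).
static float profile_trip_cnt(float loop_back_cnt, float loop_exit_cnt) {
  if (loop_exit_cnt > 0.0f)
    return (loop_back_cnt + loop_exit_cnt) / loop_exit_cnt;
  return loop_back_cnt;  // no exit count, so use the backedge count
}

int main() {
  // A loop exited 10 times that ran 990 backedges averages ~100 iterations.
  printf("estimated trips = %f\n", profile_trip_cnt(990.0f, 10.0f));
  return 0;
}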
duke@435 | 170 | //---------------------is_invariant_addition----------------------------- |
duke@435 | 171 | // Return nonzero index of invariant operand for an Add or Sub |
twisti@1040 | 172 | // of (nonconstant) invariant and variant values. Helper for reassociate_invariants. |
duke@435 | 173 | int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) { |
duke@435 | 174 | int op = n->Opcode(); |
duke@435 | 175 | if (op == Op_AddI || op == Op_SubI) { |
duke@435 | 176 | bool in1_invar = this->is_invariant(n->in(1)); |
duke@435 | 177 | bool in2_invar = this->is_invariant(n->in(2)); |
duke@435 | 178 | if (in1_invar && !in2_invar) return 1; |
duke@435 | 179 | if (!in1_invar && in2_invar) return 2; |
duke@435 | 180 | } |
duke@435 | 181 | return 0; |
duke@435 | 182 | } |
duke@435 | 183 | |
duke@435 | 184 | //---------------------reassociate_add_sub----------------------------- |
duke@435 | 185 | // Reassociate invariant add and subtract expressions: |
duke@435 | 186 | // |
duke@435 | 187 | // inv1 + (x + inv2) => ( inv1 + inv2) + x |
duke@435 | 188 | // (x + inv2) + inv1 => ( inv1 + inv2) + x |
duke@435 | 189 | // inv1 + (x - inv2) => ( inv1 - inv2) + x |
duke@435 | 190 | // inv1 - (inv2 - x) => ( inv1 - inv2) + x |
duke@435 | 191 | // (x + inv2) - inv1 => (-inv1 + inv2) + x |
duke@435 | 192 | // (x - inv2) + inv1 => ( inv1 - inv2) + x |
duke@435 | 193 | // (x - inv2) - inv1 => (-inv1 - inv2) + x |
duke@435 | 194 | // inv1 + (inv2 - x) => ( inv1 + inv2) - x |
duke@435 | 195 | // inv1 - (x - inv2) => ( inv1 + inv2) - x |
duke@435 | 196 | // (inv2 - x) + inv1 => ( inv1 + inv2) - x |
duke@435 | 197 | // (inv2 - x) - inv1 => (-inv1 + inv2) - x |
duke@435 | 198 | // inv1 - (x + inv2) => ( inv1 - inv2) - x |
duke@435 | 199 | // |
duke@435 | 200 | Node* IdealLoopTree::reassociate_add_sub(Node* n1, PhaseIdealLoop *phase) { |
duke@435 | 201 | if ((!n1->is_Add() && !n1->is_Sub()) || n1->outcnt() == 0) return NULL; |
duke@435 | 202 | if (is_invariant(n1)) return NULL; |
duke@435 | 203 | int inv1_idx = is_invariant_addition(n1, phase); |
duke@435 | 204 | if (!inv1_idx) return NULL; |
duke@435 | 205 | // Don't mess with add of constant (igvn moves them to expression tree root.) |
duke@435 | 206 | if (n1->is_Add() && n1->in(2)->is_Con()) return NULL; |
duke@435 | 207 | Node* inv1 = n1->in(inv1_idx); |
duke@435 | 208 | Node* n2 = n1->in(3 - inv1_idx); |
duke@435 | 209 | int inv2_idx = is_invariant_addition(n2, phase); |
duke@435 | 210 | if (!inv2_idx) return NULL; |
duke@435 | 211 | Node* x = n2->in(3 - inv2_idx); |
duke@435 | 212 | Node* inv2 = n2->in(inv2_idx); |
duke@435 | 213 | |
duke@435 | 214 | bool neg_x = n2->is_Sub() && inv2_idx == 1; |
duke@435 | 215 | bool neg_inv2 = n2->is_Sub() && inv2_idx == 2; |
duke@435 | 216 | bool neg_inv1 = n1->is_Sub() && inv1_idx == 2; |
duke@435 | 217 | if (n1->is_Sub() && inv1_idx == 1) { |
duke@435 | 218 | neg_x = !neg_x; |
duke@435 | 219 | neg_inv2 = !neg_inv2; |
duke@435 | 220 | } |
duke@435 | 221 | Node* inv1_c = phase->get_ctrl(inv1); |
duke@435 | 222 | Node* inv2_c = phase->get_ctrl(inv2); |
duke@435 | 223 | Node* n_inv1; |
duke@435 | 224 | if (neg_inv1) { |
duke@435 | 225 | Node *zero = phase->_igvn.intcon(0); |
duke@435 | 226 | phase->set_ctrl(zero, phase->C->root()); |
duke@435 | 227 | n_inv1 = new (phase->C, 3) SubINode(zero, inv1); |
duke@435 | 228 | phase->register_new_node(n_inv1, inv1_c); |
duke@435 | 229 | } else { |
duke@435 | 230 | n_inv1 = inv1; |
duke@435 | 231 | } |
duke@435 | 232 | Node* inv; |
duke@435 | 233 | if (neg_inv2) { |
duke@435 | 234 | inv = new (phase->C, 3) SubINode(n_inv1, inv2); |
duke@435 | 235 | } else { |
duke@435 | 236 | inv = new (phase->C, 3) AddINode(n_inv1, inv2); |
duke@435 | 237 | } |
duke@435 | 238 | phase->register_new_node(inv, phase->get_early_ctrl(inv)); |
duke@435 | 239 | |
duke@435 | 240 | Node* addx; |
duke@435 | 241 | if (neg_x) { |
duke@435 | 242 | addx = new (phase->C, 3) SubINode(inv, x); |
duke@435 | 243 | } else { |
duke@435 | 244 | addx = new (phase->C, 3) AddINode(x, inv); |
duke@435 | 245 | } |
duke@435 | 246 | phase->register_new_node(addx, phase->get_ctrl(x)); |
kvn@1976 | 247 | phase->_igvn.replace_node(n1, addx); |
kvn@2665 | 248 | assert(phase->get_loop(phase->get_ctrl(n1)) == this, ""); |
kvn@2665 | 249 | _body.yank(n1); |
duke@435 | 250 | return addx; |
duke@435 | 251 | } |
duke@435 | 252 | |
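The rewrite table above can be spot-checked by evaluating both sides of a few rows on concrete values; a small illustrative sketch (inv1/inv2 stand for loop invariants, x for the loop-varying operand):

#include <cassert>

// Spot-check a few of the reassociation identities listed above on
// concrete int values. Purely illustrative, not HotSpot code.
int main() {
  int inv1 = 7, inv2 = 3, x = 40;
  assert(inv1 + (x + inv2) == ( inv1 + inv2) + x);
  assert(inv1 + (x - inv2) == ( inv1 - inv2) + x);
  assert((x + inv2) - inv1 == (-inv1 + inv2) + x);
  assert(inv1 - (x + inv2) == ( inv1 - inv2) - x);
  assert((inv2 - x) - inv1 == (-inv1 + inv2) - x);
  return 0;
}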
duke@435 | 253 | //---------------------reassociate_invariants----------------------------- |
duke@435 | 254 | // Reassociate invariant expressions: |
duke@435 | 255 | void IdealLoopTree::reassociate_invariants(PhaseIdealLoop *phase) { |
duke@435 | 256 | for (int i = _body.size() - 1; i >= 0; i--) { |
duke@435 | 257 | Node *n = _body.at(i); |
duke@435 | 258 | for (int j = 0; j < 5; j++) { |
duke@435 | 259 | Node* nn = reassociate_add_sub(n, phase); |
duke@435 | 260 | if (nn == NULL) break; |
duke@435 | 261 | n = nn; // again |
duke@435 | 262 | } |
duke@435 | 263 | } |
duke@435 | 264 | } |
duke@435 | 265 | |
duke@435 | 266 | //------------------------------policy_peeling--------------------------------- |
duke@435 | 267 | // Return TRUE or FALSE if the loop should be peeled or not. Peel if we can |
duke@435 | 268 | // make some loop-invariant test (usually a null-check) happen before the loop. |
duke@435 | 269 | bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const { |
duke@435 | 270 | Node *test = ((IdealLoopTree*)this)->tail(); |
duke@435 | 271 | int body_size = ((IdealLoopTree*)this)->_body.size(); |
duke@435 | 272 | int uniq = phase->C->unique(); |
duke@435 | 273 | // Peeling does loop cloning which can result in O(N^2) node construction |
duke@435 | 274 | if( body_size > 255 /* Prevent overflow for large body_size */ |
duke@435 | 275 | || (body_size * body_size + uniq > MaxNodeLimit) ) { |
duke@435 | 276 | return false; // too large to safely clone |
duke@435 | 277 | } |
duke@435 | 278 | while( test != _head ) { // Scan till run off top of loop |
duke@435 | 279 | if( test->is_If() ) { // Test? |
duke@435 | 280 | Node *ctrl = phase->get_ctrl(test->in(1)); |
duke@435 | 281 | if (ctrl->is_top()) |
duke@435 | 282 | return false; // Found dead test on live IF? No peeling! |
duke@435 | 283 | // Standard IF only has one input value to check for loop invariance |
duke@435 | 284 | assert( test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd, "Check this code when new subtype is added"); |
duke@435 | 285 | // Condition is not a member of this loop? |
duke@435 | 286 | if( !is_member(phase->get_loop(ctrl)) && |
duke@435 | 287 | is_loop_exit(test) ) |
duke@435 | 288 | return true; // Found reason to peel! |
duke@435 | 289 | } |
duke@435 | 290 | // Walk up dominators to loop _head looking for test which is |
duke@435 | 291 | // executed on every path thru loop. |
duke@435 | 292 | test = phase->idom(test); |
duke@435 | 293 | } |
duke@435 | 294 | return false; |
duke@435 | 295 | } |
duke@435 | 296 | |
duke@435 | 297 | //------------------------------peeled_dom_test_elim--------------------------- |
duke@435 | 298 | // If we got the effect of peeling, either by actually peeling or by making |
duke@435 | 299 | // a pre-loop which must execute at least once, we can remove all |
duke@435 | 300 | // loop-invariant dominated tests in the main body. |
duke@435 | 301 | void PhaseIdealLoop::peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ) { |
duke@435 | 302 | bool progress = true; |
duke@435 | 303 | while( progress ) { |
duke@435 | 304 | progress = false; // Reset for next iteration |
duke@435 | 305 | Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail(); |
duke@435 | 306 | Node *test = prev->in(0); |
duke@435 | 307 | while( test != loop->_head ) { // Scan till run off top of loop |
duke@435 | 308 | |
duke@435 | 309 | int p_op = prev->Opcode(); |
duke@435 | 310 | if( (p_op == Op_IfFalse || p_op == Op_IfTrue) && |
duke@435 | 311 | test->is_If() && // Test? |
duke@435 | 312 | !test->in(1)->is_Con() && // And not already obvious? |
duke@435 | 313 | // Condition is not a member of this loop? |
duke@435 | 314 | !loop->is_member(get_loop(get_ctrl(test->in(1))))){ |
duke@435 | 315 | // Walk loop body looking for instances of this test |
duke@435 | 316 | for( uint i = 0; i < loop->_body.size(); i++ ) { |
duke@435 | 317 | Node *n = loop->_body.at(i); |
duke@435 | 318 | if( n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/ ) { |
duke@435 | 319 | // IfNode was dominated by version in peeled loop body |
duke@435 | 320 | progress = true; |
duke@435 | 321 | dominated_by( old_new[prev->_idx], n ); |
duke@435 | 322 | } |
duke@435 | 323 | } |
duke@435 | 324 | } |
duke@435 | 325 | prev = test; |
duke@435 | 326 | test = idom(test); |
duke@435 | 327 | } // End of scan tests in loop |
duke@435 | 328 | |
duke@435 | 329 | } // End of while( progress ) |
duke@435 | 330 | } |
duke@435 | 331 | |
duke@435 | 332 | //------------------------------do_peeling------------------------------------- |
duke@435 | 333 | // Peel the first iteration of the given loop. |
duke@435 | 334 | // Step 1: Clone the loop body. The clone becomes the peeled iteration. |
duke@435 | 335 | // The pre-loop illegally has 2 control users (old & new loops). |
duke@435 | 336 | // Step 2: Make the old-loop fall-in edges point to the peeled iteration. |
duke@435 | 337 | // Do this by making the old-loop fall-in edges act as if they came |
duke@435 | 338 | // around the loopback from the prior iteration (follow the old-loop |
duke@435 | 339 | // backedges) and then map to the new peeled iteration. This leaves |
duke@435 | 340 | // the pre-loop with only 1 user (the new peeled iteration), but the |
duke@435 | 341 | // peeled-loop backedge has 2 users. |
duke@435 | 342 | // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the |
duke@435 | 343 | // extra backedge user. |
kvn@2727 | 344 | // |
kvn@2727 | 345 | // orig |
kvn@2727 | 346 | // |
kvn@2727 | 347 | // stmt1 |
kvn@2727 | 348 | // | |
kvn@2727 | 349 | // v |
kvn@2727 | 350 | // loop predicate |
kvn@2727 | 351 | // | |
kvn@2727 | 352 | // v |
kvn@2727 | 353 | // loop<----+ |
kvn@2727 | 354 | // | | |
kvn@2727 | 355 | // stmt2 | |
kvn@2727 | 356 | // | | |
kvn@2727 | 357 | // v | |
kvn@2727 | 358 | // if ^ |
kvn@2727 | 359 | // / \ | |
kvn@2727 | 360 | // / \ | |
kvn@2727 | 361 | // v v | |
kvn@2727 | 362 | // false true | |
kvn@2727 | 363 | // / \ | |
kvn@2727 | 364 | // / ----+ |
kvn@2727 | 365 | // | |
kvn@2727 | 366 | // v |
kvn@2727 | 367 | // exit |
kvn@2727 | 368 | // |
kvn@2727 | 369 | // |
kvn@2727 | 370 | // after clone loop |
kvn@2727 | 371 | // |
kvn@2727 | 372 | // stmt1 |
kvn@2727 | 373 | // | |
kvn@2727 | 374 | // v |
kvn@2727 | 375 | // loop predicate |
kvn@2727 | 376 | // / \ |
kvn@2727 | 377 | // clone / \ orig |
kvn@2727 | 378 | // / \ |
kvn@2727 | 379 | // / \ |
kvn@2727 | 380 | // v v |
kvn@2727 | 381 | // +---->loop clone loop<----+ |
kvn@2727 | 382 | // | | | | |
kvn@2727 | 383 | // | stmt2 clone stmt2 | |
kvn@2727 | 384 | // | | | | |
kvn@2727 | 385 | // | v v | |
kvn@2727 | 386 | // ^ if clone If ^ |
kvn@2727 | 387 | // | / \ / \ | |
kvn@2727 | 388 | // | / \ / \ | |
kvn@2727 | 389 | // | v v v v | |
kvn@2727 | 390 | // | true false false true | |
kvn@2727 | 391 | // | / \ / \ | |
kvn@2727 | 392 | // +---- \ / ----+ |
kvn@2727 | 393 | // \ / |
kvn@2727 | 394 | // 1v v2 |
kvn@2727 | 395 | // region |
kvn@2727 | 396 | // | |
kvn@2727 | 397 | // v |
kvn@2727 | 398 | // exit |
kvn@2727 | 399 | // |
kvn@2727 | 400 | // |
kvn@2727 | 401 | // after peel and predicate move |
kvn@2727 | 402 | // |
kvn@2727 | 403 | // stmt1 |
kvn@2727 | 404 | // / |
kvn@2727 | 405 | // / |
kvn@2727 | 406 | // clone / orig |
kvn@2727 | 407 | // / |
kvn@2727 | 408 | // / +----------+ |
kvn@2727 | 409 | // / | | |
kvn@2727 | 410 | // / loop predicate | |
kvn@2727 | 411 | // / | | |
kvn@2727 | 412 | // v v | |
kvn@2727 | 413 | // TOP-->loop clone loop<----+ | |
kvn@2727 | 414 | // | | | | |
kvn@2727 | 415 | // stmt2 clone stmt2 | | |
kvn@2727 | 416 | // | | | ^ |
kvn@2727 | 417 | // v v | | |
kvn@2727 | 418 | // if clone If ^ | |
kvn@2727 | 419 | // / \ / \ | | |
kvn@2727 | 420 | // / \ / \ | | |
kvn@2727 | 421 | // v v v v | | |
kvn@2727 | 422 | // true false false true | | |
kvn@2727 | 423 | // | \ / \ | | |
kvn@2727 | 424 | // | \ / ----+ ^ |
kvn@2727 | 425 | // | \ / | |
kvn@2727 | 426 | // | 1v v2 | |
kvn@2727 | 427 | // v region | |
kvn@2727 | 428 | // | | | |
kvn@2727 | 429 | // | v | |
kvn@2727 | 430 | // | exit | |
kvn@2727 | 431 | // | | |
kvn@2727 | 432 | // +--------------->-----------------+ |
kvn@2727 | 433 | // |
kvn@2727 | 434 | // |
kvn@2727 | 435 | // final graph |
kvn@2727 | 436 | // |
kvn@2727 | 437 | // stmt1 |
kvn@2727 | 438 | // | |
kvn@2727 | 439 | // v |
kvn@2727 | 440 | // stmt2 clone |
kvn@2727 | 441 | // | |
kvn@2727 | 442 | // v |
kvn@2727 | 443 | // if clone |
kvn@2727 | 444 | // / | |
kvn@2727 | 445 | // / | |
kvn@2727 | 446 | // v v |
kvn@2727 | 447 | // false true |
kvn@2727 | 448 | // | | |
kvn@2727 | 449 | // | v |
kvn@2727 | 450 | // | loop predicate |
kvn@2727 | 451 | // | | |
kvn@2727 | 452 | // | v |
kvn@2727 | 453 | // | loop<----+ |
kvn@2727 | 454 | // | | | |
kvn@2727 | 455 | // | stmt2 | |
kvn@2727 | 456 | // | | | |
kvn@2727 | 457 | // | v | |
kvn@2727 | 458 | // v if ^ |
kvn@2727 | 459 | // | / \ | |
kvn@2727 | 460 | // | / \ | |
kvn@2727 | 461 | // | v v | |
kvn@2727 | 462 | // | false true | |
kvn@2727 | 463 | // | | \ | |
kvn@2727 | 464 | // v v --+ |
kvn@2727 | 465 | // region |
kvn@2727 | 466 | // | |
kvn@2727 | 467 | // v |
kvn@2727 | 468 | // exit |
kvn@2727 | 469 | // |
duke@435 | 470 | void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) { |
duke@435 | 471 | |
duke@435 | 472 | C->set_major_progress(); |
duke@435 | 473 | // Peeling a 'main' loop in a pre/main/post situation obfuscates the |
duke@435 | 474 | // 'pre' loop from the main and the 'pre' can no longer have its |
duke@435 | 475 | // iterations adjusted. Therefore, we need to declare this loop as |
duke@435 | 476 | // no longer a 'main' loop; it will need new pre and post loops before |
duke@435 | 477 | // we can do further RCE. |
kvn@2665 | 478 | #ifndef PRODUCT |
kvn@2665 | 479 | if (TraceLoopOpts) { |
kvn@2665 | 480 | tty->print("Peel "); |
kvn@2665 | 481 | loop->dump_head(); |
kvn@2665 | 482 | } |
kvn@2665 | 483 | #endif |
kvn@2727 | 484 | Node* head = loop->_head; |
kvn@2727 | 485 | bool counted_loop = head->is_CountedLoop(); |
kvn@2727 | 486 | if (counted_loop) { |
kvn@2727 | 487 | CountedLoopNode *cl = head->as_CountedLoop(); |
duke@435 | 488 | assert(cl->trip_count() > 0, "peeling a fully unrolled loop"); |
duke@435 | 489 | cl->set_trip_count(cl->trip_count() - 1); |
kvn@2665 | 490 | if (cl->is_main_loop()) { |
duke@435 | 491 | cl->set_normal_loop(); |
duke@435 | 492 | #ifndef PRODUCT |
kvn@2665 | 493 | if (PrintOpto && VerifyLoopOptimizations) { |
duke@435 | 494 | tty->print("Peeling a 'main' loop; resetting to 'normal' "); |
duke@435 | 495 | loop->dump_head(); |
duke@435 | 496 | } |
duke@435 | 497 | #endif |
duke@435 | 498 | } |
duke@435 | 499 | } |
kvn@2727 | 500 | Node* entry = head->in(LoopNode::EntryControl); |
duke@435 | 501 | |
duke@435 | 502 | // Step 1: Clone the loop body. The clone becomes the peeled iteration. |
duke@435 | 503 | // The pre-loop illegally has 2 control users (old & new loops). |
kvn@2727 | 504 | clone_loop( loop, old_new, dom_depth(head) ); |
duke@435 | 505 | |
duke@435 | 506 | // Step 2: Make the old-loop fall-in edges point to the peeled iteration. |
duke@435 | 507 | // Do this by making the old-loop fall-in edges act as if they came |
duke@435 | 508 | // around the loopback from the prior iteration (follow the old-loop |
duke@435 | 509 | // backedges) and then map to the new peeled iteration. This leaves |
duke@435 | 510 | // the pre-loop with only 1 user (the new peeled iteration), but the |
duke@435 | 511 | // peeled-loop backedge has 2 users. |
kvn@3043 | 512 | Node* new_entry = old_new[head->in(LoopNode::LoopBackControl)->_idx]; |
kvn@2727 | 513 | _igvn.hash_delete(head); |
kvn@3043 | 514 | head->set_req(LoopNode::EntryControl, new_entry); |
kvn@2727 | 515 | for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) { |
kvn@2727 | 516 | Node* old = head->fast_out(j); |
kvn@2727 | 517 | if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) { |
kvn@3043 | 518 | Node* new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx]; |
kvn@2727 | 519 | if (!new_exit_value ) // Backedge value is ALSO loop invariant? |
duke@435 | 520 | // Then loop body backedge value remains the same. |
duke@435 | 521 | new_exit_value = old->in(LoopNode::LoopBackControl); |
duke@435 | 522 | _igvn.hash_delete(old); |
duke@435 | 523 | old->set_req(LoopNode::EntryControl, new_exit_value); |
duke@435 | 524 | } |
duke@435 | 525 | } |
duke@435 | 526 | |
duke@435 | 527 | |
duke@435 | 528 | // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the |
duke@435 | 529 | // extra backedge user. |
kvn@2727 | 530 | Node* new_head = old_new[head->_idx]; |
kvn@2727 | 531 | _igvn.hash_delete(new_head); |
kvn@2727 | 532 | new_head->set_req(LoopNode::LoopBackControl, C->top()); |
kvn@2727 | 533 | for (DUIterator_Fast j2max, j2 = new_head->fast_outs(j2max); j2 < j2max; j2++) { |
kvn@2727 | 534 | Node* use = new_head->fast_out(j2); |
kvn@2727 | 535 | if (use->in(0) == new_head && use->req() == 3 && use->is_Phi()) { |
duke@435 | 536 | _igvn.hash_delete(use); |
duke@435 | 537 | use->set_req(LoopNode::LoopBackControl, C->top()); |
duke@435 | 538 | } |
duke@435 | 539 | } |
duke@435 | 540 | |
duke@435 | 541 | |
duke@435 | 542 | // Step 4: Correct dom-depth info. Set to loop-head depth. |
kvn@2727 | 543 | int dd = dom_depth(head); |
kvn@2727 | 544 | set_idom(head, head->in(1), dd); |
duke@435 | 545 | for (uint j3 = 0; j3 < loop->_body.size(); j3++) { |
duke@435 | 546 | Node *old = loop->_body.at(j3); |
duke@435 | 547 | Node *nnn = old_new[old->_idx]; |
duke@435 | 548 | if (!has_ctrl(nnn)) |
duke@435 | 549 | set_idom(nnn, idom(nnn), dd-1); |
duke@435 | 550 | // While we're at it, remove any SafePoints from the peeled code |
kvn@2727 | 551 | if (old->Opcode() == Op_SafePoint) { |
duke@435 | 552 | Node *nnn = old_new[old->_idx]; |
duke@435 | 553 | lazy_replace(nnn,nnn->in(TypeFunc::Control)); |
duke@435 | 554 | } |
duke@435 | 555 | } |
duke@435 | 556 | |
duke@435 | 557 | // Now force out all loop-invariant dominating tests. The optimizer |
duke@435 | 558 | // finds some, but we _know_ they are all useless. |
duke@435 | 559 | peeled_dom_test_elim(loop,old_new); |
duke@435 | 560 | |
duke@435 | 561 | loop->record_for_igvn(); |
duke@435 | 562 | } |
duke@435 | 563 | |
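At the source level the transformation has roughly the following shape. The sketch below is illustrative C++ (hypothetical sum_before/sum_after_peel functions, not compiler output): once the first iteration runs before the loop, the loop-invariant test inside the body is dominated by its peeled copy and can be dropped, which is what peeled_dom_test_elim() exploits.

#include <cstdio>

// Before peeling: the invariant test (p != nullptr) is evaluated on every
// iteration. After peeling one iteration, the same test has already run on
// the only path into the remaining loop, so the dominated copies inside the
// loop can be eliminated.
static int sum_before(const int* p, int n) {
  int s = 0;
  for (int i = 0; i < n; i++) {
    if (p == nullptr) return -1;   // invariant test inside the loop
    s += p[i];
  }
  return s;
}

static int sum_after_peel(const int* p, int n) {
  int s = 0;
  if (n > 0) {                     // peeled first iteration
    if (p == nullptr) return -1;   // invariant test now outside the loop body
    s += p[0];
    for (int i = 1; i < n; i++) {  // remaining iterations, test removed
      s += p[i];
    }
  }
  return s;
}

int main() {
  int a[4] = {1, 2, 3, 4};
  printf("%d %d\n", sum_before(a, 4), sum_after_peel(a, 4));  // 10 10
  return 0;
}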
kvn@2735 | 564 | #define EMPTY_LOOP_SIZE 7 // number of nodes in an empty loop |
kvn@2735 | 565 | |
duke@435 | 566 | //------------------------------policy_maximally_unroll------------------------ |
kvn@2735 | 567 | // Calculate exact loop trip count and return true if loop can be maximally |
kvn@2735 | 568 | // unrolled. |
duke@435 | 569 | bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const { |
duke@435 | 570 | CountedLoopNode *cl = _head->as_CountedLoop(); |
kvn@2694 | 571 | assert(cl->is_normal_loop(), ""); |
kvn@2735 | 572 | if (!cl->is_valid_counted_loop()) |
kvn@2735 | 573 | return false; // Malformed counted loop |
duke@435 | 574 | |
kvn@2747 | 575 | if (!cl->has_exact_trip_count()) { |
kvn@2747 | 576 | // Trip count is not exact. |
duke@435 | 577 | return false; |
duke@435 | 578 | } |
duke@435 | 579 | |
kvn@2747 | 580 | uint trip_count = cl->trip_count(); |
kvn@2747 | 581 | // Note, max_juint is used to indicate unknown trip count. |
kvn@2747 | 582 | assert(trip_count > 1, "one iteration loop should be optimized out already"); |
kvn@2747 | 583 | assert(trip_count < max_juint, "exact trip_count should be less than max_uint."); |
duke@435 | 584 | |
duke@435 | 585 | // Real policy: if we maximally unroll, does it get too big? |
duke@435 | 586 | // Allow the unrolled mess to get larger than standard loop |
duke@435 | 587 | // size. After all, it will no longer be a loop. |
duke@435 | 588 | uint body_size = _body.size(); |
duke@435 | 589 | uint unroll_limit = (uint)LoopUnrollLimit * 4; |
duke@435 | 590 | assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits"); |
kvn@2694 | 591 | if (trip_count > unroll_limit || body_size > unroll_limit) { |
kvn@2694 | 592 | return false; |
kvn@2694 | 593 | } |
kvn@2694 | 594 | |
kvn@2877 | 595 | // Fully unroll a loop with few iterations regardless of the |
kvn@2877 | 596 | // following conditions, since later loop optimizations will |
kvn@2877 | 597 | // split such a loop anyway (pre-main-post). |
kvn@2877 | 598 | if (trip_count <= 3) |
kvn@2877 | 599 | return true; |
kvn@2877 | 600 | |
kvn@2735 | 601 | // Take into account that after unrolling the conjoined heads and tails will fold, |
kvn@2735 | 602 | // otherwise policy_unroll() may allow more unrolling than max unrolling. |
kvn@2735 | 603 | uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count; |
kvn@2735 | 604 | uint tst_body_size = (new_body_size - EMPTY_LOOP_SIZE) / trip_count + EMPTY_LOOP_SIZE; |
kvn@2735 | 605 | if (body_size != tst_body_size) // Check for int overflow |
kvn@2735 | 606 | return false; |
kvn@2735 | 607 | if (new_body_size > unroll_limit || |
kvn@2735 | 608 | // Unrolling can result in a large amount of node construction |
kvn@2735 | 609 | new_body_size >= MaxNodeLimit - phase->C->unique()) { |
kvn@2735 | 610 | return false; |
kvn@2735 | 611 | } |
kvn@2735 | 612 | |
kvn@2694 | 613 | // Do not unroll a loop with String intrinsics code. |
kvn@2694 | 614 | // String intrinsics are large and have loops. |
kvn@2694 | 615 | for (uint k = 0; k < _body.size(); k++) { |
kvn@2694 | 616 | Node* n = _body.at(k); |
kvn@2694 | 617 | switch (n->Opcode()) { |
kvn@2694 | 618 | case Op_StrComp: |
kvn@2694 | 619 | case Op_StrEquals: |
kvn@2694 | 620 | case Op_StrIndexOf: |
kvn@2694 | 621 | case Op_AryEq: { |
kvn@2694 | 622 | return false; |
kvn@2694 | 623 | } |
kvn@2694 | 624 | } // switch |
kvn@2694 | 625 | } |
kvn@2694 | 626 | |
kvn@2735 | 627 | return true; // Do maximally unroll |
duke@435 | 628 | } |
duke@435 | 629 | |
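The size estimate used above (where the conjoined loop heads and tails fold after unrolling) can be tried standalone; the overflow check works by recomputing body_size from new_body_size and comparing. Illustrative sketch only; constants and names mirror, but are not, the HotSpot code:

#include <cstdio>

static const unsigned EMPTY_LOOP_SIZE = 7;  // nodes in an empty loop (as above)

// Estimate the body size after maximally unrolling trip_count iterations,
// assuming the loop head/tail nodes fold into one copy. Returns 0 when the
// multiplication overflowed (detected by recomputing the input, as above).
static unsigned unrolled_size(unsigned body_size, unsigned trip_count) {
  unsigned new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count;
  unsigned check = (new_body_size - EMPTY_LOOP_SIZE) / trip_count + EMPTY_LOOP_SIZE;
  if (check != body_size) return 0;  // int overflow
  return new_body_size;
}

int main() {
  // A 27-node body unrolled 10 times is estimated at 7 + 20*10 = 207 nodes.
  printf("%u\n", unrolled_size(27, 10));
  return 0;
}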
duke@435 | 630 | |
kvn@2865 | 631 | #define MAX_UNROLL 16 // maximum number of unrolls for main loop |
kvn@2865 | 632 | |
duke@435 | 633 | //------------------------------policy_unroll---------------------------------- |
duke@435 | 634 | // Return TRUE or FALSE if the loop should be unrolled or not. Unroll if |
duke@435 | 635 | // the loop is a CountedLoop and the body is small enough. |
duke@435 | 636 | bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const { |
duke@435 | 637 | |
duke@435 | 638 | CountedLoopNode *cl = _head->as_CountedLoop(); |
kvn@2694 | 639 | assert(cl->is_normal_loop() || cl->is_main_loop(), ""); |
duke@435 | 640 | |
kvn@2735 | 641 | if (!cl->is_valid_counted_loop()) |
kvn@2735 | 642 | return false; // Malformed counted loop |
duke@435 | 643 | |
kvn@2877 | 644 | // Protect against over-unrolling. |
kvn@2877 | 645 | // After splitting, at least one iteration will be executed in the pre-loop. |
kvn@2877 | 646 | if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false; |
duke@435 | 647 | |
kvn@2865 | 648 | int future_unroll_ct = cl->unrolled_count() * 2; |
kvn@2865 | 649 | if (future_unroll_ct > MAX_UNROLL) return false; |
kvn@2735 | 650 | |
kvn@2865 | 651 | // Check for initial stride being a small enough constant |
kvn@2865 | 652 | if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false; |
duke@435 | 653 | |
duke@435 | 654 | // Don't unroll if the next round of unrolling would push us |
duke@435 | 655 | // over the expected trip count of the loop. One is subtracted |
duke@435 | 656 | // from the expected trip count because the pre-loop normally |
duke@435 | 657 | // executes 1 iteration. |
duke@435 | 658 | if (UnrollLimitForProfileCheck > 0 && |
duke@435 | 659 | cl->profile_trip_cnt() != COUNT_UNKNOWN && |
duke@435 | 660 | future_unroll_ct > UnrollLimitForProfileCheck && |
duke@435 | 661 | (float)future_unroll_ct > cl->profile_trip_cnt() - 1.0) { |
duke@435 | 662 | return false; |
duke@435 | 663 | } |
duke@435 | 664 | |
duke@435 | 665 | // When unroll count is greater than LoopUnrollMin, don't unroll if: |
duke@435 | 666 | // the residual iterations are more than 10% of the trip count |
duke@435 | 667 | // and rounds of "unroll,optimize" are not making significant progress |
duke@435 | 668 | // Progress defined as current size less than 20% larger than previous size. |
duke@435 | 669 | if (UseSuperWord && cl->node_count_before_unroll() > 0 && |
duke@435 | 670 | future_unroll_ct > LoopUnrollMin && |
duke@435 | 671 | (future_unroll_ct - 1) * 10.0 > cl->profile_trip_cnt() && |
duke@435 | 672 | 1.2 * cl->node_count_before_unroll() < (double)_body.size()) { |
duke@435 | 673 | return false; |
duke@435 | 674 | } |
duke@435 | 675 | |
duke@435 | 676 | Node *init_n = cl->init_trip(); |
duke@435 | 677 | Node *limit_n = cl->limit(); |
kvn@2877 | 678 | int stride_con = cl->stride_con(); |
duke@435 | 679 | // Non-constant bounds. |
duke@435 | 680 | // Protect against over-unrolling when init and/or limit are not constant |
duke@435 | 681 | // (so that trip_count's init value is maxint) but iv range is known. |
kvn@2694 | 682 | if (init_n == NULL || !init_n->is_Con() || |
kvn@2694 | 683 | limit_n == NULL || !limit_n->is_Con()) { |
duke@435 | 684 | Node* phi = cl->phi(); |
kvn@2694 | 685 | if (phi != NULL) { |
duke@435 | 686 | assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi."); |
duke@435 | 687 | const TypeInt* iv_type = phase->_igvn.type(phi)->is_int(); |
kvn@2877 | 688 | int next_stride = stride_con * 2; // stride after this unroll |
kvn@2694 | 689 | if (next_stride > 0) { |
kvn@2694 | 690 | if (iv_type->_lo + next_stride <= iv_type->_lo || // overflow |
kvn@2694 | 691 | iv_type->_lo + next_stride > iv_type->_hi) { |
duke@435 | 692 | return false; // over-unrolling |
duke@435 | 693 | } |
kvn@2694 | 694 | } else if (next_stride < 0) { |
kvn@2694 | 695 | if (iv_type->_hi + next_stride >= iv_type->_hi || // overflow |
kvn@2694 | 696 | iv_type->_hi + next_stride < iv_type->_lo) { |
duke@435 | 697 | return false; // over-unrolling |
duke@435 | 698 | } |
duke@435 | 699 | } |
duke@435 | 700 | } |
duke@435 | 701 | } |
duke@435 | 702 | |
kvn@2877 | 703 | // After unrolling, the limit will be adjusted: new_limit = limit-stride. |
kvn@2877 | 704 | // Bail out if the adjustment overflows. |
kvn@2877 | 705 | const TypeInt* limit_type = phase->_igvn.type(limit_n)->is_int(); |
kvn@2877 | 706 | if ((stride_con > 0 && ((limit_type->_hi - stride_con) >= limit_type->_hi)) || |
kvn@2877 | 707 | (stride_con < 0 && ((limit_type->_lo - stride_con) <= limit_type->_lo))) |
kvn@2877 | 708 | return false; // overflow |
kvn@2877 | 709 | |
duke@435 | 710 | // Adjust body_size to determine if we unroll or not |
duke@435 | 711 | uint body_size = _body.size(); |
kvn@3129 | 712 | // Key test to unroll loop in CRC32 java code |
kvn@3129 | 713 | int xors_in_loop = 0; |
duke@435 | 714 | // Also count ModL, DivL and MulL which expand mightily |
kvn@2694 | 715 | for (uint k = 0; k < _body.size(); k++) { |
kvn@2694 | 716 | Node* n = _body.at(k); |
kvn@2694 | 717 | switch (n->Opcode()) { |
kvn@3129 | 718 | case Op_XorI: xors_in_loop++; break; // CRC32 java code |
kvn@2694 | 719 | case Op_ModL: body_size += 30; break; |
kvn@2694 | 720 | case Op_DivL: body_size += 30; break; |
kvn@2694 | 721 | case Op_MulL: body_size += 10; break; |
kvn@2694 | 722 | case Op_StrComp: |
kvn@2694 | 723 | case Op_StrEquals: |
kvn@2694 | 724 | case Op_StrIndexOf: |
kvn@2694 | 725 | case Op_AryEq: { |
kvn@2694 | 726 | // Do not unroll a loop with String intrinsics code. |
kvn@2694 | 727 | // String intrinsics are large and have loops. |
kvn@2694 | 728 | return false; |
kvn@2694 | 729 | } |
kvn@2694 | 730 | } // switch |
duke@435 | 731 | } |
duke@435 | 732 | |
duke@435 | 733 | // Check for being too big |
kvn@2694 | 734 | if (body_size > (uint)LoopUnrollLimit) { |
kvn@3129 | 735 | if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true; |
kvn@3129 | 736 | // Normal case: loop too big |
duke@435 | 737 | return false; |
duke@435 | 738 | } |
duke@435 | 739 | |
duke@435 | 740 | // Unroll once! (Each trip will soon do double iterations) |
duke@435 | 741 | return true; |
duke@435 | 742 | } |
duke@435 | 743 | |
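The limit-adjustment bailout above relies on wraparound of int arithmetic; a 64-bit reformulation (illustrative only, not the HotSpot code) makes the intent explicit: unrolling is refused when limit-stride would leave the int range.

#include <cstdint>
#include <climits>
#include <cstdio>

// Illustrative version of the "new_limit = limit - stride" bailout above,
// done in 64-bit arithmetic so no signed wraparound is needed.
static bool limit_adjustment_overflows(int limit_hi, int limit_lo, int stride_con) {
  if (stride_con > 0)
    return (int64_t)limit_hi - stride_con < INT_MIN;  // would wrap below INT_MIN
  else
    return (int64_t)limit_lo - stride_con > INT_MAX;  // would wrap above INT_MAX
}

int main() {
  printf("%d\n", limit_adjustment_overflows(INT_MIN + 1, 0, 4));  // 1: refuse unroll
  printf("%d\n", limit_adjustment_overflows(100, 0, 4));          // 0: safe to adjust
  return 0;
}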
duke@435 | 744 | //------------------------------policy_align----------------------------------- |
duke@435 | 745 | // Return TRUE or FALSE if the loop should be cache-line aligned. Gather the |
duke@435 | 746 | // expression that does the alignment. Note that only one array base can be |
twisti@1040 | 747 | // aligned in a loop (unless the VM guarantees mutual alignment). Note that |
duke@435 | 748 | // if we vectorize short memory ops into longer memory ops, we may want to |
duke@435 | 749 | // increase alignment. |
duke@435 | 750 | bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const { |
duke@435 | 751 | return false; |
duke@435 | 752 | } |
duke@435 | 753 | |
duke@435 | 754 | //------------------------------policy_range_check----------------------------- |
duke@435 | 755 | // Return TRUE or FALSE if the loop should be range-check-eliminated. |
duke@435 | 756 | // Actually we do iteration-splitting, a more powerful form of RCE. |
duke@435 | 757 | bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const { |
kvn@2877 | 758 | if (!RangeCheckElimination) return false; |
duke@435 | 759 | |
duke@435 | 760 | CountedLoopNode *cl = _head->as_CountedLoop(); |
duke@435 | 761 | // If we unrolled with no intention of doing RCE and we later |
duke@435 | 762 | // changed our minds, we got no pre-loop. Either we need to |
duke@435 | 763 | // make a new pre-loop, or we gotta disallow RCE. |
kvn@2877 | 764 | if (cl->is_main_no_pre_loop()) return false; // Disallowed for now. |
duke@435 | 765 | Node *trip_counter = cl->phi(); |
duke@435 | 766 | |
duke@435 | 767 | // Check loop body for tests of trip-counter plus loop-invariant vs |
duke@435 | 768 | // loop-invariant. |
kvn@2877 | 769 | for (uint i = 0; i < _body.size(); i++) { |
duke@435 | 770 | Node *iff = _body[i]; |
kvn@2877 | 771 | if (iff->Opcode() == Op_If) { // Test? |
duke@435 | 772 | |
duke@435 | 773 | // Comparing trip+off vs limit |
duke@435 | 774 | Node *bol = iff->in(1); |
kvn@2877 | 775 | if (bol->req() != 2) continue; // dead constant test |
cfang@1607 | 776 | if (!bol->is_Bool()) { |
cfang@1607 | 777 | assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only"); |
cfang@1607 | 778 | continue; |
cfang@1607 | 779 | } |
kvn@2877 | 780 | if (bol->as_Bool()->_test._test == BoolTest::ne) |
kvn@2877 | 781 | continue; // not RC |
kvn@2877 | 782 | |
duke@435 | 783 | Node *cmp = bol->in(1); |
duke@435 | 784 | |
duke@435 | 785 | Node *rc_exp = cmp->in(1); |
duke@435 | 786 | Node *limit = cmp->in(2); |
duke@435 | 787 | |
duke@435 | 788 | Node *limit_c = phase->get_ctrl(limit); |
duke@435 | 789 | if( limit_c == phase->C->top() ) |
duke@435 | 790 | return false; // Found dead test on live IF? No RCE! |
duke@435 | 791 | if( is_member(phase->get_loop(limit_c) ) ) { |
duke@435 | 792 | // Compare might have operands swapped; commute them |
duke@435 | 793 | rc_exp = cmp->in(2); |
duke@435 | 794 | limit = cmp->in(1); |
duke@435 | 795 | limit_c = phase->get_ctrl(limit); |
duke@435 | 796 | if( is_member(phase->get_loop(limit_c) ) ) |
duke@435 | 797 | continue; // Both inputs are loop varying; cannot RCE |
duke@435 | 798 | } |
duke@435 | 799 | |
duke@435 | 800 | if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, NULL, NULL)) { |
duke@435 | 801 | continue; |
duke@435 | 802 | } |
duke@435 | 803 | // Yeah! Found a test like 'trip+off vs limit' |
duke@435 | 804 | // Test is an IfNode, has 2 projections. If BOTH are in the loop |
duke@435 | 805 | // we need loop unswitching instead of iteration splitting. |
duke@435 | 806 | if( is_loop_exit(iff) ) |
duke@435 | 807 | return true; // Found reason to split iterations |
duke@435 | 808 | } // End of is IF |
duke@435 | 809 | } |
duke@435 | 810 | |
duke@435 | 811 | return false; |
duke@435 | 812 | } |
duke@435 | 813 | |
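The "trip+off vs limit" shape the scan above looks for is the ordinary bounds-check pattern; a small illustrative stand-in (C++, hypothetical first_violation function):

#include <cstdio>

// The kind of test policy_range_check() is after: a comparison of
// "trip counter plus loop-invariant offset" against a loop-invariant limit,
// where one projection of the branch exits the loop. Illustrative only.
static int first_violation(const int* a, int len, int off, int n) {
  for (int i = 0; i < n; i++) {
    if (i + off >= len) {     // rc_exp = scaled iv plus offset, limit = len
      return i;               // loop-exiting projection -> RCE candidate
    }
    (void)a[i + off];
  }
  return -1;
}

int main() {
  int a[8] = {0};
  printf("%d\n", first_violation(a, 8, 5, 10));  // 3: first i with i+5 >= 8
  return 0;
}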
duke@435 | 814 | //------------------------------policy_peel_only------------------------------- |
duke@435 | 815 | // Return TRUE or FALSE if the loop should NEVER be RCE'd or aligned. Useful |
duke@435 | 816 | // for unrolling loops with NO array accesses. |
duke@435 | 817 | bool IdealLoopTree::policy_peel_only( PhaseIdealLoop *phase ) const { |
duke@435 | 818 | |
duke@435 | 819 | for( uint i = 0; i < _body.size(); i++ ) |
duke@435 | 820 | if( _body[i]->is_Mem() ) |
duke@435 | 821 | return false; |
duke@435 | 822 | |
duke@435 | 823 | // No memory accesses at all! |
duke@435 | 824 | return true; |
duke@435 | 825 | } |
duke@435 | 826 | |
duke@435 | 827 | //------------------------------clone_up_backedge_goo-------------------------- |
duke@435 | 828 | // If Node n lives in the back_ctrl block and cannot float, we clone a private |
duke@435 | 829 | // version of n in preheader_ctrl block and return that, otherwise return n. |
kvn@2985 | 830 | Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones ) { |
duke@435 | 831 | if( get_ctrl(n) != back_ctrl ) return n; |
duke@435 | 832 | |
kvn@2985 | 833 | // Only visit once |
kvn@2985 | 834 | if (visited.test_set(n->_idx)) { |
kvn@2985 | 835 | Node *x = clones.find(n->_idx); |
kvn@2985 | 836 | if (x != NULL) |
kvn@2985 | 837 | return x; |
kvn@2985 | 838 | return n; |
kvn@2985 | 839 | } |
kvn@2985 | 840 | |
duke@435 | 841 | Node *x = NULL; // If required, a clone of 'n' |
duke@435 | 842 | // Check for 'n' being pinned in the backedge. |
duke@435 | 843 | if( n->in(0) && n->in(0) == back_ctrl ) { |
kvn@2985 | 844 | assert(clones.find(n->_idx) == NULL, "dead loop"); |
duke@435 | 845 | x = n->clone(); // Clone a copy of 'n' to preheader |
kvn@2985 | 846 | clones.push(x, n->_idx); |
duke@435 | 847 | x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader |
duke@435 | 848 | } |
duke@435 | 849 | |
duke@435 | 850 | // Recursively fix up any other input edges into x. |
duke@435 | 851 | // If there are no changes we can just return 'n', otherwise |
duke@435 | 852 | // we need to clone a private copy and change it. |
duke@435 | 853 | for( uint i = 1; i < n->req(); i++ ) { |
kvn@2985 | 854 | Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i), visited, clones ); |
duke@435 | 855 | if( g != n->in(i) ) { |
kvn@2985 | 856 | if( !x ) { |
kvn@2985 | 857 | assert(clones.find(n->_idx) == NULL, "dead loop"); |
duke@435 | 858 | x = n->clone(); |
kvn@2985 | 859 | clones.push(x, n->_idx); |
kvn@2985 | 860 | } |
duke@435 | 861 | x->set_req(i, g); |
duke@435 | 862 | } |
duke@435 | 863 | } |
duke@435 | 864 | if( x ) { // x can legally float to pre-header location |
duke@435 | 865 | register_new_node( x, preheader_ctrl ); |
duke@435 | 866 | return x; |
duke@435 | 867 | } else { // raise n to cover LCA of uses |
duke@435 | 868 | set_ctrl( n, find_non_split_ctrl(back_ctrl->in(0)) ); |
duke@435 | 869 | } |
duke@435 | 870 | return n; |
duke@435 | 871 | } |
duke@435 | 872 | |
duke@435 | 873 | //------------------------------insert_pre_post_loops-------------------------- |
duke@435 | 874 | // Insert pre and post loops. If peel_only is set, the pre-loop can not have |
duke@435 | 875 | // more iterations added. It acts as a 'peel' only, no lower-bound RCE, no |
duke@435 | 876 | // alignment. Useful to unroll loops that do no array accesses. |
duke@435 | 877 | void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) { |
duke@435 | 878 | |
kvn@2665 | 879 | #ifndef PRODUCT |
kvn@2665 | 880 | if (TraceLoopOpts) { |
kvn@2665 | 881 | if (peel_only) |
kvn@2665 | 882 | tty->print("PeelMainPost "); |
kvn@2665 | 883 | else |
kvn@2665 | 884 | tty->print("PreMainPost "); |
kvn@2665 | 885 | loop->dump_head(); |
kvn@2665 | 886 | } |
kvn@2665 | 887 | #endif |
duke@435 | 888 | C->set_major_progress(); |
duke@435 | 889 | |
duke@435 | 890 | // Find common pieces of the loop being guarded with pre & post loops |
duke@435 | 891 | CountedLoopNode *main_head = loop->_head->as_CountedLoop(); |
duke@435 | 892 | assert( main_head->is_normal_loop(), "" ); |
duke@435 | 893 | CountedLoopEndNode *main_end = main_head->loopexit(); |
duke@435 | 894 | assert( main_end->outcnt() == 2, "1 true, 1 false path only" ); |
duke@435 | 895 | uint dd_main_head = dom_depth(main_head); |
duke@435 | 896 | uint max = main_head->outcnt(); |
duke@435 | 897 | |
duke@435 | 898 | Node *pre_header= main_head->in(LoopNode::EntryControl); |
duke@435 | 899 | Node *init = main_head->init_trip(); |
duke@435 | 900 | Node *incr = main_end ->incr(); |
duke@435 | 901 | Node *limit = main_end ->limit(); |
duke@435 | 902 | Node *stride = main_end ->stride(); |
duke@435 | 903 | Node *cmp = main_end ->cmp_node(); |
duke@435 | 904 | BoolTest::mask b_test = main_end->test_trip(); |
duke@435 | 905 | |
duke@435 | 906 | // Need only 1 user of 'bol' because I will be hacking the loop bounds. |
duke@435 | 907 | Node *bol = main_end->in(CountedLoopEndNode::TestValue); |
duke@435 | 908 | if( bol->outcnt() != 1 ) { |
duke@435 | 909 | bol = bol->clone(); |
duke@435 | 910 | register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl)); |
duke@435 | 911 | _igvn.hash_delete(main_end); |
duke@435 | 912 | main_end->set_req(CountedLoopEndNode::TestValue, bol); |
duke@435 | 913 | } |
duke@435 | 914 | // Need only 1 user of 'cmp' because I will be hacking the loop bounds. |
duke@435 | 915 | if( cmp->outcnt() != 1 ) { |
duke@435 | 916 | cmp = cmp->clone(); |
duke@435 | 917 | register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl)); |
duke@435 | 918 | _igvn.hash_delete(bol); |
duke@435 | 919 | bol->set_req(1, cmp); |
duke@435 | 920 | } |
duke@435 | 921 | |
duke@435 | 922 | //------------------------------ |
duke@435 | 923 | // Step A: Create Post-Loop. |
duke@435 | 924 | Node* main_exit = main_end->proj_out(false); |
duke@435 | 925 | assert( main_exit->Opcode() == Op_IfFalse, "" ); |
duke@435 | 926 | int dd_main_exit = dom_depth(main_exit); |
duke@435 | 927 | |
duke@435 | 928 | // Step A1: Clone the loop body. The clone becomes the post-loop. The main |
duke@435 | 929 | // loop pre-header illegally has 2 control users (old & new loops). |
duke@435 | 930 | clone_loop( loop, old_new, dd_main_exit ); |
duke@435 | 931 | assert( old_new[main_end ->_idx]->Opcode() == Op_CountedLoopEnd, "" ); |
duke@435 | 932 | CountedLoopNode *post_head = old_new[main_head->_idx]->as_CountedLoop(); |
duke@435 | 933 | post_head->set_post_loop(main_head); |
duke@435 | 934 | |
kvn@835 | 935 | // Reduce the post-loop trip count. |
kvn@835 | 936 | CountedLoopEndNode* post_end = old_new[main_end ->_idx]->as_CountedLoopEnd(); |
kvn@835 | 937 | post_end->_prob = PROB_FAIR; |
kvn@835 | 938 | |
duke@435 | 939 | // Build the main-loop normal exit. |
duke@435 | 940 | IfFalseNode *new_main_exit = new (C, 1) IfFalseNode(main_end); |
duke@435 | 941 | _igvn.register_new_node_with_optimizer( new_main_exit ); |
duke@435 | 942 | set_idom(new_main_exit, main_end, dd_main_exit ); |
duke@435 | 943 | set_loop(new_main_exit, loop->_parent); |
duke@435 | 944 | |
duke@435 | 945 | // Step A2: Build a zero-trip guard for the post-loop. After leaving the |
duke@435 | 946 | // main-loop, the post-loop may not execute at all. We 'opaque' the incr |
duke@435 | 947 | // (the main-loop trip-counter exit value) because we will be changing |
duke@435 | 948 | // the exit value (via unrolling) so we cannot constant-fold away the zero |
duke@435 | 949 | // trip guard until all unrolling is done. |
kvn@651 | 950 | Node *zer_opaq = new (C, 2) Opaque1Node(C, incr); |
duke@435 | 951 | Node *zer_cmp = new (C, 3) CmpINode( zer_opaq, limit ); |
duke@435 | 952 | Node *zer_bol = new (C, 2) BoolNode( zer_cmp, b_test ); |
duke@435 | 953 | register_new_node( zer_opaq, new_main_exit ); |
duke@435 | 954 | register_new_node( zer_cmp , new_main_exit ); |
duke@435 | 955 | register_new_node( zer_bol , new_main_exit ); |
duke@435 | 956 | |
duke@435 | 957 | // Build the IfNode |
duke@435 | 958 | IfNode *zer_iff = new (C, 2) IfNode( new_main_exit, zer_bol, PROB_FAIR, COUNT_UNKNOWN ); |
duke@435 | 959 | _igvn.register_new_node_with_optimizer( zer_iff ); |
duke@435 | 960 | set_idom(zer_iff, new_main_exit, dd_main_exit); |
duke@435 | 961 | set_loop(zer_iff, loop->_parent); |
duke@435 | 962 | |
duke@435 | 963 | // Plug in the false-path, taken if we need to skip post-loop |
duke@435 | 964 | _igvn.hash_delete( main_exit ); |
duke@435 | 965 | main_exit->set_req(0, zer_iff); |
duke@435 | 966 | _igvn._worklist.push(main_exit); |
duke@435 | 967 | set_idom(main_exit, zer_iff, dd_main_exit); |
duke@435 | 968 | set_idom(main_exit->unique_out(), zer_iff, dd_main_exit); |
duke@435 | 969 | // Make the true-path, must enter the post loop |
duke@435 | 970 | Node *zer_taken = new (C, 1) IfTrueNode( zer_iff ); |
duke@435 | 971 | _igvn.register_new_node_with_optimizer( zer_taken ); |
duke@435 | 972 | set_idom(zer_taken, zer_iff, dd_main_exit); |
duke@435 | 973 | set_loop(zer_taken, loop->_parent); |
duke@435 | 974 | // Plug in the true path |
duke@435 | 975 | _igvn.hash_delete( post_head ); |
duke@435 | 976 | post_head->set_req(LoopNode::EntryControl, zer_taken); |
duke@435 | 977 | set_idom(post_head, zer_taken, dd_main_exit); |
duke@435 | 978 | |
kvn@2985 | 979 | Arena *a = Thread::current()->resource_area(); |
kvn@2985 | 980 | VectorSet visited(a); |
kvn@2985 | 981 | Node_Stack clones(a, main_head->back_control()->outcnt()); |
duke@435 | 982 | // Step A3: Make the fall-in values to the post-loop come from the |
duke@435 | 983 | // fall-out values of the main-loop. |
duke@435 | 984 | for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) { |
duke@435 | 985 | Node* main_phi = main_head->fast_out(i); |
duke@435 | 986 | if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() >0 ) { |
duke@435 | 987 | Node *post_phi = old_new[main_phi->_idx]; |
duke@435 | 988 | Node *fallmain = clone_up_backedge_goo(main_head->back_control(), |
duke@435 | 989 | post_head->init_control(), |
kvn@2985 | 990 | main_phi->in(LoopNode::LoopBackControl), |
kvn@2985 | 991 | visited, clones); |
duke@435 | 992 | _igvn.hash_delete(post_phi); |
duke@435 | 993 | post_phi->set_req( LoopNode::EntryControl, fallmain ); |
duke@435 | 994 | } |
duke@435 | 995 | } |
duke@435 | 996 | |
duke@435 | 997 | // Update local caches for next stanza |
duke@435 | 998 | main_exit = new_main_exit; |
duke@435 | 999 | |
duke@435 | 1000 | |
duke@435 | 1001 | //------------------------------ |
duke@435 | 1002 | // Step B: Create Pre-Loop. |
duke@435 | 1003 | |
duke@435 | 1004 | // Step B1: Clone the loop body. The clone becomes the pre-loop. The main |
duke@435 | 1005 | // loop pre-header illegally has 2 control users (old & new loops). |
duke@435 | 1006 | clone_loop( loop, old_new, dd_main_head ); |
duke@435 | 1007 | CountedLoopNode* pre_head = old_new[main_head->_idx]->as_CountedLoop(); |
duke@435 | 1008 | CountedLoopEndNode* pre_end = old_new[main_end ->_idx]->as_CountedLoopEnd(); |
duke@435 | 1009 | pre_head->set_pre_loop(main_head); |
duke@435 | 1010 | Node *pre_incr = old_new[incr->_idx]; |
duke@435 | 1011 | |
kvn@835 | 1012 | // Reduce the pre-loop trip count. |
kvn@835 | 1013 | pre_end->_prob = PROB_FAIR; |
kvn@835 | 1014 | |
duke@435 | 1015 | // Find the pre-loop normal exit. |
duke@435 | 1016 | Node* pre_exit = pre_end->proj_out(false); |
duke@435 | 1017 | assert( pre_exit->Opcode() == Op_IfFalse, "" ); |
duke@435 | 1018 | IfFalseNode *new_pre_exit = new (C, 1) IfFalseNode(pre_end); |
duke@435 | 1019 | _igvn.register_new_node_with_optimizer( new_pre_exit ); |
duke@435 | 1020 | set_idom(new_pre_exit, pre_end, dd_main_head); |
duke@435 | 1021 | set_loop(new_pre_exit, loop->_parent); |
duke@435 | 1022 | |
duke@435 | 1023 | // Step B2: Build a zero-trip guard for the main-loop. After leaving the |
duke@435 | 1024 | // pre-loop, the main-loop may not execute at all. Later in life this |
duke@435 | 1025 | // zero-trip guard will become the minimum-trip guard when we unroll |
duke@435 | 1026 | // the main-loop. |
kvn@651 | 1027 | Node *min_opaq = new (C, 2) Opaque1Node(C, limit); |
duke@435 | 1028 | Node *min_cmp = new (C, 3) CmpINode( pre_incr, min_opaq ); |
duke@435 | 1029 | Node *min_bol = new (C, 2) BoolNode( min_cmp, b_test ); |
duke@435 | 1030 | register_new_node( min_opaq, new_pre_exit ); |
duke@435 | 1031 | register_new_node( min_cmp , new_pre_exit ); |
duke@435 | 1032 | register_new_node( min_bol , new_pre_exit ); |
duke@435 | 1033 | |
kvn@835 | 1034 | // Build the IfNode (assume the main-loop is executed always). |
kvn@835 | 1035 | IfNode *min_iff = new (C, 2) IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN ); |
duke@435 | 1036 | _igvn.register_new_node_with_optimizer( min_iff ); |
duke@435 | 1037 | set_idom(min_iff, new_pre_exit, dd_main_head); |
duke@435 | 1038 | set_loop(min_iff, loop->_parent); |
duke@435 | 1039 | |
duke@435 | 1040 | // Plug in the false-path, taken if we need to skip main-loop |
duke@435 | 1041 | _igvn.hash_delete( pre_exit ); |
duke@435 | 1042 | pre_exit->set_req(0, min_iff); |
duke@435 | 1043 | set_idom(pre_exit, min_iff, dd_main_head); |
duke@435 | 1044 | set_idom(pre_exit->unique_out(), min_iff, dd_main_head); |
duke@435 | 1045 | // Make the true-path, must enter the main loop |
duke@435 | 1046 | Node *min_taken = new (C, 1) IfTrueNode( min_iff ); |
duke@435 | 1047 | _igvn.register_new_node_with_optimizer( min_taken ); |
duke@435 | 1048 | set_idom(min_taken, min_iff, dd_main_head); |
duke@435 | 1049 | set_loop(min_taken, loop->_parent); |
duke@435 | 1050 | // Plug in the true path |
duke@435 | 1051 | _igvn.hash_delete( main_head ); |
duke@435 | 1052 | main_head->set_req(LoopNode::EntryControl, min_taken); |
duke@435 | 1053 | set_idom(main_head, min_taken, dd_main_head); |
duke@435 | 1054 | |
kvn@2985 | 1055 | visited.Clear(); |
kvn@2985 | 1056 | clones.clear(); |
duke@435 | 1057 | // Step B3: Make the fall-in values to the main-loop come from the |
duke@435 | 1058 | // fall-out values of the pre-loop. |
duke@435 | 1059 | for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) { |
duke@435 | 1060 | Node* main_phi = main_head->fast_out(i2); |
duke@435 | 1061 | if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) { |
duke@435 | 1062 | Node *pre_phi = old_new[main_phi->_idx]; |
duke@435 | 1063 | Node *fallpre = clone_up_backedge_goo(pre_head->back_control(), |
duke@435 | 1064 | main_head->init_control(), |
kvn@2985 | 1065 | pre_phi->in(LoopNode::LoopBackControl), |
kvn@2985 | 1066 | visited, clones); |
duke@435 | 1067 | _igvn.hash_delete(main_phi); |
duke@435 | 1068 | main_phi->set_req( LoopNode::EntryControl, fallpre ); |
duke@435 | 1069 | } |
duke@435 | 1070 | } |
duke@435 | 1071 | |
duke@435 | 1072 | // Step B4: Shorten the pre-loop to run only 1 iteration (for now). |
duke@435 | 1073 | // RCE and alignment may change this later. |
duke@435 | 1074 | Node *cmp_end = pre_end->cmp_node(); |
duke@435 | 1075 | assert( cmp_end->in(2) == limit, "" ); |
duke@435 | 1076 | Node *pre_limit = new (C, 3) AddINode( init, stride ); |
duke@435 | 1077 | |
duke@435 | 1078 | // Save the original loop limit in this Opaque1 node for |
duke@435 | 1079 | // use by range check elimination. |
kvn@651 | 1080 | Node *pre_opaq = new (C, 3) Opaque1Node(C, pre_limit, limit); |
duke@435 | 1081 | |
duke@435 | 1082 | register_new_node( pre_limit, pre_head->in(0) ); |
duke@435 | 1083 | register_new_node( pre_opaq , pre_head->in(0) ); |
duke@435 | 1084 | |
duke@435 | 1085 | // Since there are no other users of the pre-loop compare, I can hack the limit directly
duke@435 | 1086 | assert( cmp_end->outcnt() == 1, "no other users" ); |
duke@435 | 1087 | _igvn.hash_delete(cmp_end); |
duke@435 | 1088 | cmp_end->set_req(2, peel_only ? pre_limit : pre_opaq); |
duke@435 | 1089 | |
duke@435 | 1090 | // Special case for not-equal loop bounds: |
duke@435 | 1091 | // Change pre loop test, main loop test, and the |
duke@435 | 1092 | // main loop guard test to use lt or gt depending on stride |
duke@435 | 1093 | // direction: |
duke@435 | 1094 | // positive stride use < |
duke@435 | 1095 | // negative stride use > |
kvn@2979 | 1096 | // |
kvn@2979 | 1097 | // The not-equal test is kept for the post loop to handle the case
kvn@2979 | 1098 | // where init > limit with stride > 0 (and the reverse).
duke@435 | 1099 | |
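// For example (illustrative), a counted loop whose exit test is "i != limit"
// with a positive stride has its pre-loop test, main-loop test and main-loop
// guard rewritten below to use "i < limit", while the post-loop keeps the
// original "!=" test to preserve the behavior when init > limit (see the
// note above).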
duke@435 | 1100 | if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) { |
duke@435 | 1101 | |
duke@435 | 1102 | BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt; |
duke@435 | 1103 | // Modify pre loop end condition |
duke@435 | 1104 | Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool(); |
duke@435 | 1105 | BoolNode* new_bol0 = new (C, 2) BoolNode(pre_bol->in(1), new_test); |
duke@435 | 1106 | register_new_node( new_bol0, pre_head->in(0) ); |
duke@435 | 1107 | _igvn.hash_delete(pre_end); |
duke@435 | 1108 | pre_end->set_req(CountedLoopEndNode::TestValue, new_bol0); |
duke@435 | 1109 | // Modify main loop guard condition |
duke@435 | 1110 | assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay"); |
duke@435 | 1111 | BoolNode* new_bol1 = new (C, 2) BoolNode(min_bol->in(1), new_test); |
duke@435 | 1112 | register_new_node( new_bol1, new_pre_exit ); |
duke@435 | 1113 | _igvn.hash_delete(min_iff); |
duke@435 | 1114 | min_iff->set_req(CountedLoopEndNode::TestValue, new_bol1); |
duke@435 | 1115 | // Modify main loop end condition |
duke@435 | 1116 | BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool(); |
duke@435 | 1117 | BoolNode* new_bol2 = new (C, 2) BoolNode(main_bol->in(1), new_test); |
duke@435 | 1118 | register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) ); |
duke@435 | 1119 | _igvn.hash_delete(main_end); |
duke@435 | 1120 | main_end->set_req(CountedLoopEndNode::TestValue, new_bol2); |
duke@435 | 1121 | } |
duke@435 | 1122 | |
duke@435 | 1123 | // Flag main loop |
duke@435 | 1124 | main_head->set_main_loop(); |
duke@435 | 1125 | if( peel_only ) main_head->set_main_no_pre_loop(); |
duke@435 | 1126 | |
kvn@2877 | 1127 | // Subtract a trip count for the pre-loop. |
kvn@2877 | 1128 | main_head->set_trip_count(main_head->trip_count() - 1); |
kvn@2877 | 1129 | |
duke@435 | 1130 | // It's difficult to be precise about the trip-counts |
duke@435 | 1131 | // for the pre/post loops. They are usually very short, |
duke@435 | 1132 | // so guess that 4 trips is a reasonable value. |
duke@435 | 1133 | post_head->set_profile_trip_cnt(4.0); |
duke@435 | 1134 | pre_head->set_profile_trip_cnt(4.0); |
duke@435 | 1135 | |
duke@435 | 1136 | // Now force out all loop-invariant dominating tests. The optimizer |
duke@435 | 1137 | // finds some, but we _know_ they are all useless. |
duke@435 | 1138 | peeled_dom_test_elim(loop,old_new); |
duke@435 | 1139 | } |
duke@435 | 1140 | |
duke@435 | 1141 | //------------------------------is_invariant----------------------------- |
duke@435 | 1142 | // Return true if n is invariant |
duke@435 | 1143 | bool IdealLoopTree::is_invariant(Node* n) const { |
cfang@1607 | 1144 | Node *n_c = _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n; |
duke@435 | 1145 | if (n_c->is_top()) return false; |
duke@435 | 1146 | return !is_member(_phase->get_loop(n_c)); |
duke@435 | 1147 | } |
duke@435 | 1148 | |
duke@435 | 1149 | |
duke@435 | 1150 | //------------------------------do_unroll-------------------------------------- |
duke@435 | 1151 | // Unroll the loop body one step - make each trip do 2 iterations. |
duke@435 | 1152 | void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) { |
kvn@2665 | 1153 | assert(LoopUnrollLimit, ""); |
kvn@2665 | 1154 | CountedLoopNode *loop_head = loop->_head->as_CountedLoop(); |
kvn@2665 | 1155 | CountedLoopEndNode *loop_end = loop_head->loopexit(); |
kvn@2665 | 1156 | assert(loop_end, ""); |
duke@435 | 1157 | #ifndef PRODUCT |
kvn@2665 | 1158 | if (PrintOpto && VerifyLoopOptimizations) { |
duke@435 | 1159 | tty->print("Unrolling "); |
duke@435 | 1160 | loop->dump_head(); |
kvn@2665 | 1161 | } else if (TraceLoopOpts) { |
kvn@2747 | 1162 | if (loop_head->trip_count() < (uint)LoopUnrollLimit) { |
kvn@2877 | 1163 | tty->print("Unroll %d(%2d) ", loop_head->unrolled_count()*2, loop_head->trip_count()); |
kvn@2735 | 1164 | } else { |
kvn@2877 | 1165 | tty->print("Unroll %d ", loop_head->unrolled_count()*2); |
kvn@2735 | 1166 | } |
kvn@2665 | 1167 | loop->dump_head(); |
duke@435 | 1168 | } |
duke@435 | 1169 | #endif |
duke@435 | 1170 | |
duke@435 | 1171 | // Remember loop node count before unrolling to detect |
duke@435 | 1172 | // if rounds of unroll/optimize are making progress.
duke@435 | 1173 | loop_head->set_node_count_before_unroll(loop->_body.size()); |
duke@435 | 1174 | |
duke@435 | 1175 | Node *ctrl = loop_head->in(LoopNode::EntryControl); |
duke@435 | 1176 | Node *limit = loop_head->limit(); |
duke@435 | 1177 | Node *init = loop_head->init_trip(); |
kvn@2665 | 1178 | Node *stride = loop_head->stride(); |
duke@435 | 1179 | |
duke@435 | 1180 | Node *opaq = NULL; |
kvn@2877 | 1181 | if (adjust_min_trip) { // If not maximally unrolling, need adjustment |
kvn@2877 | 1182 | // Search for zero-trip guard. |
duke@435 | 1183 | assert( loop_head->is_main_loop(), "" ); |
duke@435 | 1184 | assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" ); |
duke@435 | 1185 | Node *iff = ctrl->in(0); |
duke@435 | 1186 | assert( iff->Opcode() == Op_If, "" ); |
duke@435 | 1187 | Node *bol = iff->in(1); |
duke@435 | 1188 | assert( bol->Opcode() == Op_Bool, "" ); |
duke@435 | 1189 | Node *cmp = bol->in(1); |
duke@435 | 1190 | assert( cmp->Opcode() == Op_CmpI, "" ); |
duke@435 | 1191 | opaq = cmp->in(2); |
kvn@2877 | 1192 | // Occasionally it's possible for a zero-trip guard Opaque1 node to be |
duke@435 | 1193 | // optimized away and then another round of loop opts to be attempted.
duke@435 | 1194 | // We cannot optimize this particular loop in that case.
kvn@2877 | 1195 | if (opaq->Opcode() != Op_Opaque1) |
kvn@2877 | 1196 | return; // Cannot find zero-trip guard! Bail out! |
kvn@2877 | 1197 | // Zero-trip test uses an 'opaque' node which is not shared. |
kvn@2877 | 1198 | assert(opaq->outcnt() == 1 && opaq->in(1) == limit, ""); |
duke@435 | 1199 | } |
duke@435 | 1200 | |
duke@435 | 1201 | C->set_major_progress(); |
duke@435 | 1202 | |
kvn@2877 | 1203 | Node* new_limit = NULL; |
kvn@2877 | 1204 | if (UnrollLimitCheck) { |
kvn@2877 | 1205 | int stride_con = stride->get_int(); |
kvn@2877 | 1206 | int stride_p = (stride_con > 0) ? stride_con : -stride_con; |
kvn@2877 | 1207 | uint old_trip_count = loop_head->trip_count(); |
kvn@2877 | 1208 | // Verify that unroll policy result is still valid. |
kvn@2877 | 1209 | assert(old_trip_count > 1 && |
kvn@2877 | 1210 | (!adjust_min_trip || stride_p <= (1<<3)*loop_head->unrolled_count()), "sanity"); |
duke@435 | 1211 | |
kvn@2877 | 1212 | // Adjust the loop limit to keep the iteration count valid after unrolling.
kvn@2877 | 1213 | // Use (limit - stride) instead of (((limit - init)/stride) & (-2))*stride |
kvn@2877 | 1214 | // which may overflow. |
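// Illustration (hypothetical values): with init = -2,000,000,000,
// limit = 2,000,000,000 and stride = 1, the subtraction (limit - init) in the
// rounded-trip formula already overflows a 32-bit int, while (limit - stride)
// stays in range; possible under/overflow of (limit - stride) itself is
// handled below.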
kvn@2877 | 1215 | if (!adjust_min_trip) { |
kvn@2877 | 1216 | assert(old_trip_count > 1 && (old_trip_count & 1) == 0, |
kvn@2877 | 1217 | "odd trip count for maximally unroll"); |
kvn@2877 | 1218 | // No need to adjust the limit for maximal unrolling since the trip count is even.
kvn@2877 | 1219 | } else if (loop_head->has_exact_trip_count() && init->is_Con()) { |
kvn@2877 | 1220 | // The loop's limit is constant. The loop's init could be constant when the pre-loop
kvn@2877 | 1221 | // becomes a peeled iteration.
kvn@2877 | 1222 | long init_con = init->get_int(); |
kvn@2877 | 1223 | // We can keep the old loop limit if the iteration count stays the same:
kvn@2877 | 1224 | //   old_trip_count == new_trip_count * 2
kvn@2877 | 1225 | // Note: since old_trip_count >= 2, new_trip_count >= 1,
kvn@2877 | 1226 | // so we also don't need to adjust the zero-trip test.
kvn@2877 | 1227 | long limit_con = limit->get_int(); |
kvn@2877 | 1228 | // (stride_con*2) does not overflow since stride_con <= 8.
kvn@2877 | 1229 | int new_stride_con = stride_con * 2; |
kvn@2877 | 1230 | int stride_m = new_stride_con - (stride_con > 0 ? 1 : -1); |
kvn@2877 | 1231 | long trip_count = (limit_con - init_con + stride_m)/new_stride_con; |
kvn@2877 | 1232 | // The new trip count should satisfy the following conditions.
kvn@2877 | 1233 | assert(trip_count > 0 && (julong)trip_count < (julong)max_juint/2, "sanity"); |
kvn@2877 | 1234 | uint new_trip_count = (uint)trip_count; |
kvn@2877 | 1235 | adjust_min_trip = (old_trip_count != new_trip_count*2); |
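// Illustrative example: old_trip_count = 10, init_con = 0, limit_con = 10,
// stride_con = 1 gives new_stride_con = 2, stride_m = 1 and
// trip_count = (10 - 0 + 1)/2 = 5; since 10 == 5*2 the old limit is kept
// and adjust_min_trip is reset to false.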
kvn@2877 | 1236 | } |
duke@435 | 1237 | |
kvn@2877 | 1238 | if (adjust_min_trip) { |
kvn@2877 | 1239 | // Step 2: Adjust the trip limit if it is called for. |
kvn@2877 | 1240 | // The adjustment amount is -stride. We need to make sure that if the
kvn@2877 | 1241 | // adjustment underflows or overflows, the main loop is skipped.
kvn@2877 | 1242 | Node* cmp = loop_end->cmp_node(); |
kvn@2877 | 1243 | assert(cmp->in(2) == limit, "sanity"); |
kvn@2877 | 1244 | assert(opaq != NULL && opaq->in(1) == limit, "sanity"); |
duke@435 | 1245 | |
kvn@2877 | 1246 | // Verify that policy_unroll result is still valid. |
kvn@2877 | 1247 | const TypeInt* limit_type = _igvn.type(limit)->is_int(); |
kvn@2877 | 1248 | assert(stride_con > 0 && ((limit_type->_hi - stride_con) < limit_type->_hi) || |
kvn@2877 | 1249 | stride_con < 0 && ((limit_type->_lo - stride_con) > limit_type->_lo), "sanity"); |
duke@435 | 1250 | |
kvn@2877 | 1251 | if (limit->is_Con()) { |
kvn@2877 | 1252 | // The check in policy_unroll and the assert above guarantee |
kvn@2877 | 1253 | // no underflow if limit is constant. |
kvn@2877 | 1254 | new_limit = _igvn.intcon(limit->get_int() - stride_con); |
kvn@2877 | 1255 | set_ctrl(new_limit, C->root()); |
kvn@2877 | 1256 | } else { |
kvn@2880 | 1257 | // Limit is not constant. |
kvn@2899 | 1258 | if (loop_head->unrolled_count() == 1) { // only for first unroll |
kvn@2880 | 1259 | // Separate the limit with an Opaque node in case it is an incremented
kvn@2880 | 1260 | // variable from a previous loop, to avoid using the pre-incremented
kvn@2880 | 1261 | // value, which could increase register pressure.
kvn@2880 | 1262 | // Otherwise the reorg_offsets() optimization will create a separate
kvn@2880 | 1263 | // Opaque node for each use of the trip counter and, as a result, the
kvn@2880 | 1264 | // zero-trip guard limit will differ from the loop limit.
kvn@2880 | 1265 | assert(has_ctrl(opaq), "should have it"); |
kvn@2880 | 1266 | Node* opaq_ctrl = get_ctrl(opaq); |
kvn@2880 | 1267 | limit = new (C, 2) Opaque2Node( C, limit ); |
kvn@2880 | 1268 | register_new_node( limit, opaq_ctrl ); |
kvn@2880 | 1269 | } |
kvn@2877 | 1270 | if (stride_con > 0 && ((limit_type->_lo - stride_con) < limit_type->_lo) || |
kvn@2877 | 1271 | stride_con < 0 && ((limit_type->_hi - stride_con) > limit_type->_hi)) { |
kvn@2877 | 1272 | // No underflow. |
kvn@2877 | 1273 | new_limit = new (C, 3) SubINode(limit, stride); |
kvn@2877 | 1274 | } else { |
kvn@2877 | 1275 | // (limit - stride) may underflow. |
kvn@2877 | 1276 | // Clamp the adjustment value with MININT or MAXINT: |
kvn@2877 | 1277 | // |
kvn@2877 | 1278 | // new_limit = limit-stride |
kvn@2877 | 1279 | // if (stride > 0) |
kvn@2877 | 1280 | // new_limit = (limit < new_limit) ? MININT : new_limit; |
kvn@2877 | 1281 | // else |
kvn@2877 | 1282 | // new_limit = (limit > new_limit) ? MAXINT : new_limit; |
kvn@2877 | 1283 | // |
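// Illustrative example (hypothetical values): with stride_con = 4 and a limit
// already close to min_jint, (limit - stride) wraps around to a large positive
// value; the CMove built below then sees (limit < new_limit) and substitutes
// MININT, so the zero-trip guard skips the main loop instead of running it
// with a bogus limit.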
kvn@2877 | 1284 | BoolTest::mask bt = loop_end->test_trip(); |
kvn@2877 | 1285 | assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected"); |
kvn@2877 | 1286 | Node* adj_max = _igvn.intcon((stride_con > 0) ? min_jint : max_jint); |
kvn@2877 | 1287 | set_ctrl(adj_max, C->root()); |
kvn@2877 | 1288 | Node* old_limit = NULL; |
kvn@2877 | 1289 | Node* adj_limit = NULL; |
kvn@2877 | 1290 | Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL; |
kvn@2877 | 1291 | if (loop_head->unrolled_count() > 1 && |
kvn@2877 | 1292 | limit->is_CMove() && limit->Opcode() == Op_CMoveI && |
kvn@2877 | 1293 | limit->in(CMoveNode::IfTrue) == adj_max && |
kvn@2877 | 1294 | bol->as_Bool()->_test._test == bt && |
kvn@2877 | 1295 | bol->in(1)->Opcode() == Op_CmpI && |
kvn@2877 | 1296 | bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) { |
kvn@2877 | 1297 | // Loop was unrolled before. |
kvn@2877 | 1298 | // Optimize the limit to avoid nested CMove: |
kvn@2877 | 1299 | // use original limit as old limit. |
kvn@2877 | 1300 | old_limit = bol->in(1)->in(1); |
kvn@2877 | 1301 | // Adjust previous adjusted limit. |
kvn@2877 | 1302 | adj_limit = limit->in(CMoveNode::IfFalse); |
kvn@2877 | 1303 | adj_limit = new (C, 3) SubINode(adj_limit, stride); |
kvn@2877 | 1304 | } else { |
kvn@2877 | 1305 | old_limit = limit; |
kvn@2877 | 1306 | adj_limit = new (C, 3) SubINode(limit, stride); |
kvn@2877 | 1307 | } |
kvn@2877 | 1308 | assert(old_limit != NULL && adj_limit != NULL, ""); |
kvn@2877 | 1309 | register_new_node( adj_limit, ctrl ); // adjust amount |
kvn@2877 | 1310 | Node* adj_cmp = new (C, 3) CmpINode(old_limit, adj_limit); |
kvn@2877 | 1311 | register_new_node( adj_cmp, ctrl ); |
kvn@2877 | 1312 | Node* adj_bool = new (C, 2) BoolNode(adj_cmp, bt); |
kvn@2877 | 1313 | register_new_node( adj_bool, ctrl ); |
kvn@2877 | 1314 | new_limit = new (C, 4) CMoveINode(adj_bool, adj_limit, adj_max, TypeInt::INT); |
kvn@2877 | 1315 | } |
kvn@2877 | 1316 | register_new_node(new_limit, ctrl); |
kvn@2877 | 1317 | } |
kvn@2877 | 1318 | assert(new_limit != NULL, ""); |
kvn@2880 | 1319 | // Replace in loop test. |
kvn@2929 | 1320 | assert(loop_end->in(1)->in(1) == cmp, "sanity"); |
kvn@2929 | 1321 | if (cmp->outcnt() == 1 && loop_end->in(1)->outcnt() == 1) { |
kvn@2929 | 1322 | // Don't need to create new test since only one user. |
kvn@2929 | 1323 | _igvn.hash_delete(cmp); |
kvn@2929 | 1324 | cmp->set_req(2, new_limit); |
kvn@2929 | 1325 | } else { |
kvn@2929 | 1326 | // Create new test since it is shared. |
kvn@2929 | 1327 | Node* ctrl2 = loop_end->in(0); |
kvn@2929 | 1328 | Node* cmp2 = cmp->clone(); |
kvn@2929 | 1329 | cmp2->set_req(2, new_limit); |
kvn@2929 | 1330 | register_new_node(cmp2, ctrl2); |
kvn@2929 | 1331 | Node* bol2 = loop_end->in(1)->clone(); |
kvn@2929 | 1332 | bol2->set_req(1, cmp2); |
kvn@2929 | 1333 | register_new_node(bol2, ctrl2); |
kvn@2929 | 1334 | _igvn.hash_delete(loop_end); |
kvn@2929 | 1335 | loop_end->set_req(1, bol2); |
kvn@2929 | 1336 | } |
kvn@2880 | 1337 | // Step 3: Find the min-trip test guaranteed before a 'main' loop. |
kvn@2880 | 1338 | // Make it a 1-trip test (means at least 2 trips). |
kvn@2877 | 1339 | |
kvn@2880 | 1340 | // Guard test uses an 'opaque' node which is not shared. Hence I |
kvn@2880 | 1341 | // can edit its inputs directly. Hammer in the new limit for the
kvn@2880 | 1342 | // minimum-trip guard. |
kvn@2880 | 1343 | assert(opaq->outcnt() == 1, ""); |
kvn@2880 | 1344 | _igvn.hash_delete(opaq); |
kvn@2880 | 1345 | opaq->set_req(1, new_limit); |
kvn@2877 | 1346 | } |
kvn@2877 | 1347 | |
kvn@2877 | 1348 | // Adjust max trip count. The trip count is intentionally rounded |
kvn@2877 | 1349 | // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll, |
kvn@2877 | 1350 | // the main, unrolled, part of the loop will never execute as it is protected |
kvn@2877 | 1351 | // by the min-trip test. See bug 4834191 for a case where we over-unrolled |
kvn@2877 | 1352 | // and later determined that part of the unrolled loop was dead. |
kvn@2877 | 1353 | loop_head->set_trip_count(old_trip_count / 2); |
kvn@2877 | 1354 | |
kvn@2877 | 1355 | // Double the count of original iterations in the unrolled loop body. |
kvn@2877 | 1356 | loop_head->double_unrolled_count(); |
kvn@2877 | 1357 | |
kvn@2877 | 1358 | } else { // LoopLimitCheck |
kvn@2877 | 1359 | |
kvn@2877 | 1360 | // Adjust max trip count. The trip count is intentionally rounded |
kvn@2877 | 1361 | // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll, |
kvn@2877 | 1362 | // the main, unrolled, part of the loop will never execute as it is protected |
kvn@2877 | 1363 | // by the min-trip test. See bug 4834191 for a case where we over-unrolled |
kvn@2877 | 1364 | // and later determined that part of the unrolled loop was dead. |
kvn@2877 | 1365 | loop_head->set_trip_count(loop_head->trip_count() / 2); |
kvn@2877 | 1366 | |
kvn@2877 | 1367 | // Double the count of original iterations in the unrolled loop body. |
kvn@2877 | 1368 | loop_head->double_unrolled_count(); |
kvn@2877 | 1369 | |
kvn@2877 | 1370 | // ----------- |
kvn@2877 | 1371 | // Step 2: Cut back the trip counter for an unroll amount of 2. |
kvn@2877 | 1372 | // Loop will normally trip (limit - init)/stride_con. Since it's a |
kvn@2877 | 1373 | // CountedLoop this is exact (stride divides limit-init exactly). |
kvn@2877 | 1374 | // We are going to double the loop body, so we want to knock off any |
kvn@2877 | 1375 | // odd iteration: (trip_cnt & ~1). Then back compute a new limit. |
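// Illustrative example: init = 0, limit = 7, stride = 1 gives span = 7,
// trip = 7, rond = 7 & -2 = 6, spn2 = 6 and new_limit = 6, so the doubled
// body runs exactly 3 times and the leftover odd iteration is handled by
// the post-loop.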
kvn@2877 | 1376 | Node *span = new (C, 3) SubINode( limit, init ); |
kvn@2877 | 1377 | register_new_node( span, ctrl ); |
kvn@2877 | 1378 | Node *trip = new (C, 3) DivINode( 0, span, stride ); |
kvn@2877 | 1379 | register_new_node( trip, ctrl ); |
kvn@2877 | 1380 | Node *mtwo = _igvn.intcon(-2); |
kvn@2877 | 1381 | set_ctrl(mtwo, C->root()); |
kvn@2877 | 1382 | Node *rond = new (C, 3) AndINode( trip, mtwo ); |
kvn@2877 | 1383 | register_new_node( rond, ctrl ); |
kvn@2877 | 1384 | Node *spn2 = new (C, 3) MulINode( rond, stride ); |
kvn@2877 | 1385 | register_new_node( spn2, ctrl ); |
kvn@2877 | 1386 | new_limit = new (C, 3) AddINode( spn2, init ); |
kvn@2877 | 1387 | register_new_node( new_limit, ctrl ); |
kvn@2877 | 1388 | |
kvn@2877 | 1389 | // Hammer in the new limit |
kvn@2877 | 1390 | Node *ctrl2 = loop_end->in(0); |
kvn@2877 | 1391 | Node *cmp2 = new (C, 3) CmpINode( loop_head->incr(), new_limit ); |
kvn@2877 | 1392 | register_new_node( cmp2, ctrl2 ); |
kvn@2877 | 1393 | Node *bol2 = new (C, 2) BoolNode( cmp2, loop_end->test_trip() ); |
kvn@2877 | 1394 | register_new_node( bol2, ctrl2 ); |
kvn@2877 | 1395 | _igvn.hash_delete(loop_end); |
kvn@2877 | 1396 | loop_end->set_req(CountedLoopEndNode::TestValue, bol2); |
kvn@2877 | 1397 | |
kvn@2877 | 1398 | // Step 3: Find the min-trip test guaranteed before a 'main' loop. |
kvn@2877 | 1399 | // Make it a 1-trip test (means at least 2 trips). |
kvn@2877 | 1400 | if( adjust_min_trip ) { |
kvn@2877 | 1401 | assert( new_limit != NULL, "" ); |
kvn@2877 | 1402 | // Guard test uses an 'opaque' node which is not shared. Hence I |
kvn@2877 | 1403 | // can edit its inputs directly. Hammer in the new limit for the
kvn@2877 | 1404 | // minimum-trip guard. |
kvn@2877 | 1405 | assert( opaq->outcnt() == 1, "" ); |
kvn@2877 | 1406 | _igvn.hash_delete(opaq); |
kvn@2877 | 1407 | opaq->set_req(1, new_limit); |
kvn@2877 | 1408 | } |
kvn@2877 | 1409 | } // LoopLimitCheck |
duke@435 | 1410 | |
duke@435 | 1411 | // --------- |
duke@435 | 1412 | // Step 4: Clone the loop body. Move it inside the loop. This loop body |
duke@435 | 1413 | // represents the odd iterations; since the loop trips an even number of |
duke@435 | 1414 | // times its backedge is never taken. Kill the backedge. |
duke@435 | 1415 | uint dd = dom_depth(loop_head); |
duke@435 | 1416 | clone_loop( loop, old_new, dd ); |
duke@435 | 1417 | |
duke@435 | 1418 | // Make backedges of the clone equal to backedges of the original. |
duke@435 | 1419 | // Make the fall-in from the original come from the fall-out of the clone. |
duke@435 | 1420 | for (DUIterator_Fast jmax, j = loop_head->fast_outs(jmax); j < jmax; j++) { |
duke@435 | 1421 | Node* phi = loop_head->fast_out(j); |
duke@435 | 1422 | if( phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0 ) { |
duke@435 | 1423 | Node *newphi = old_new[phi->_idx]; |
duke@435 | 1424 | _igvn.hash_delete( phi ); |
duke@435 | 1425 | _igvn.hash_delete( newphi ); |
duke@435 | 1426 | |
duke@435 | 1427 | phi ->set_req(LoopNode:: EntryControl, newphi->in(LoopNode::LoopBackControl)); |
duke@435 | 1428 | newphi->set_req(LoopNode::LoopBackControl, phi ->in(LoopNode::LoopBackControl)); |
duke@435 | 1429 | phi ->set_req(LoopNode::LoopBackControl, C->top()); |
duke@435 | 1430 | } |
duke@435 | 1431 | } |
duke@435 | 1432 | Node *clone_head = old_new[loop_head->_idx]; |
duke@435 | 1433 | _igvn.hash_delete( clone_head ); |
duke@435 | 1434 | loop_head ->set_req(LoopNode:: EntryControl, clone_head->in(LoopNode::LoopBackControl)); |
duke@435 | 1435 | clone_head->set_req(LoopNode::LoopBackControl, loop_head ->in(LoopNode::LoopBackControl)); |
duke@435 | 1436 | loop_head ->set_req(LoopNode::LoopBackControl, C->top()); |
duke@435 | 1437 | loop->_head = clone_head; // New loop header |
duke@435 | 1438 | |
duke@435 | 1439 | set_idom(loop_head, loop_head ->in(LoopNode::EntryControl), dd); |
duke@435 | 1440 | set_idom(clone_head, clone_head->in(LoopNode::EntryControl), dd); |
duke@435 | 1441 | |
duke@435 | 1442 | // Kill the clone's backedge |
duke@435 | 1443 | Node *newcle = old_new[loop_end->_idx]; |
duke@435 | 1444 | _igvn.hash_delete( newcle ); |
duke@435 | 1445 | Node *one = _igvn.intcon(1); |
duke@435 | 1446 | set_ctrl(one, C->root()); |
duke@435 | 1447 | newcle->set_req(1, one); |
duke@435 | 1448 | // Force clone into same loop body |
duke@435 | 1449 | uint max = loop->_body.size(); |
duke@435 | 1450 | for( uint k = 0; k < max; k++ ) { |
duke@435 | 1451 | Node *old = loop->_body.at(k); |
duke@435 | 1452 | Node *nnn = old_new[old->_idx]; |
duke@435 | 1453 | loop->_body.push(nnn); |
duke@435 | 1454 | if (!has_ctrl(old)) |
duke@435 | 1455 | set_loop(nnn, loop); |
duke@435 | 1456 | } |
never@802 | 1457 | |
never@802 | 1458 | loop->record_for_igvn(); |
duke@435 | 1459 | } |
duke@435 | 1460 | |
duke@435 | 1461 | //------------------------------do_maximally_unroll---------------------------- |
duke@435 | 1462 | |
duke@435 | 1463 | void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) { |
duke@435 | 1464 | CountedLoopNode *cl = loop->_head->as_CountedLoop(); |
kvn@2877 | 1465 | assert(cl->has_exact_trip_count(), "trip count is not exact"); |
kvn@2665 | 1466 | assert(cl->trip_count() > 0, ""); |
kvn@2665 | 1467 | #ifndef PRODUCT |
kvn@2665 | 1468 | if (TraceLoopOpts) { |
kvn@2665 | 1469 | tty->print("MaxUnroll %d ", cl->trip_count()); |
kvn@2665 | 1470 | loop->dump_head(); |
kvn@2665 | 1471 | } |
kvn@2665 | 1472 | #endif |
duke@435 | 1473 | |
duke@435 | 1474 | // If loop is tripping an odd number of times, peel odd iteration |
kvn@2665 | 1475 | if ((cl->trip_count() & 1) == 1) { |
kvn@2665 | 1476 | do_peeling(loop, old_new); |
duke@435 | 1477 | } |
duke@435 | 1478 | |
duke@435 | 1479 | // Now the remaining trip count is even. Double the loop body.
duke@435 | 1480 | // Do not adjust pre-guards; they are not needed and do not exist. |
kvn@2665 | 1481 | if (cl->trip_count() > 0) { |
kvn@2877 | 1482 | assert((cl->trip_count() & 1) == 0, "missed peeling"); |
kvn@2665 | 1483 | do_unroll(loop, old_new, false); |
duke@435 | 1484 | } |
duke@435 | 1485 | } |
duke@435 | 1486 | |
duke@435 | 1487 | //------------------------------dominates_backedge--------------------------------- |
duke@435 | 1488 | // Returns true if ctrl is executed on every complete iteration |
duke@435 | 1489 | bool IdealLoopTree::dominates_backedge(Node* ctrl) { |
duke@435 | 1490 | assert(ctrl->is_CFG(), "must be control"); |
duke@435 | 1491 | Node* backedge = _head->as_Loop()->in(LoopNode::LoopBackControl); |
duke@435 | 1492 | return _phase->dom_lca_internal(ctrl, backedge) == ctrl; |
duke@435 | 1493 | } |
duke@435 | 1494 | |
kvn@2915 | 1495 | //------------------------------adjust_limit----------------------------------- |
kvn@2915 | 1496 | // Helper function for add_constraint(). |
kvn@2915 | 1497 | Node* PhaseIdealLoop::adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl) { |
kvn@2915 | 1498 | // Compute "I :: (limit-offset)/scale" |
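// For example (illustrative values): a range-check constraint 2*I + 4 < 100
// (rc_limit == 100, offset == 4, scale == 2) gives X = (100 - 4)/2 = 48, and
// with a positive stride the loop limit below is MINed with 48.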
kvn@2915 | 1499 | Node *con = new (C, 3) SubINode(rc_limit, offset); |
kvn@2915 | 1500 | register_new_node(con, pre_ctrl); |
kvn@2915 | 1501 | Node *X = new (C, 3) DivINode(0, con, scale); |
kvn@2915 | 1502 | register_new_node(X, pre_ctrl); |
kvn@2915 | 1503 | |
kvn@2915 | 1504 | // Adjust loop limit |
kvn@2915 | 1505 | loop_limit = (stride_con > 0) |
kvn@2915 | 1506 | ? (Node*)(new (C, 3) MinINode(loop_limit, X)) |
kvn@2915 | 1507 | : (Node*)(new (C, 3) MaxINode(loop_limit, X)); |
kvn@2915 | 1508 | register_new_node(loop_limit, pre_ctrl); |
kvn@2915 | 1509 | return loop_limit; |
kvn@2915 | 1510 | } |
kvn@2915 | 1511 | |
duke@435 | 1512 | //------------------------------add_constraint--------------------------------- |
kvn@2877 | 1513 | // Constrain the main loop iterations so the conditions: |
kvn@2877 | 1514 | // low_limit <= scale_con * I + offset < upper_limit |
duke@435 | 1515 | // always hold true. That is, either increase the number of iterations in
duke@435 | 1516 | // the pre-loop or the post-loop until the condition holds true in the main |
duke@435 | 1517 | // loop. Stride, scale, offset and limit are all loop invariant. Further, |
duke@435 | 1518 | // stride and scale are constants (offset and limit often are). |
kvn@2877 | 1519 | void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) { |
duke@435 | 1520 | // For positive stride, the pre-loop limit always uses a MAX function |
duke@435 | 1521 | // and the main loop a MIN function. For negative stride these are |
duke@435 | 1522 | // reversed. |
duke@435 | 1523 | |
duke@435 | 1524 | // Also for positive stride*scale the affine function is increasing, so the |
duke@435 | 1525 | // pre-loop must check for underflow and the post-loop for overflow. |
duke@435 | 1526 | // Negative stride*scale reverses this; pre-loop checks for overflow and |
duke@435 | 1527 | // post-loop for underflow. |
kvn@2915 | 1528 | |
kvn@2915 | 1529 | Node *scale = _igvn.intcon(scale_con); |
kvn@2915 | 1530 | set_ctrl(scale, C->root()); |
kvn@2915 | 1531 | |
kvn@2915 | 1532 | if ((stride_con^scale_con) >= 0) { // Use XOR to avoid overflow |
kvn@2877 | 1533 | // The overflow limit: scale*I+offset < upper_limit |
kvn@2877 | 1534 | // For main-loop compute |
kvn@2877 | 1535 | // ( if (scale > 0) /* and stride > 0 */ |
kvn@2877 | 1536 | // I < (upper_limit-offset)/scale |
kvn@2877 | 1537 | // else /* scale < 0 and stride < 0 */ |
kvn@2877 | 1538 | // I > (upper_limit-offset)/scale |
kvn@2877 | 1539 | // ) |
kvn@2877 | 1540 | // |
kvn@2915 | 1541 | // (upper_limit-offset) may overflow or underflow. |
kvn@2877 | 1542 | // But that is fine since the main loop will either run
kvn@2877 | 1543 | // fewer iterations or will be skipped in that case.
kvn@2915 | 1544 | *main_limit = adjust_limit(stride_con, scale, offset, upper_limit, *main_limit, pre_ctrl); |
duke@435 | 1545 | |
kvn@2877 | 1546 | // The underflow limit: low_limit <= scale*I+offset. |
kvn@2877 | 1547 | // For pre-loop compute |
kvn@2877 | 1548 | // NOT(scale*I+offset >= low_limit) |
kvn@2877 | 1549 | // scale*I+offset < low_limit |
kvn@2877 | 1550 | // ( if (scale > 0) /* and stride > 0 */ |
kvn@2877 | 1551 | // I < (low_limit-offset)/scale |
kvn@2877 | 1552 | // else /* scale < 0 and stride < 0 */ |
kvn@2877 | 1553 | // I > (low_limit-offset)/scale |
kvn@2877 | 1554 | // ) |
kvn@2877 | 1555 | |
kvn@2877 | 1556 | if (low_limit->get_int() == -max_jint) { |
kvn@2877 | 1557 | if (!RangeLimitCheck) return; |
kvn@2877 | 1558 | // We need this guard when scale*pre_limit+offset >= limit |
kvn@2915 | 1559 | // due to underflow. So we need to execute the pre-loop until
kvn@2915 | 1560 | // scale*I+offset >= min_int. But (min_int-offset) will |
kvn@2915 | 1561 | // underflow when offset > 0 and X will be > original_limit |
kvn@2915 | 1562 | // when stride > 0. To avoid it we replace positive offset with 0. |
kvn@2915 | 1563 | // |
kvn@2915 | 1564 | // Also (min_int+1 == -max_int) is used instead of min_int here |
kvn@2915 | 1565 | // to avoid problem with scale == -1 (min_int/(-1) == min_int). |
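// The arithmetic shift below builds a mask that is all ones when offset < 0
// and all zeros otherwise, so "offset & sign" keeps a negative offset
// unchanged and replaces a positive offset with 0, as described above.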
kvn@2877 | 1566 | Node* shift = _igvn.intcon(31); |
kvn@2877 | 1567 | set_ctrl(shift, C->root()); |
kvn@2915 | 1568 | Node* sign = new (C, 3) RShiftINode(offset, shift); |
kvn@2915 | 1569 | register_new_node(sign, pre_ctrl); |
kvn@2915 | 1570 | offset = new (C, 3) AndINode(offset, sign); |
kvn@2877 | 1571 | register_new_node(offset, pre_ctrl); |
kvn@2877 | 1572 | } else { |
kvn@2877 | 1573 | assert(low_limit->get_int() == 0, "wrong low limit for range check"); |
kvn@2877 | 1574 | // The only problem we have here is when offset == min_int,
kvn@2915 | 1575 | // since (0-min_int) == min_int. It may be fine for stride > 0 |
kvn@2915 | 1576 | // but for stride < 0 X will be < original_limit. To avoid it |
kvn@2915 | 1577 | // max(pre_limit, original_limit) is used in do_range_check(). |
kvn@2877 | 1578 | } |
kvn@2915 | 1579 | // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond); |
kvn@2915 | 1580 | *pre_limit = adjust_limit((-stride_con), scale, offset, low_limit, *pre_limit, pre_ctrl); |
kvn@2877 | 1581 | |
kvn@2877 | 1582 | } else { // stride_con*scale_con < 0 |
kvn@2877 | 1583 | // For negative stride*scale pre-loop checks for overflow and |
kvn@2877 | 1584 | // post-loop for underflow. |
kvn@2877 | 1585 | // |
kvn@2877 | 1586 | // The overflow limit: scale*I+offset < upper_limit |
kvn@2877 | 1587 | // For pre-loop compute |
kvn@2877 | 1588 | // NOT(scale*I+offset < upper_limit) |
kvn@2877 | 1589 | // scale*I+offset >= upper_limit |
kvn@2877 | 1590 | // scale*I+offset+1 > upper_limit |
kvn@2877 | 1591 | // ( if (scale < 0) /* and stride > 0 */ |
kvn@2877 | 1592 | // I < (upper_limit-(offset+1))/scale |
kvn@2915 | 1593 | // else /* scale > 0 and stride < 0 */ |
kvn@2877 | 1594 | // I > (upper_limit-(offset+1))/scale |
kvn@2877 | 1595 | // ) |
kvn@2915 | 1596 | // |
kvn@2915 | 1597 | // (upper_limit-offset-1) may underflow or overflow. |
kvn@2915 | 1598 | // To avoid it min(pre_limit, original_limit) is used |
kvn@2915 | 1599 | // in do_range_check() for stride > 0 and max() for < 0. |
kvn@2915 | 1600 | Node *one = _igvn.intcon(1); |
kvn@2915 | 1601 | set_ctrl(one, C->root()); |
kvn@2915 | 1602 | |
kvn@2915 | 1603 | Node *plus_one = new (C, 3) AddINode(offset, one); |
kvn@2877 | 1604 | register_new_node( plus_one, pre_ctrl ); |
kvn@2915 | 1605 | // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond); |
kvn@2915 | 1606 | *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl); |
kvn@2877 | 1607 | |
kvn@2915 | 1608 | if (low_limit->get_int() == -max_jint) { |
kvn@2915 | 1609 | if (!RangeLimitCheck) return; |
kvn@2915 | 1610 | // We need this guard when scale*main_limit+offset >= limit |
kvn@2915 | 1611 | // due to underflow. So we need to execute the main-loop while
kvn@2915 | 1612 | // scale*I+offset+1 > min_int. But (min_int-offset-1) will |
kvn@2915 | 1613 | // underflow when (offset+1) > 0 and X will be < main_limit |
kvn@2915 | 1614 | // when scale < 0 (and stride > 0). To avoid it we replace |
kvn@2915 | 1615 | // positive (offset+1) with 0. |
kvn@2915 | 1616 | // |
kvn@2915 | 1617 | // Also (min_int+1 == -max_int) is used instead of min_int here |
kvn@2915 | 1618 | // to avoid problem with scale == -1 (min_int/(-1) == min_int). |
kvn@2915 | 1619 | Node* shift = _igvn.intcon(31); |
kvn@2915 | 1620 | set_ctrl(shift, C->root()); |
kvn@2915 | 1621 | Node* sign = new (C, 3) RShiftINode(plus_one, shift); |
kvn@2915 | 1622 | register_new_node(sign, pre_ctrl); |
kvn@2915 | 1623 | plus_one = new (C, 3) AndINode(plus_one, sign); |
kvn@2915 | 1624 | register_new_node(plus_one, pre_ctrl); |
kvn@2915 | 1625 | } else { |
kvn@2915 | 1626 | assert(low_limit->get_int() == 0, "wrong low limit for range check"); |
kvn@2915 | 1627 | // The only problem we have here is when offset == max_int,
kvn@2915 | 1628 | // since (max_int+1) == min_int and (0-min_int) == min_int. |
kvn@2915 | 1629 | // But that is fine since the main loop will either run
kvn@2915 | 1630 | // fewer iterations or will be skipped in that case.
kvn@2915 | 1631 | } |
kvn@2915 | 1632 | // The underflow limit: low_limit <= scale*I+offset. |
kvn@2915 | 1633 | // For main-loop compute |
kvn@2915 | 1634 | // scale*I+offset+1 > low_limit |
kvn@2915 | 1635 | // ( if (scale < 0) /* and stride > 0 */ |
kvn@2915 | 1636 | // I < (low_limit-(offset+1))/scale |
kvn@2915 | 1637 | // else /* scale > 0 and stride < 0 */ |
kvn@2915 | 1638 | // I > (low_limit-(offset+1))/scale |
kvn@2915 | 1639 | // ) |
kvn@2877 | 1640 | |
kvn@2915 | 1641 | *main_limit = adjust_limit(stride_con, scale, plus_one, low_limit, *main_limit, pre_ctrl); |
duke@435 | 1642 | } |
duke@435 | 1643 | } |
duke@435 | 1644 | |
duke@435 | 1645 | |
duke@435 | 1646 | //------------------------------is_scaled_iv--------------------------------- |
duke@435 | 1647 | // Return true if exp is a constant times an induction var |
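// For example, exp == iv gives scale 1, exp == iv*4 (or 4*iv) gives scale 4,
// and exp == (iv << 3) is recognized as scale 8.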
duke@435 | 1648 | bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, int* p_scale) { |
duke@435 | 1649 | if (exp == iv) { |
duke@435 | 1650 | if (p_scale != NULL) { |
duke@435 | 1651 | *p_scale = 1; |
duke@435 | 1652 | } |
duke@435 | 1653 | return true; |
duke@435 | 1654 | } |
duke@435 | 1655 | int opc = exp->Opcode(); |
duke@435 | 1656 | if (opc == Op_MulI) { |
duke@435 | 1657 | if (exp->in(1) == iv && exp->in(2)->is_Con()) { |
duke@435 | 1658 | if (p_scale != NULL) { |
duke@435 | 1659 | *p_scale = exp->in(2)->get_int(); |
duke@435 | 1660 | } |
duke@435 | 1661 | return true; |
duke@435 | 1662 | } |
duke@435 | 1663 | if (exp->in(2) == iv && exp->in(1)->is_Con()) { |
duke@435 | 1664 | if (p_scale != NULL) { |
duke@435 | 1665 | *p_scale = exp->in(1)->get_int(); |
duke@435 | 1666 | } |
duke@435 | 1667 | return true; |
duke@435 | 1668 | } |
duke@435 | 1669 | } else if (opc == Op_LShiftI) { |
duke@435 | 1670 | if (exp->in(1) == iv && exp->in(2)->is_Con()) { |
duke@435 | 1671 | if (p_scale != NULL) { |
duke@435 | 1672 | *p_scale = 1 << exp->in(2)->get_int(); |
duke@435 | 1673 | } |
duke@435 | 1674 | return true; |
duke@435 | 1675 | } |
duke@435 | 1676 | } |
duke@435 | 1677 | return false; |
duke@435 | 1678 | } |
duke@435 | 1679 | |
duke@435 | 1680 | //-----------------------------is_scaled_iv_plus_offset------------------------------ |
duke@435 | 1681 | // Return true if exp is a simple induction variable expression: k1*iv + (invar + k2) |
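// For example, exp == 4*iv + off gives *p_scale == 4 and *p_offset == off;
// exp == off - 2*iv gives *p_scale == -2 and *p_offset == off; and
// exp == (4*iv + off) + 7 folds the constant into a new AddI offset node.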
duke@435 | 1682 | bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth) { |
duke@435 | 1683 | if (is_scaled_iv(exp, iv, p_scale)) { |
duke@435 | 1684 | if (p_offset != NULL) { |
duke@435 | 1685 | Node *zero = _igvn.intcon(0); |
duke@435 | 1686 | set_ctrl(zero, C->root()); |
duke@435 | 1687 | *p_offset = zero; |
duke@435 | 1688 | } |
duke@435 | 1689 | return true; |
duke@435 | 1690 | } |
duke@435 | 1691 | int opc = exp->Opcode(); |
duke@435 | 1692 | if (opc == Op_AddI) { |
duke@435 | 1693 | if (is_scaled_iv(exp->in(1), iv, p_scale)) { |
duke@435 | 1694 | if (p_offset != NULL) { |
duke@435 | 1695 | *p_offset = exp->in(2); |
duke@435 | 1696 | } |
duke@435 | 1697 | return true; |
duke@435 | 1698 | } |
duke@435 | 1699 | if (exp->in(2)->is_Con()) { |
duke@435 | 1700 | Node* offset2 = NULL; |
duke@435 | 1701 | if (depth < 2 && |
duke@435 | 1702 | is_scaled_iv_plus_offset(exp->in(1), iv, p_scale, |
duke@435 | 1703 | p_offset != NULL ? &offset2 : NULL, depth+1)) { |
duke@435 | 1704 | if (p_offset != NULL) { |
duke@435 | 1705 | Node *ctrl_off2 = get_ctrl(offset2); |
duke@435 | 1706 | Node* offset = new (C, 3) AddINode(offset2, exp->in(2)); |
duke@435 | 1707 | register_new_node(offset, ctrl_off2); |
duke@435 | 1708 | *p_offset = offset; |
duke@435 | 1709 | } |
duke@435 | 1710 | return true; |
duke@435 | 1711 | } |
duke@435 | 1712 | } |
duke@435 | 1713 | } else if (opc == Op_SubI) { |
duke@435 | 1714 | if (is_scaled_iv(exp->in(1), iv, p_scale)) { |
duke@435 | 1715 | if (p_offset != NULL) { |
duke@435 | 1716 | Node *zero = _igvn.intcon(0); |
duke@435 | 1717 | set_ctrl(zero, C->root()); |
duke@435 | 1718 | Node *ctrl_off = get_ctrl(exp->in(2)); |
duke@435 | 1719 | Node* offset = new (C, 3) SubINode(zero, exp->in(2)); |
duke@435 | 1720 | register_new_node(offset, ctrl_off); |
duke@435 | 1721 | *p_offset = offset; |
duke@435 | 1722 | } |
duke@435 | 1723 | return true; |
duke@435 | 1724 | } |
duke@435 | 1725 | if (is_scaled_iv(exp->in(2), iv, p_scale)) { |
duke@435 | 1726 | if (p_offset != NULL) { |
duke@435 | 1727 | *p_scale *= -1; |
duke@435 | 1728 | *p_offset = exp->in(1); |
duke@435 | 1729 | } |
duke@435 | 1730 | return true; |
duke@435 | 1731 | } |
duke@435 | 1732 | } |
duke@435 | 1733 | return false; |
duke@435 | 1734 | } |
duke@435 | 1735 | |
duke@435 | 1736 | //------------------------------do_range_check--------------------------------- |
duke@435 | 1737 | // Eliminate range-checks and other trip-counter vs loop-invariant tests. |
duke@435 | 1738 | void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) { |
duke@435 | 1739 | #ifndef PRODUCT |
kvn@2665 | 1740 | if (PrintOpto && VerifyLoopOptimizations) { |
duke@435 | 1741 | tty->print("Range Check Elimination "); |
duke@435 | 1742 | loop->dump_head(); |
kvn@2665 | 1743 | } else if (TraceLoopOpts) { |
kvn@2665 | 1744 | tty->print("RangeCheck "); |
kvn@2665 | 1745 | loop->dump_head(); |
duke@435 | 1746 | } |
duke@435 | 1747 | #endif |
kvn@2665 | 1748 | assert(RangeCheckElimination, ""); |
duke@435 | 1749 | CountedLoopNode *cl = loop->_head->as_CountedLoop(); |
kvn@2665 | 1750 | assert(cl->is_main_loop(), ""); |
kvn@2665 | 1751 | |
kvn@2665 | 1752 | // protect against stride not being a constant |
kvn@2665 | 1753 | if (!cl->stride_is_con()) |
kvn@2665 | 1754 | return; |
duke@435 | 1755 | |
duke@435 | 1756 | // Find the trip counter; we are iteration splitting based on it |
duke@435 | 1757 | Node *trip_counter = cl->phi(); |
duke@435 | 1758 | // Find the main loop limit; we will trim its iterations
duke@435 | 1759 | // so they never trip the end tests
duke@435 | 1760 | Node *main_limit = cl->limit(); |
kvn@2665 | 1761 | |
kvn@2665 | 1762 | // Need to find the main-loop zero-trip guard |
kvn@2665 | 1763 | Node *ctrl = cl->in(LoopNode::EntryControl); |
kvn@2665 | 1764 | assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, ""); |
kvn@2665 | 1765 | Node *iffm = ctrl->in(0); |
kvn@2665 | 1766 | assert(iffm->Opcode() == Op_If, ""); |
kvn@2665 | 1767 | Node *bolzm = iffm->in(1); |
kvn@2665 | 1768 | assert(bolzm->Opcode() == Op_Bool, ""); |
kvn@2665 | 1769 | Node *cmpzm = bolzm->in(1); |
kvn@2665 | 1770 | assert(cmpzm->is_Cmp(), ""); |
kvn@2665 | 1771 | Node *opqzm = cmpzm->in(2); |
kvn@2877 | 1772 | // Cannot optimize a loop if the zero-trip Opaque1 node has been optimized
kvn@2665 | 1773 | // away and another round of loop opts attempted.
kvn@2665 | 1774 | if (opqzm->Opcode() != Op_Opaque1) |
kvn@2665 | 1775 | return; |
kvn@2665 | 1776 | assert(opqzm->in(1) == main_limit, "do not understand situation"); |
kvn@2665 | 1777 | |
duke@435 | 1778 | // Find the pre-loop limit; we will expand its iterations so they
duke@435 | 1779 | // never trip the low tests.
duke@435 | 1780 | Node *p_f = iffm->in(0); |
kvn@2665 | 1781 | assert(p_f->Opcode() == Op_IfFalse, ""); |
duke@435 | 1782 | CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd(); |
kvn@2665 | 1783 | assert(pre_end->loopnode()->is_pre_loop(), ""); |
duke@435 | 1784 | Node *pre_opaq1 = pre_end->limit(); |
duke@435 | 1785 | // Occasionally it's possible for a pre-loop Opaque1 node to be |
duke@435 | 1786 | // optimized away and then another round of loop opts to be attempted.
duke@435 | 1787 | // We cannot optimize this particular loop in that case.
kvn@2665 | 1788 | if (pre_opaq1->Opcode() != Op_Opaque1) |
duke@435 | 1789 | return; |
duke@435 | 1790 | Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1; |
duke@435 | 1791 | Node *pre_limit = pre_opaq->in(1); |
duke@435 | 1792 | |
duke@435 | 1793 | // Where do we put new limit calculations |
duke@435 | 1794 | Node *pre_ctrl = pre_end->loopnode()->in(LoopNode::EntryControl); |
duke@435 | 1795 | |
duke@435 | 1796 | // Ensure the original loop limit is available from the |
duke@435 | 1797 | // pre-loop Opaque1 node. |
duke@435 | 1798 | Node *orig_limit = pre_opaq->original_loop_limit(); |
kvn@2665 | 1799 | if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP) |
duke@435 | 1800 | return; |
duke@435 | 1801 | |
duke@435 | 1802 | // Must know if it is a count-up or count-down loop
duke@435 | 1803 | |
duke@435 | 1804 | int stride_con = cl->stride_con(); |
duke@435 | 1805 | Node *zero = _igvn.intcon(0); |
duke@435 | 1806 | Node *one = _igvn.intcon(1); |
kvn@2877 | 1807 | // Use symmetrical int range [-max_jint,max_jint] |
kvn@2877 | 1808 | Node *mini = _igvn.intcon(-max_jint); |
duke@435 | 1809 | set_ctrl(zero, C->root()); |
duke@435 | 1810 | set_ctrl(one, C->root()); |
kvn@2877 | 1811 | set_ctrl(mini, C->root()); |
duke@435 | 1812 | |
duke@435 | 1813 | // Range checks that do not dominate the loop backedge (i.e.
duke@435 | 1814 | // conditionally executed) can lengthen the pre-loop limit beyond
duke@435 | 1815 | // the original loop limit. To prevent this, the pre limit is
duke@435 | 1816 | // (for stride > 0) MINed with the original loop limit (MAXed for
duke@435 | 1817 | // stride < 0) when some range check (rc) is conditionally
duke@435 | 1818 | // executed.
duke@435 | 1819 | bool conditional_rc = false; |
duke@435 | 1820 | |
duke@435 | 1821 | // Check loop body for tests of trip-counter plus loop-invariant vs |
duke@435 | 1822 | // loop-invariant. |
duke@435 | 1823 | for( uint i = 0; i < loop->_body.size(); i++ ) { |
duke@435 | 1824 | Node *iff = loop->_body[i]; |
duke@435 | 1825 | if( iff->Opcode() == Op_If ) { // Test? |
duke@435 | 1826 | |
duke@435 | 1827 | // Test is an IfNode, has 2 projections. If BOTH are in the loop |
duke@435 | 1828 | // we need loop unswitching instead of iteration splitting. |
duke@435 | 1829 | Node *exit = loop->is_loop_exit(iff); |
duke@435 | 1830 | if( !exit ) continue; |
duke@435 | 1831 | int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0; |
duke@435 | 1832 | |
duke@435 | 1833 | // Get boolean condition to test |
duke@435 | 1834 | Node *i1 = iff->in(1); |
duke@435 | 1835 | if( !i1->is_Bool() ) continue; |
duke@435 | 1836 | BoolNode *bol = i1->as_Bool(); |
duke@435 | 1837 | BoolTest b_test = bol->_test; |
duke@435 | 1838 | // Flip sense of test if exit condition is flipped |
duke@435 | 1839 | if( flip ) |
duke@435 | 1840 | b_test = b_test.negate(); |
duke@435 | 1841 | |
duke@435 | 1842 | // Get compare |
duke@435 | 1843 | Node *cmp = bol->in(1); |
duke@435 | 1844 | |
duke@435 | 1845 | // Look for trip_counter + offset vs limit |
duke@435 | 1846 | Node *rc_exp = cmp->in(1); |
duke@435 | 1847 | Node *limit = cmp->in(2); |
duke@435 | 1848 | jint scale_con= 1; // Assume trip counter not scaled |
duke@435 | 1849 | |
duke@435 | 1850 | Node *limit_c = get_ctrl(limit); |
duke@435 | 1851 | if( loop->is_member(get_loop(limit_c) ) ) { |
duke@435 | 1852 | // Compare might have operands swapped; commute them |
duke@435 | 1853 | b_test = b_test.commute(); |
duke@435 | 1854 | rc_exp = cmp->in(2); |
duke@435 | 1855 | limit = cmp->in(1); |
duke@435 | 1856 | limit_c = get_ctrl(limit); |
duke@435 | 1857 | if( loop->is_member(get_loop(limit_c) ) ) |
duke@435 | 1858 | continue; // Both inputs are loop varying; cannot RCE |
duke@435 | 1859 | } |
duke@435 | 1860 | // Here we know 'limit' is loop invariant |
duke@435 | 1861 | |
duke@435 | 1862 | // 'limit' may be pinned below the zero trip test (probably from a
duke@435 | 1863 | // previous round of RCE), in which case it can't be used in the
duke@435 | 1864 | // zero trip test expression which must occur before the zero test's if.
duke@435 | 1865 | if( limit_c == ctrl ) { |
duke@435 | 1866 | continue; // Don't rce this check but continue looking for other candidates. |
duke@435 | 1867 | } |
duke@435 | 1868 | |
duke@435 | 1869 | // Check for scaled induction variable plus an offset |
duke@435 | 1870 | Node *offset = NULL; |
duke@435 | 1871 | |
duke@435 | 1872 | if (!is_scaled_iv_plus_offset(rc_exp, trip_counter, &scale_con, &offset)) { |
duke@435 | 1873 | continue; |
duke@435 | 1874 | } |
duke@435 | 1875 | |
duke@435 | 1876 | Node *offset_c = get_ctrl(offset); |
duke@435 | 1877 | if( loop->is_member( get_loop(offset_c) ) ) |
duke@435 | 1878 | continue; // Offset is not really loop invariant |
duke@435 | 1879 | // Here we know 'offset' is loop invariant. |
duke@435 | 1880 | |
duke@435 | 1881 | // As above for the 'limit', the 'offset' may be pinned below the
duke@435 | 1882 | // zero trip test. |
duke@435 | 1883 | if( offset_c == ctrl ) { |
duke@435 | 1884 | continue; // Don't rce this check but continue looking for other candidates. |
duke@435 | 1885 | } |
kvn@2877 | 1886 | #ifdef ASSERT |
kvn@2877 | 1887 | if (TraceRangeLimitCheck) { |
kvn@2877 | 1888 | tty->print_cr("RC bool node%s", flip ? " flipped:" : ":"); |
kvn@2877 | 1889 | bol->dump(2); |
kvn@2877 | 1890 | } |
kvn@2877 | 1891 | #endif |
duke@435 | 1892 | // At this point we have the expression as: |
duke@435 | 1893 | // scale_con * trip_counter + offset :: limit |
duke@435 | 1894 | // where scale_con, offset and limit are loop invariant. Trip_counter |
duke@435 | 1895 | // monotonically increases by stride_con, a constant. Both (or either) |
duke@435 | 1896 | // stride_con and scale_con can be negative which will flip about the |
duke@435 | 1897 | // sense of the test. |
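// A typical instance (illustrative): an in-loop access a[i+3] produces the
// range check (i + 3) <u a.length, i.e. scale_con == 1, offset == 3 and
// limit == a.length, which the unsigned-compare case below splits into the
// two signed constraints 0 <= i+3 and i+3 < a.length.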
duke@435 | 1898 | |
duke@435 | 1899 | // Adjust pre and main loop limits to guard the correct iteration set |
duke@435 | 1900 | if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests |
duke@435 | 1901 | if( b_test._test == BoolTest::lt ) { // Range checks always use lt |
kvn@2877 | 1902 | // The underflow and overflow limits: 0 <= scale*I+offset < limit |
kvn@2877 | 1903 | add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit ); |
duke@435 | 1904 | if (!conditional_rc) { |
kvn@2915 | 1905 | // (0-offset)/scale could be outside of loop iterations range. |
kvn@2915 | 1906 | conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck; |
duke@435 | 1907 | } |
duke@435 | 1908 | } else { |
duke@435 | 1909 | #ifndef PRODUCT |
duke@435 | 1910 | if( PrintOpto ) |
duke@435 | 1911 | tty->print_cr("missed RCE opportunity"); |
duke@435 | 1912 | #endif |
duke@435 | 1913 | continue; // In release mode, ignore it |
duke@435 | 1914 | } |
duke@435 | 1915 | } else { // Otherwise work on normal compares |
duke@435 | 1916 | switch( b_test._test ) { |
kvn@2877 | 1917 | case BoolTest::gt: |
kvn@2877 | 1918 | // Fall into GE case |
kvn@2877 | 1919 | case BoolTest::ge: |
kvn@2877 | 1920 | // Convert (I*scale+offset) >= Limit to (I*(-scale)+(-offset)) <= -Limit |
duke@435 | 1921 | scale_con = -scale_con; |
duke@435 | 1922 | offset = new (C, 3) SubINode( zero, offset ); |
duke@435 | 1923 | register_new_node( offset, pre_ctrl ); |
duke@435 | 1924 | limit = new (C, 3) SubINode( zero, limit ); |
duke@435 | 1925 | register_new_node( limit, pre_ctrl ); |
duke@435 | 1926 | // Fall into LE case |
kvn@2877 | 1927 | case BoolTest::le: |
kvn@2877 | 1928 | if (b_test._test != BoolTest::gt) { |
kvn@2877 | 1929 | // Convert X <= Y to X < Y+1 |
kvn@2877 | 1930 | limit = new (C, 3) AddINode( limit, one ); |
kvn@2877 | 1931 | register_new_node( limit, pre_ctrl ); |
kvn@2877 | 1932 | } |
duke@435 | 1933 | // Fall into LT case |
duke@435 | 1934 | case BoolTest::lt: |
kvn@2877 | 1935 | // The underflow and overflow limits: MIN_INT <= scale*I+offset < limit |
kvn@2915 | 1936 | // Note: (MIN_INT+1 == -MAX_INT) is used instead of MIN_INT here |
kvn@2915 | 1937 | // to avoid problem with scale == -1: MIN_INT/(-1) == MIN_INT. |
kvn@2877 | 1938 | add_constraint( stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit ); |
duke@435 | 1939 | if (!conditional_rc) { |
kvn@2915 | 1940 | // ((MIN_INT+1)-offset)/scale could be outside of loop iterations range. |
kvn@2915 | 1941 | // Note: negative offset is replaced with 0 but (MIN_INT+1)/scale could |
kvn@2915 | 1942 | // still be outside of loop range. |
kvn@2915 | 1943 | conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck; |
duke@435 | 1944 | } |
duke@435 | 1945 | break; |
duke@435 | 1946 | default: |
duke@435 | 1947 | #ifndef PRODUCT |
duke@435 | 1948 | if( PrintOpto ) |
duke@435 | 1949 | tty->print_cr("missed RCE opportunity"); |
duke@435 | 1950 | #endif |
duke@435 | 1951 | continue; // Unhandled case |
duke@435 | 1952 | } |
duke@435 | 1953 | } |
duke@435 | 1954 | |
duke@435 | 1955 | // Kill the eliminated test |
duke@435 | 1956 | C->set_major_progress(); |
duke@435 | 1957 | Node *kill_con = _igvn.intcon( 1-flip ); |
duke@435 | 1958 | set_ctrl(kill_con, C->root()); |
duke@435 | 1959 | _igvn.hash_delete(iff); |
duke@435 | 1960 | iff->set_req(1, kill_con); |
duke@435 | 1961 | _igvn._worklist.push(iff); |
duke@435 | 1962 | // Find surviving projection |
duke@435 | 1963 | assert(iff->is_If(), ""); |
duke@435 | 1964 | ProjNode* dp = ((IfNode*)iff)->proj_out(1-flip); |
duke@435 | 1965 | // Find loads off the surviving projection; remove their control edge |
duke@435 | 1966 | for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) { |
duke@435 | 1967 | Node* cd = dp->fast_out(i); // Control-dependent node |
duke@435 | 1968 | if( cd->is_Load() ) { // Loads can now float around in the loop |
duke@435 | 1969 | _igvn.hash_delete(cd); |
duke@435 | 1970 | // Allow the load to float around in the loop, or before it |
duke@435 | 1971 | // but NOT before the pre-loop. |
duke@435 | 1972 | cd->set_req(0, ctrl); // ctrl, not NULL |
duke@435 | 1973 | _igvn._worklist.push(cd); |
duke@435 | 1974 | --i; |
duke@435 | 1975 | --imax; |
duke@435 | 1976 | } |
duke@435 | 1977 | } |
duke@435 | 1978 | |
duke@435 | 1979 | } // End of is IF |
duke@435 | 1980 | |
duke@435 | 1981 | } |
duke@435 | 1982 | |
duke@435 | 1983 | // Update loop limits |
duke@435 | 1984 | if (conditional_rc) { |
duke@435 | 1985 | pre_limit = (stride_con > 0) ? (Node*)new (C,3) MinINode(pre_limit, orig_limit) |
duke@435 | 1986 | : (Node*)new (C,3) MaxINode(pre_limit, orig_limit); |
duke@435 | 1987 | register_new_node(pre_limit, pre_ctrl); |
duke@435 | 1988 | } |
duke@435 | 1989 | _igvn.hash_delete(pre_opaq); |
duke@435 | 1990 | pre_opaq->set_req(1, pre_limit); |
duke@435 | 1991 | |
duke@435 | 1992 | // Note: we are making the main loop limit no longer precise;
duke@435 | 1993 | // need to round up based on stride. |
kvn@2877 | 1994 | cl->set_nonexact_trip_count(); |
kvn@2877 | 1995 | if (!LoopLimitCheck && stride_con != 1 && stride_con != -1) { // Cutout for common case |
duke@435 | 1996 | // "Standard" round-up logic: ([main_limit-init+(y-1)]/y)*y+init |
duke@435 | 1997 | // Hopefully, compiler will optimize for powers of 2. |
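// Illustrative example: init = 0, main_limit = 10, stride_con = 3 gives
// rndup = 2 and newlim = ((10 - 0 + 2)/3)*3 + 0 = 12, i.e. the limit is
// rounded up to the next stride multiple away from init.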
duke@435 | 1998 | Node *ctrl = get_ctrl(main_limit); |
duke@435 | 1999 | Node *stride = cl->stride(); |
duke@435 | 2000 | Node *init = cl->init_trip(); |
duke@435 | 2001 | Node *span = new (C, 3) SubINode(main_limit,init); |
duke@435 | 2002 | register_new_node(span,ctrl); |
duke@435 | 2003 | Node *rndup = _igvn.intcon(stride_con + ((stride_con>0)?-1:1)); |
duke@435 | 2004 | Node *add = new (C, 3) AddINode(span,rndup); |
duke@435 | 2005 | register_new_node(add,ctrl); |
duke@435 | 2006 | Node *div = new (C, 3) DivINode(0,add,stride); |
duke@435 | 2007 | register_new_node(div,ctrl); |
duke@435 | 2008 | Node *mul = new (C, 3) MulINode(div,stride); |
duke@435 | 2009 | register_new_node(mul,ctrl); |
duke@435 | 2010 | Node *newlim = new (C, 3) AddINode(mul,init); |
duke@435 | 2011 | register_new_node(newlim,ctrl); |
duke@435 | 2012 | main_limit = newlim; |
duke@435 | 2013 | } |
duke@435 | 2014 | |
duke@435 | 2015 | Node *main_cle = cl->loopexit(); |
duke@435 | 2016 | Node *main_bol = main_cle->in(1); |
duke@435 | 2017 | // Hacking loop bounds; need private copies of exit test |
duke@435 | 2018 | if( main_bol->outcnt() > 1 ) {// BoolNode shared? |
duke@435 | 2019 | _igvn.hash_delete(main_cle); |
duke@435 | 2020 | main_bol = main_bol->clone();// Clone a private BoolNode |
duke@435 | 2021 | register_new_node( main_bol, main_cle->in(0) ); |
duke@435 | 2022 | main_cle->set_req(1,main_bol); |
duke@435 | 2023 | } |
duke@435 | 2024 | Node *main_cmp = main_bol->in(1); |
duke@435 | 2025 | if( main_cmp->outcnt() > 1 ) { // CmpNode shared? |
duke@435 | 2026 | _igvn.hash_delete(main_bol); |
duke@435 | 2027 | main_cmp = main_cmp->clone();// Clone a private CmpNode |
duke@435 | 2028 | register_new_node( main_cmp, main_cle->in(0) ); |
duke@435 | 2029 | main_bol->set_req(1,main_cmp); |
duke@435 | 2030 | } |
duke@435 | 2031 | // Hack the now-private loop bounds |
duke@435 | 2032 | _igvn.hash_delete(main_cmp); |
duke@435 | 2033 | main_cmp->set_req(2, main_limit); |
duke@435 | 2034 | _igvn._worklist.push(main_cmp); |
duke@435 | 2035 | // The OpaqueNode is unshared by design |
duke@435 | 2036 | _igvn.hash_delete(opqzm); |
duke@435 | 2037 | assert( opqzm->outcnt() == 1, "cannot hack shared node" ); |
duke@435 | 2038 | opqzm->set_req(1,main_limit); |
duke@435 | 2039 | _igvn._worklist.push(opqzm); |
duke@435 | 2040 | } |
duke@435 | 2041 | |
duke@435 | 2042 | //------------------------------DCE_loop_body---------------------------------- |
duke@435 | 2043 | // Remove simplistic dead code from loop body |
duke@435 | 2044 | void IdealLoopTree::DCE_loop_body() { |
duke@435 | 2045 | for( uint i = 0; i < _body.size(); i++ ) |
duke@435 | 2046 | if( _body.at(i)->outcnt() == 0 ) |
duke@435 | 2047 | _body.map( i--, _body.pop() ); |
duke@435 | 2048 | } |
duke@435 | 2049 | |
duke@435 | 2050 | |
duke@435 | 2051 | //------------------------------adjust_loop_exit_prob-------------------------- |
duke@435 | 2052 | // Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage. |
duke@435 | 2053 | // Replace with a 1-in-10 exit guess. |
duke@435 | 2054 | void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) { |
duke@435 | 2055 | Node *test = tail(); |
duke@435 | 2056 | while( test != _head ) { |
duke@435 | 2057 | uint top = test->Opcode(); |
duke@435 | 2058 | if( top == Op_IfTrue || top == Op_IfFalse ) { |
duke@435 | 2059 | int test_con = ((ProjNode*)test)->_con; |
duke@435 | 2060 | assert(top == (uint)(test_con? Op_IfTrue: Op_IfFalse), "sanity"); |
duke@435 | 2061 | IfNode *iff = test->in(0)->as_If(); |
duke@435 | 2062 | if( iff->outcnt() == 2 ) { // Ignore dead tests |
duke@435 | 2063 | Node *bol = iff->in(1); |
duke@435 | 2064 | if( bol && bol->req() > 1 && bol->in(1) && |
duke@435 | 2065 | ((bol->in(1)->Opcode() == Op_StorePConditional ) || |
kvn@855 | 2066 | (bol->in(1)->Opcode() == Op_StoreIConditional ) || |
duke@435 | 2067 | (bol->in(1)->Opcode() == Op_StoreLConditional ) || |
duke@435 | 2068 | (bol->in(1)->Opcode() == Op_CompareAndSwapI ) || |
duke@435 | 2069 | (bol->in(1)->Opcode() == Op_CompareAndSwapL ) || |
coleenp@548 | 2070 | (bol->in(1)->Opcode() == Op_CompareAndSwapP ) || |
coleenp@548 | 2071 | (bol->in(1)->Opcode() == Op_CompareAndSwapN ))) |
duke@435 | 2072 | return; // Allocation loops RARELY take backedge |
duke@435 | 2073 | // Find the OTHER exit path from the IF |
duke@435 | 2074 | Node* ex = iff->proj_out(1-test_con); |
duke@435 | 2075 | float p = iff->_prob; |
duke@435 | 2076 | if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) { |
duke@435 | 2077 | if( top == Op_IfTrue ) { |
duke@435 | 2078 | if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) { |
duke@435 | 2079 | iff->_prob = PROB_STATIC_FREQUENT; |
duke@435 | 2080 | } |
duke@435 | 2081 | } else { |
duke@435 | 2082 | if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) { |
duke@435 | 2083 | iff->_prob = PROB_STATIC_INFREQUENT; |
duke@435 | 2084 | } |
duke@435 | 2085 | } |
duke@435 | 2086 | } |
duke@435 | 2087 | } |
duke@435 | 2088 | } |
duke@435 | 2089 | test = phase->idom(test); |
duke@435 | 2090 | } |
duke@435 | 2091 | } |
duke@435 | 2092 | |
duke@435 | 2093 | |
duke@435 | 2094 | //------------------------------policy_do_remove_empty_loop-------------------- |
duke@435 | 2095 | // Micro-benchmark spamming. Policy is to always remove empty loops. |
duke@435 | 2096 | // The 'DO' part is to replace the trip counter with the value it will |
duke@435 | 2097 | // have on the last iteration. This will break the loop. |
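// For example, 'for (int i = 0; i < n; i++) {}' with no remaining side
// effects collapses once the trip-counter phi is replaced with its final
// value, because the backedge test then constant-folds away.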
duke@435 | 2098 | bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) { |
duke@435 | 2099 |   // Body size must not exceed that of an empty loop |
kvn@2735 | 2100 | if (_body.size() > EMPTY_LOOP_SIZE) |
kvn@2665 | 2101 | return false; |
duke@435 | 2102 | |
kvn@2665 | 2103 | if (!_head->is_CountedLoop()) |
kvn@2665 | 2104 | return false; // Dead loop |
duke@435 | 2105 | CountedLoopNode *cl = _head->as_CountedLoop(); |
kvn@3048 | 2106 | if (!cl->is_valid_counted_loop()) |
kvn@2665 | 2107 | return false; // Malformed loop |
kvn@2665 | 2108 | if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)))) |
duke@435 | 2109 | return false; // Infinite loop |
never@2685 | 2110 | |
duke@435 | 2111 | #ifdef ASSERT |
duke@435 | 2112 |   // Ensure there is only one phi, which is the iv. |
duke@435 | 2113 | Node* iv = NULL; |
duke@435 | 2114 | for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) { |
duke@435 | 2115 | Node* n = cl->fast_out(i); |
duke@435 | 2116 | if (n->Opcode() == Op_Phi) { |
duke@435 | 2117 | assert(iv == NULL, "Too many phis" ); |
duke@435 | 2118 | iv = n; |
duke@435 | 2119 | } |
duke@435 | 2120 | } |
duke@435 | 2121 | assert(iv == cl->phi(), "Wrong phi" ); |
duke@435 | 2122 | #endif |
never@2685 | 2123 | |
never@2685 | 2124 |   // Main and post loops have an explicitly created zero trip guard. |
never@2685 | 2125 | bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop(); |
never@2685 | 2126 | if (needs_guard) { |
kvn@2747 | 2127 |     // Skip the guard if the init and limit value ranges cannot overlap. |
kvn@2747 | 2128 | const TypeInt* init_t = phase->_igvn.type(cl->init_trip())->is_int(); |
kvn@2747 | 2129 | const TypeInt* limit_t = phase->_igvn.type(cl->limit())->is_int(); |
kvn@2747 | 2130 | int stride_con = cl->stride_con(); |
kvn@2747 | 2131 | if (stride_con > 0) { |
kvn@2747 | 2132 | needs_guard = (init_t->_hi >= limit_t->_lo); |
kvn@2747 | 2133 | } else { |
kvn@2747 | 2134 | needs_guard = (init_t->_lo <= limit_t->_hi); |
kvn@2747 | 2135 | } |
kvn@2747 | 2136 | } |
kvn@2747 | 2137 | if (needs_guard) { |
never@2685 | 2138 | // Check for an obvious zero trip guard. |
kvn@2727 | 2139 | Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl)); |
never@2685 | 2140 | if (inctrl->Opcode() == Op_IfTrue) { |
never@2685 | 2141 | // The test should look like just the backedge of a CountedLoop |
never@2685 | 2142 | Node* iff = inctrl->in(0); |
never@2685 | 2143 | if (iff->is_If()) { |
never@2685 | 2144 | Node* bol = iff->in(1); |
never@2685 | 2145 | if (bol->is_Bool() && bol->as_Bool()->_test._test == cl->loopexit()->test_trip()) { |
never@2685 | 2146 | Node* cmp = bol->in(1); |
never@2685 | 2147 | if (cmp->is_Cmp() && cmp->in(1) == cl->init_trip() && cmp->in(2) == cl->limit()) { |
never@2685 | 2148 | needs_guard = false; |
never@2685 | 2149 | } |
never@2685 | 2150 | } |
never@2685 | 2151 | } |
never@2685 | 2152 | } |
never@2685 | 2153 | } |
never@2685 | 2154 | |
never@2685 | 2155 | #ifndef PRODUCT |
never@2685 | 2156 | if (PrintOpto) { |
never@2685 | 2157 | tty->print("Removing empty loop with%s zero trip guard", needs_guard ? "out" : ""); |
never@2685 | 2158 | this->dump_head(); |
never@2685 | 2159 | } else if (TraceLoopOpts) { |
never@2685 | 2160 | tty->print("Empty with%s zero trip guard ", needs_guard ? "out" : ""); |
never@2685 | 2161 | this->dump_head(); |
never@2685 | 2162 | } |
never@2685 | 2163 | #endif |
never@2685 | 2164 | |
never@2685 | 2165 | if (needs_guard) { |
never@2685 | 2166 | // Peel the loop to ensure there's a zero trip guard |
never@2685 | 2167 | Node_List old_new; |
never@2685 | 2168 | phase->do_peeling(this, old_new); |
never@2685 | 2169 | } |
never@2685 | 2170 | |
duke@435 | 2171 | // Replace the phi at loop head with the final value of the last |
duke@435 | 2172 | // iteration. Then the CountedLoopEnd will collapse (backedge never |
duke@435 | 2173 | // taken) and all loop-invariant uses of the exit values will be correct. |
duke@435 | 2174 | Node *phi = cl->phi(); |
kvn@2877 | 2175 | Node *exact_limit = phase->exact_limit(this); |
kvn@2877 | 2176 | if (exact_limit != cl->limit()) { |
kvn@2877 | 2177 | // We also need to replace the original limit to collapse loop exit. |
kvn@2877 | 2178 | Node* cmp = cl->loopexit()->cmp_node(); |
kvn@2877 | 2179 | assert(cl->limit() == cmp->in(2), "sanity"); |
kvn@2877 | 2180 | phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist |
kvn@2877 | 2181 | phase->_igvn.hash_delete(cmp); |
kvn@2877 | 2182 | cmp->set_req(2, exact_limit); |
kvn@2877 | 2183 | phase->_igvn._worklist.push(cmp); // put cmp on worklist |
kvn@2877 | 2184 | } |
kvn@2877 | 2185 | // Note: the final value after increment should not overflow since |
kvn@2877 | 2186 |   // the counted loop has a limit check predicate. |
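  // For example, with init == 0, stride == 1 and exact_limit == 10 the phi's
  // value on the last iteration is 9, i.e. exact_limit - stride.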
kvn@2877 | 2187 | Node *final = new (phase->C, 3) SubINode( exact_limit, cl->stride() ); |
duke@435 | 2188 | phase->register_new_node(final,cl->in(LoopNode::EntryControl)); |
kvn@1976 | 2189 | phase->_igvn.replace_node(phi,final); |
duke@435 | 2190 | phase->C->set_major_progress(); |
duke@435 | 2191 | return true; |
duke@435 | 2192 | } |
duke@435 | 2193 | |
kvn@2747 | 2194 | //------------------------------policy_do_one_iteration_loop------------------- |
kvn@2747 | 2195 | // Convert one iteration loop into normal code. |
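// A counted loop with an exact trip count of one, e.g.
// 'for (int i = 0; i < 1; i++) { ... }', runs its body exactly once, so the
// trip-counter phi can simply be replaced with the initial value.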
kvn@2747 | 2196 | bool IdealLoopTree::policy_do_one_iteration_loop( PhaseIdealLoop *phase ) { |
kvn@2747 | 2197 | if (!_head->as_Loop()->is_valid_counted_loop()) |
kvn@2747 | 2198 | return false; // Only for counted loop |
kvn@2747 | 2199 | |
kvn@2747 | 2200 | CountedLoopNode *cl = _head->as_CountedLoop(); |
kvn@2747 | 2201 | if (!cl->has_exact_trip_count() || cl->trip_count() != 1) { |
kvn@2747 | 2202 | return false; |
kvn@2747 | 2203 | } |
kvn@2747 | 2204 | |
kvn@2747 | 2205 | #ifndef PRODUCT |
kvn@2747 | 2206 |   if (TraceLoopOpts) { |
kvn@2747 | 2207 | tty->print("OneIteration "); |
kvn@2747 | 2208 | this->dump_head(); |
kvn@2747 | 2209 | } |
kvn@2747 | 2210 | #endif |
kvn@2747 | 2211 | |
kvn@2747 | 2212 | Node *init_n = cl->init_trip(); |
kvn@2747 | 2213 | #ifdef ASSERT |
kvn@2747 | 2214 | // Loop boundaries should be constant since trip count is exact. |
kvn@2747 | 2215 | assert(init_n->get_int() + cl->stride_con() >= cl->limit()->get_int(), "should be one iteration"); |
kvn@2747 | 2216 | #endif |
kvn@2747 | 2217 | // Replace the phi at loop head with the value of the init_trip. |
kvn@2747 | 2218 | // Then the CountedLoopEnd will collapse (backedge will not be taken) |
kvn@2747 | 2219 | // and all loop-invariant uses of the exit values will be correct. |
kvn@2747 | 2220 | phase->_igvn.replace_node(cl->phi(), cl->init_trip()); |
kvn@2747 | 2221 | phase->C->set_major_progress(); |
kvn@2747 | 2222 | return true; |
kvn@2747 | 2223 | } |
duke@435 | 2224 | |
duke@435 | 2225 | //============================================================================= |
duke@435 | 2226 | //------------------------------iteration_split_impl--------------------------- |
never@836 | 2227 | bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) { |
kvn@2747 | 2228 | // Compute exact loop trip count if possible. |
kvn@2747 | 2229 | compute_exact_trip_count(phase); |
kvn@2747 | 2230 | |
kvn@2747 | 2231 | // Convert one iteration loop into normal code. |
kvn@2747 | 2232 | if (policy_do_one_iteration_loop(phase)) |
kvn@2747 | 2233 | return true; |
kvn@2747 | 2234 | |
duke@435 | 2235 | // Check and remove empty loops (spam micro-benchmarks) |
kvn@2747 | 2236 | if (policy_do_remove_empty_loop(phase)) |
cfang@1607 | 2237 | return true; // Here we removed an empty loop |
duke@435 | 2238 | |
duke@435 | 2239 | bool should_peel = policy_peeling(phase); // Should we peel? |
duke@435 | 2240 | |
duke@435 | 2241 | bool should_unswitch = policy_unswitching(phase); |
duke@435 | 2242 | |
duke@435 | 2243 | // Non-counted loops may be peeled; exactly 1 iteration is peeled. |
duke@435 | 2244 | // This removes loop-invariant tests (usually null checks). |
kvn@2747 | 2245 | if (!_head->is_CountedLoop()) { // Non-counted loop |
duke@435 | 2246 | if (PartialPeelLoop && phase->partial_peel(this, old_new)) { |
never@836 | 2247 | // Partial peel succeeded so terminate this round of loop opts |
never@836 | 2248 | return false; |
duke@435 | 2249 | } |
kvn@2747 | 2250 | if (should_peel) { // Should we peel? |
duke@435 | 2251 | #ifndef PRODUCT |
duke@435 | 2252 | if (PrintOpto) tty->print_cr("should_peel"); |
duke@435 | 2253 | #endif |
duke@435 | 2254 | phase->do_peeling(this,old_new); |
kvn@2747 | 2255 | } else if (should_unswitch) { |
duke@435 | 2256 | phase->do_unswitching(this, old_new); |
duke@435 | 2257 | } |
never@836 | 2258 | return true; |
duke@435 | 2259 | } |
duke@435 | 2260 | CountedLoopNode *cl = _head->as_CountedLoop(); |
duke@435 | 2261 | |
kvn@3048 | 2262 | if (!cl->is_valid_counted_loop()) return true; // Ignore various kinds of broken loops |
duke@435 | 2263 | |
duke@435 | 2264 | // Do nothing special to pre- and post- loops |
kvn@2747 | 2265 | if (cl->is_pre_loop() || cl->is_post_loop()) return true; |
duke@435 | 2266 | |
duke@435 | 2267 | // Compute loop trip count from profile data |
duke@435 | 2268 | compute_profile_trip_cnt(phase); |
duke@435 | 2269 | |
duke@435 | 2270 | // Before attempting fancy unrolling, RCE or alignment, see if we want |
duke@435 | 2271 | // to completely unroll this loop or do loop unswitching. |
kvn@2747 | 2272 | if (cl->is_normal_loop()) { |
cfang@1224 | 2273 | if (should_unswitch) { |
cfang@1224 | 2274 | phase->do_unswitching(this, old_new); |
cfang@1224 | 2275 | return true; |
cfang@1224 | 2276 | } |
duke@435 | 2277 | bool should_maximally_unroll = policy_maximally_unroll(phase); |
kvn@2747 | 2278 | if (should_maximally_unroll) { |
duke@435 | 2279 | // Here we did some unrolling and peeling. Eventually we will |
duke@435 | 2280 | // completely unroll this loop and it will no longer be a loop. |
duke@435 | 2281 | phase->do_maximally_unroll(this,old_new); |
never@836 | 2282 | return true; |
duke@435 | 2283 | } |
duke@435 | 2284 | } |
duke@435 | 2285 | |
kvn@2735 | 2286 | // Skip next optimizations if running low on nodes. Note that |
kvn@2735 | 2287 | // policy_unswitching and policy_maximally_unroll have this check. |
kvn@2735 | 2288 | uint nodes_left = MaxNodeLimit - phase->C->unique(); |
kvn@2735 | 2289 | if ((2 * _body.size()) > nodes_left) { |
kvn@2735 | 2290 | return true; |
kvn@2735 | 2291 | } |
duke@435 | 2292 | |
duke@435 | 2293 | // Counted loops may be peeled, may need some iterations run up |
duke@435 | 2294 | // front for RCE, and may want to align loop refs to a cache |
duke@435 | 2295 | // line. Thus we clone a full loop up front whose trip count is |
duke@435 | 2296 | // at least 1 (if peeling), but may be several more. |
duke@435 | 2297 | |
duke@435 | 2298 | // The main loop will start cache-line aligned with at least 1 |
duke@435 | 2299 | // iteration of the unrolled body (zero-trip test required) and |
duke@435 | 2300 | // will have some range checks removed. |
duke@435 | 2301 | |
duke@435 | 2302 | // A post-loop will finish any odd iterations (leftover after |
duke@435 | 2303 | // unrolling), plus any needed for RCE purposes. |
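  //
  // Schematically:
  //   pre-loop : a few iterations with full checks, run until the main
  //              loop's alignment and range-check assumptions hold;
  //   main-loop: the bulk of the iterations, unrolled, with range checks
  //              removed;
  //   post-loop: the leftover iterations, again with full checks.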
duke@435 | 2304 | |
duke@435 | 2305 | bool should_unroll = policy_unroll(phase); |
duke@435 | 2306 | |
duke@435 | 2307 | bool should_rce = policy_range_check(phase); |
duke@435 | 2308 | |
duke@435 | 2309 | bool should_align = policy_align(phase); |
duke@435 | 2310 | |
duke@435 | 2311 | // If not RCE'ing (iteration splitting) or Aligning, then we do not |
duke@435 | 2312 | // need a pre-loop. We may still need to peel an initial iteration but |
duke@435 | 2313 | // we will not be needing an unknown number of pre-iterations. |
duke@435 | 2314 | // |
duke@435 | 2315 | // Basically, if may_rce_align reports FALSE first time through, |
duke@435 | 2316 | // we will not be able to later do RCE or Aligning on this loop. |
duke@435 | 2317 | bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align; |
duke@435 | 2318 | |
duke@435 | 2319 | // If we have any of these conditions (RCE, alignment, unrolling) met, then |
duke@435 | 2320 | // we switch to the pre-/main-/post-loop model. This model also covers |
duke@435 | 2321 | // peeling. |
kvn@2747 | 2322 | if (should_rce || should_align || should_unroll) { |
kvn@2747 | 2323 | if (cl->is_normal_loop()) // Convert to 'pre/main/post' loops |
duke@435 | 2324 | phase->insert_pre_post_loops(this,old_new, !may_rce_align); |
duke@435 | 2325 | |
duke@435 | 2326 | // Adjust the pre- and main-loop limits to let the pre and post loops run |
duke@435 | 2327 | // with full checks, but the main-loop with no checks. Remove said |
duke@435 | 2328 | // checks from the main body. |
kvn@2747 | 2329 | if (should_rce) |
duke@435 | 2330 | phase->do_range_check(this,old_new); |
duke@435 | 2331 | |
duke@435 | 2332 | // Double loop body for unrolling. Adjust the minimum-trip test (will do |
duke@435 | 2333 | // twice as many iterations as before) and the main body limit (only do |
duke@435 | 2334 | // an even number of trips). If we are peeling, we might enable some RCE |
duke@435 | 2335 | // and we'd rather unroll the post-RCE'd loop SO... do not unroll if |
duke@435 | 2336 | // peeling. |
kvn@2747 | 2337 | if (should_unroll && !should_peel) |
kvn@2747 | 2338 | phase->do_unroll(this,old_new, true); |
duke@435 | 2339 | |
duke@435 | 2340 | // Adjust the pre-loop limits to align the main body |
duke@435 | 2341 | // iterations. |
kvn@2747 | 2342 | if (should_align) |
duke@435 | 2343 | Unimplemented(); |
duke@435 | 2344 | |
duke@435 | 2345 | } else { // Else we have an unchanged counted loop |
kvn@2747 | 2346 | if (should_peel) // Might want to peel but do nothing else |
duke@435 | 2347 | phase->do_peeling(this,old_new); |
duke@435 | 2348 | } |
never@836 | 2349 | return true; |
duke@435 | 2350 | } |
duke@435 | 2351 | |
duke@435 | 2352 | |
duke@435 | 2353 | //============================================================================= |
duke@435 | 2354 | //------------------------------iteration_split-------------------------------- |
never@836 | 2355 | bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) { |
duke@435 | 2356 | // Recursively iteration split nested loops |
kvn@2665 | 2357 | if (_child && !_child->iteration_split(phase, old_new)) |
never@836 | 2358 | return false; |
duke@435 | 2359 | |
duke@435 | 2360 | // Clean out prior deadwood |
duke@435 | 2361 | DCE_loop_body(); |
duke@435 | 2362 | |
duke@435 | 2363 | |
duke@435 | 2364 | // Look for loop-exit tests with my 50/50 guesses from the Parsing stage. |
duke@435 | 2365 | // Replace with a 1-in-10 exit guess. |
kvn@2665 | 2366 | if (_parent /*not the root loop*/ && |
duke@435 | 2367 | !_irreducible && |
duke@435 | 2368 | // Also ignore the occasional dead backedge |
kvn@2665 | 2369 | !tail()->is_top()) { |
duke@435 | 2370 | adjust_loop_exit_prob(phase); |
duke@435 | 2371 | } |
duke@435 | 2372 | |
duke@435 | 2373 | // Gate unrolling, RCE and peeling efforts. |
kvn@2665 | 2374 | if (!_child && // If not an inner loop, do not split |
duke@435 | 2375 | !_irreducible && |
kvn@474 | 2376 | _allow_optimizations && |
kvn@2665 | 2377 | !tail()->is_top()) { // Also ignore the occasional dead backedge |
duke@435 | 2378 | if (!_has_call) { |
kvn@2665 | 2379 | if (!iteration_split_impl(phase, old_new)) { |
cfang@1607 | 2380 | return false; |
cfang@1607 | 2381 | } |
duke@435 | 2382 | } else if (policy_unswitching(phase)) { |
duke@435 | 2383 | phase->do_unswitching(this, old_new); |
duke@435 | 2384 | } |
duke@435 | 2385 | } |
duke@435 | 2386 | |
duke@435 | 2387 | // Minor offset re-organization to remove loop-fallout uses of |
kvn@2665 | 2388 | // trip counter when there was no major reshaping. |
kvn@2665 | 2389 | phase->reorg_offsets(this); |
kvn@2665 | 2390 | |
kvn@2665 | 2391 | if (_next && !_next->iteration_split(phase, old_new)) |
never@836 | 2392 | return false; |
never@836 | 2393 | return true; |
duke@435 | 2394 | } |
cfang@1607 | 2395 | |
cfang@1607 | 2396 | |
kvn@2727 | 2397 | //============================================================================= |
never@2118 | 2398 | // Process all the loops in the loop tree and replace any fill |
never@2118 | 2399 | // patterns with an intrinsic version. |
never@2118 | 2400 | bool PhaseIdealLoop::do_intrinsify_fill() { |
never@2118 | 2401 | bool changed = false; |
never@2118 | 2402 | for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) { |
never@2118 | 2403 | IdealLoopTree* lpt = iter.current(); |
never@2118 | 2404 | changed |= intrinsify_fill(lpt); |
never@2118 | 2405 | } |
never@2118 | 2406 | return changed; |
never@2118 | 2407 | } |
never@2118 | 2408 | |
never@2118 | 2409 | |
never@2118 | 2410 | // Examine an inner loop looking for a single store of an invariant |
never@2118 | 2411 | // value in a unit stride loop. |
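// A candidate looks like 'for (int i = init; i < limit; i++) a[i] = value;'
// where 'value' is loop invariant and the store is the only real work in the
// body.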
never@2118 | 2412 | bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value, |
never@2118 | 2413 | Node*& shift, Node*& con) { |
never@2118 | 2414 | const char* msg = NULL; |
never@2118 | 2415 | Node* msg_node = NULL; |
never@2118 | 2416 | |
never@2118 | 2417 | store_value = NULL; |
never@2118 | 2418 | con = NULL; |
never@2118 | 2419 | shift = NULL; |
never@2118 | 2420 | |
never@2118 | 2421 | // Process the loop looking for stores. If there are multiple |
never@2118 | 2422 |   // stores or extra control flow, give up at this point. |
never@2118 | 2423 | CountedLoopNode* head = lpt->_head->as_CountedLoop(); |
never@2118 | 2424 | for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { |
never@2118 | 2425 | Node* n = lpt->_body.at(i); |
never@2118 | 2426 | if (n->outcnt() == 0) continue; // Ignore dead |
never@2118 | 2427 | if (n->is_Store()) { |
never@2118 | 2428 | if (store != NULL) { |
never@2118 | 2429 | msg = "multiple stores"; |
never@2118 | 2430 | break; |
never@2118 | 2431 | } |
never@2118 | 2432 | int opc = n->Opcode(); |
never@2118 | 2433 | if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreCM) { |
never@2118 | 2434 | msg = "oop fills not handled"; |
never@2118 | 2435 | break; |
never@2118 | 2436 | } |
never@2118 | 2437 | Node* value = n->in(MemNode::ValueIn); |
never@2118 | 2438 | if (!lpt->is_invariant(value)) { |
never@2118 | 2439 | msg = "variant store value"; |
never@2140 | 2440 | } else if (!_igvn.type(n->in(MemNode::Address))->isa_aryptr()) { |
never@2140 | 2441 | msg = "not array address"; |
never@2118 | 2442 | } |
never@2118 | 2443 | store = n; |
never@2118 | 2444 | store_value = value; |
never@2118 | 2445 | } else if (n->is_If() && n != head->loopexit()) { |
never@2118 | 2446 | msg = "extra control flow"; |
never@2118 | 2447 | msg_node = n; |
never@2118 | 2448 | } |
never@2118 | 2449 | } |
never@2118 | 2450 | |
never@2118 | 2451 | if (store == NULL) { |
never@2118 | 2452 | // No store in loop |
never@2118 | 2453 | return false; |
never@2118 | 2454 | } |
never@2118 | 2455 | |
never@2118 | 2456 | if (msg == NULL && head->stride_con() != 1) { |
never@2118 | 2457 | // could handle negative strides too |
never@2118 | 2458 | if (head->stride_con() < 0) { |
never@2118 | 2459 | msg = "negative stride"; |
never@2118 | 2460 | } else { |
never@2118 | 2461 | msg = "non-unit stride"; |
never@2118 | 2462 | } |
never@2118 | 2463 | } |
never@2118 | 2464 | |
never@2118 | 2465 | if (msg == NULL && !store->in(MemNode::Address)->is_AddP()) { |
never@2118 | 2466 | msg = "can't handle store address"; |
never@2118 | 2467 | msg_node = store->in(MemNode::Address); |
never@2118 | 2468 | } |
never@2118 | 2469 | |
never@2168 | 2470 | if (msg == NULL && |
never@2168 | 2471 | (!store->in(MemNode::Memory)->is_Phi() || |
never@2168 | 2472 | store->in(MemNode::Memory)->in(LoopNode::LoopBackControl) != store)) { |
never@2168 | 2473 | msg = "store memory isn't proper phi"; |
never@2168 | 2474 | msg_node = store->in(MemNode::Memory); |
never@2168 | 2475 | } |
never@2168 | 2476 | |
never@2118 | 2477 | // Make sure there is an appropriate fill routine |
never@2118 | 2478 | BasicType t = store->as_Mem()->memory_type(); |
never@2118 | 2479 | const char* fill_name; |
never@2118 | 2480 | if (msg == NULL && |
never@2118 | 2481 | StubRoutines::select_fill_function(t, false, fill_name) == NULL) { |
never@2118 | 2482 | msg = "unsupported store"; |
never@2118 | 2483 | msg_node = store; |
never@2118 | 2484 | } |
never@2118 | 2485 | |
never@2118 | 2486 | if (msg != NULL) { |
never@2118 | 2487 | #ifndef PRODUCT |
never@2118 | 2488 | if (TraceOptimizeFill) { |
never@2118 | 2489 | tty->print_cr("not fill intrinsic candidate: %s", msg); |
never@2118 | 2490 | if (msg_node != NULL) msg_node->dump(); |
never@2118 | 2491 | } |
never@2118 | 2492 | #endif |
never@2118 | 2493 | return false; |
never@2118 | 2494 | } |
never@2118 | 2495 | |
never@2118 | 2496 | // Make sure the address expression can be handled. It should be |
never@2118 | 2497 | // head->phi * elsize + con. head->phi might have a ConvI2L. |
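  // For an int[] store on a 64-bit VM the unpacked elements are typically the
  // constant header offset (the 'con') and 'LShiftX(ConvI2L(head->phi), 2)'
  // supplying the scaled index (the 'shift'); byte-sized elements need no
  // shift at all.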
never@2118 | 2498 | Node* elements[4]; |
never@2118 | 2499 | Node* conv = NULL; |
never@2140 | 2500 | bool found_index = false; |
never@2118 | 2501 | int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements)); |
never@2118 | 2502 | for (int e = 0; e < count; e++) { |
never@2118 | 2503 | Node* n = elements[e]; |
never@2118 | 2504 | if (n->is_Con() && con == NULL) { |
never@2118 | 2505 | con = n; |
never@2118 | 2506 | } else if (n->Opcode() == Op_LShiftX && shift == NULL) { |
never@2118 | 2507 | Node* value = n->in(1); |
never@2118 | 2508 | #ifdef _LP64 |
never@2118 | 2509 | if (value->Opcode() == Op_ConvI2L) { |
never@2118 | 2510 | conv = value; |
never@2118 | 2511 | value = value->in(1); |
never@2118 | 2512 | } |
never@2118 | 2513 | #endif |
never@2118 | 2514 | if (value != head->phi()) { |
never@2118 | 2515 | msg = "unhandled shift in address"; |
never@2118 | 2516 | } else { |
never@2730 | 2517 | if (type2aelembytes(store->as_Mem()->memory_type(), true) != (1 << n->in(2)->get_int())) { |
never@2730 | 2518 | msg = "scale doesn't match"; |
never@2730 | 2519 | } else { |
never@2730 | 2520 | found_index = true; |
never@2730 | 2521 | shift = n; |
never@2730 | 2522 | } |
never@2118 | 2523 | } |
never@2118 | 2524 | } else if (n->Opcode() == Op_ConvI2L && conv == NULL) { |
never@2118 | 2525 | if (n->in(1) == head->phi()) { |
never@2140 | 2526 | found_index = true; |
never@2118 | 2527 | conv = n; |
never@2118 | 2528 | } else { |
never@2118 | 2529 | msg = "unhandled input to ConvI2L"; |
never@2118 | 2530 | } |
never@2118 | 2531 | } else if (n == head->phi()) { |
never@2118 | 2532 | // no shift, check below for allowed cases |
never@2140 | 2533 | found_index = true; |
never@2118 | 2534 | } else { |
never@2118 | 2535 | msg = "unhandled node in address"; |
never@2118 | 2536 | msg_node = n; |
never@2118 | 2537 | } |
never@2118 | 2538 | } |
never@2118 | 2539 | |
never@2118 | 2540 | if (count == -1) { |
never@2118 | 2541 | msg = "malformed address expression"; |
never@2118 | 2542 | msg_node = store; |
never@2118 | 2543 | } |
never@2118 | 2544 | |
never@2140 | 2545 | if (!found_index) { |
never@2140 | 2546 | msg = "missing use of index"; |
never@2140 | 2547 | } |
never@2140 | 2548 | |
never@2118 | 2549 | // byte sized items won't have a shift |
never@2118 | 2550 | if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) { |
never@2118 | 2551 | msg = "can't find shift"; |
never@2118 | 2552 | msg_node = store; |
never@2118 | 2553 | } |
never@2118 | 2554 | |
never@2118 | 2555 | if (msg != NULL) { |
never@2118 | 2556 | #ifndef PRODUCT |
never@2118 | 2557 | if (TraceOptimizeFill) { |
never@2118 | 2558 | tty->print_cr("not fill intrinsic: %s", msg); |
never@2118 | 2559 | if (msg_node != NULL) msg_node->dump(); |
never@2118 | 2560 | } |
never@2118 | 2561 | #endif |
never@2118 | 2562 | return false; |
never@2118 | 2563 | } |
never@2118 | 2564 | |
never@2118 | 2565 |   // Now make sure all the other nodes in the loop can be handled |
never@2118 | 2566 | VectorSet ok(Thread::current()->resource_area()); |
never@2118 | 2567 | |
never@2118 | 2568 | // store related values are ok |
never@2118 | 2569 | ok.set(store->_idx); |
never@2118 | 2570 | ok.set(store->in(MemNode::Memory)->_idx); |
never@2118 | 2571 | |
never@2118 | 2572 | // Loop structure is ok |
never@2118 | 2573 | ok.set(head->_idx); |
never@2118 | 2574 | ok.set(head->loopexit()->_idx); |
never@2118 | 2575 | ok.set(head->phi()->_idx); |
never@2118 | 2576 | ok.set(head->incr()->_idx); |
never@2118 | 2577 | ok.set(head->loopexit()->cmp_node()->_idx); |
never@2118 | 2578 | ok.set(head->loopexit()->in(1)->_idx); |
never@2118 | 2579 | |
never@2118 | 2580 | // Address elements are ok |
never@2118 | 2581 | if (con) ok.set(con->_idx); |
never@2118 | 2582 | if (shift) ok.set(shift->_idx); |
never@2118 | 2583 | if (conv) ok.set(conv->_idx); |
never@2118 | 2584 | |
never@2118 | 2585 | for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { |
never@2118 | 2586 | Node* n = lpt->_body.at(i); |
never@2118 | 2587 | if (n->outcnt() == 0) continue; // Ignore dead |
never@2118 | 2588 | if (ok.test(n->_idx)) continue; |
never@2118 | 2589 | // Backedge projection is ok |
never@2118 | 2590 | if (n->is_IfTrue() && n->in(0) == head->loopexit()) continue; |
never@2118 | 2591 | if (!n->is_AddP()) { |
never@2118 | 2592 | msg = "unhandled node"; |
never@2118 | 2593 | msg_node = n; |
never@2118 | 2594 | break; |
never@2118 | 2595 | } |
never@2118 | 2596 | } |
never@2118 | 2597 | |
never@2118 | 2598 | // Make sure no unexpected values are used outside the loop |
never@2118 | 2599 | for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { |
never@2118 | 2600 | Node* n = lpt->_body.at(i); |
never@2118 | 2601 | // These values can be replaced with other nodes if they are used |
never@2118 | 2602 | // outside the loop. |
never@2168 | 2603 | if (n == store || n == head->loopexit() || n == head->incr() || n == store->in(MemNode::Memory)) continue; |
never@2118 | 2604 | for (SimpleDUIterator iter(n); iter.has_next(); iter.next()) { |
never@2118 | 2605 | Node* use = iter.get(); |
never@2118 | 2606 | if (!lpt->_body.contains(use)) { |
never@2118 | 2607 | msg = "node is used outside loop"; |
never@2118 | 2608 | // lpt->_body.dump(); |
never@2118 | 2609 | msg_node = n; |
never@2118 | 2610 | break; |
never@2118 | 2611 | } |
never@2118 | 2612 | } |
never@2118 | 2613 | } |
never@2118 | 2614 | |
never@2118 | 2615 | #ifdef ASSERT |
never@2118 | 2616 | if (TraceOptimizeFill) { |
never@2118 | 2617 | if (msg != NULL) { |
never@2118 | 2618 | tty->print_cr("no fill intrinsic: %s", msg); |
never@2118 | 2619 | if (msg_node != NULL) msg_node->dump(); |
never@2118 | 2620 | } else { |
never@2118 | 2621 | tty->print_cr("fill intrinsic for:"); |
never@2118 | 2622 | } |
never@2118 | 2623 | store->dump(); |
never@2118 | 2624 | if (Verbose) { |
never@2118 | 2625 | lpt->_body.dump(); |
never@2118 | 2626 | } |
never@2118 | 2627 | } |
never@2118 | 2628 | #endif |
never@2118 | 2629 | |
never@2118 | 2630 | return msg == NULL; |
never@2118 | 2631 | } |
never@2118 | 2632 | |
never@2118 | 2633 | |
never@2118 | 2634 | |
never@2118 | 2635 | bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) { |
never@2118 | 2636 | // Only for counted inner loops |
never@2118 | 2637 | if (!lpt->is_counted() || !lpt->is_inner()) { |
never@2118 | 2638 | return false; |
never@2118 | 2639 | } |
never@2118 | 2640 | |
never@2118 | 2641 | // Must have constant stride |
never@2118 | 2642 | CountedLoopNode* head = lpt->_head->as_CountedLoop(); |
kvn@3048 | 2643 | if (!head->is_valid_counted_loop() || !head->is_normal_loop()) { |
never@2118 | 2644 | return false; |
never@2118 | 2645 | } |
never@2118 | 2646 | |
never@2118 | 2647 | // Check that the body only contains a store of a loop invariant |
never@2118 | 2648 | // value that is indexed by the loop phi. |
never@2118 | 2649 | Node* store = NULL; |
never@2118 | 2650 | Node* store_value = NULL; |
never@2118 | 2651 | Node* shift = NULL; |
never@2118 | 2652 | Node* offset = NULL; |
never@2118 | 2653 | if (!match_fill_loop(lpt, store, store_value, shift, offset)) { |
never@2118 | 2654 | return false; |
never@2118 | 2655 | } |
never@2118 | 2656 | |
kvn@2727 | 2657 | #ifndef PRODUCT |
kvn@2727 | 2658 | if (TraceLoopOpts) { |
kvn@2727 | 2659 | tty->print("ArrayFill "); |
kvn@2727 | 2660 | lpt->dump_head(); |
kvn@2727 | 2661 | } |
kvn@2727 | 2662 | #endif |
kvn@2727 | 2663 | |
never@2118 | 2664 | // Now replace the whole loop body by a call to a fill routine that |
never@2118 | 2665 | // covers the same region as the loop. |
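  // The call has the shape 'fill(from, store_value, len)', where 'from' is
  // the address of the first element written and 'len' the number of
  // elements to fill.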
never@2118 | 2666 | Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base); |
never@2118 | 2667 | |
never@2118 | 2668 | // Build an expression for the beginning of the copy region |
never@2118 | 2669 | Node* index = head->init_trip(); |
never@2118 | 2670 | #ifdef _LP64 |
never@2118 | 2671 | index = new (C, 2) ConvI2LNode(index); |
never@2118 | 2672 | _igvn.register_new_node_with_optimizer(index); |
never@2118 | 2673 | #endif |
never@2118 | 2674 | if (shift != NULL) { |
never@2118 | 2675 | // byte arrays don't require a shift but others do. |
never@2118 | 2676 | index = new (C, 3) LShiftXNode(index, shift->in(2)); |
never@2118 | 2677 | _igvn.register_new_node_with_optimizer(index); |
never@2118 | 2678 | } |
never@2118 | 2679 | index = new (C, 4) AddPNode(base, base, index); |
never@2118 | 2680 | _igvn.register_new_node_with_optimizer(index); |
never@2118 | 2681 | Node* from = new (C, 4) AddPNode(base, index, offset); |
never@2118 | 2682 | _igvn.register_new_node_with_optimizer(from); |
never@2118 | 2683 | // Compute the number of elements to copy |
never@2118 | 2684 | Node* len = new (C, 3) SubINode(head->limit(), head->init_trip()); |
never@2118 | 2685 | _igvn.register_new_node_with_optimizer(len); |
never@2118 | 2686 | |
never@2118 | 2687 | BasicType t = store->as_Mem()->memory_type(); |
never@2118 | 2688 | bool aligned = false; |
never@2118 | 2689 | if (offset != NULL && head->init_trip()->is_Con()) { |
never@2118 | 2690 | int element_size = type2aelembytes(t); |
never@2118 | 2691 | aligned = (offset->find_intptr_t_type()->get_con() + head->init_trip()->get_int() * element_size) % HeapWordSize == 0; |
never@2118 | 2692 | } |
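  // The fill start counts as aligned when the constant offset plus
  // init_trip * element_size is a multiple of HeapWordSize, which lets
  // select_fill_function pick the aligned stub variant below.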
never@2118 | 2693 | |
never@2118 | 2694 | // Build a call to the fill routine |
never@2118 | 2695 | const char* fill_name; |
never@2118 | 2696 | address fill = StubRoutines::select_fill_function(t, aligned, fill_name); |
never@2118 | 2697 | assert(fill != NULL, "what?"); |
never@2118 | 2698 | |
never@2118 | 2699 | // Convert float/double to int/long for fill routines |
never@2118 | 2700 | if (t == T_FLOAT) { |
never@2118 | 2701 | store_value = new (C, 2) MoveF2INode(store_value); |
never@2118 | 2702 | _igvn.register_new_node_with_optimizer(store_value); |
never@2118 | 2703 | } else if (t == T_DOUBLE) { |
never@2118 | 2704 | store_value = new (C, 2) MoveD2LNode(store_value); |
never@2118 | 2705 | _igvn.register_new_node_with_optimizer(store_value); |
never@2118 | 2706 | } |
never@2118 | 2707 | |
never@2118 | 2708 | Node* mem_phi = store->in(MemNode::Memory); |
never@2118 | 2709 | Node* result_ctrl; |
never@2118 | 2710 | Node* result_mem; |
never@2118 | 2711 | const TypeFunc* call_type = OptoRuntime::array_fill_Type(); |
never@2118 | 2712 | int size = call_type->domain()->cnt(); |
never@2118 | 2713 | CallLeafNode *call = new (C, size) CallLeafNoFPNode(call_type, fill, |
never@2118 | 2714 | fill_name, TypeAryPtr::get_array_body_type(t)); |
never@2118 | 2715 | call->init_req(TypeFunc::Parms+0, from); |
never@2118 | 2716 | call->init_req(TypeFunc::Parms+1, store_value); |
never@2199 | 2717 | #ifdef _LP64 |
never@2199 | 2718 | len = new (C, 2) ConvI2LNode(len); |
never@2199 | 2719 | _igvn.register_new_node_with_optimizer(len); |
never@2199 | 2720 | #endif |
never@2118 | 2721 | call->init_req(TypeFunc::Parms+2, len); |
never@2199 | 2722 | #ifdef _LP64 |
never@2199 | 2723 | call->init_req(TypeFunc::Parms+3, C->top()); |
never@2199 | 2724 | #endif |
never@2118 | 2725 | call->init_req( TypeFunc::Control, head->init_control()); |
never@2118 | 2726 | call->init_req( TypeFunc::I_O , C->top() ) ; // does no i/o |
never@2118 | 2727 | call->init_req( TypeFunc::Memory , mem_phi->in(LoopNode::EntryControl) ); |
never@2118 | 2728 | call->init_req( TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr) ); |
never@2118 | 2729 | call->init_req( TypeFunc::FramePtr, C->start()->proj_out(TypeFunc::FramePtr) ); |
never@2118 | 2730 | _igvn.register_new_node_with_optimizer(call); |
never@2118 | 2731 | result_ctrl = new (C, 1) ProjNode(call,TypeFunc::Control); |
never@2118 | 2732 | _igvn.register_new_node_with_optimizer(result_ctrl); |
never@2118 | 2733 | result_mem = new (C, 1) ProjNode(call,TypeFunc::Memory); |
never@2118 | 2734 | _igvn.register_new_node_with_optimizer(result_mem); |
never@2118 | 2735 | |
never@2118 | 2736 | // If this fill is tightly coupled to an allocation and overwrites |
never@2118 | 2737 | // the whole body, allow it to take over the zeroing. |
never@2118 | 2738 | AllocateNode* alloc = AllocateNode::Ideal_allocation(base, this); |
never@2118 | 2739 | if (alloc != NULL && alloc->is_AllocateArray()) { |
never@2118 | 2740 | Node* length = alloc->as_AllocateArray()->Ideal_length(); |
never@2118 | 2741 | if (head->limit() == length && |
never@2118 | 2742 | head->init_trip() == _igvn.intcon(0)) { |
never@2118 | 2743 | if (TraceOptimizeFill) { |
never@2118 | 2744 | tty->print_cr("Eliminated zeroing in allocation"); |
never@2118 | 2745 | } |
never@2118 | 2746 | alloc->maybe_set_complete(&_igvn); |
never@2118 | 2747 | } else { |
never@2118 | 2748 | #ifdef ASSERT |
never@2118 | 2749 | if (TraceOptimizeFill) { |
never@2118 | 2750 | tty->print_cr("filling array but bounds don't match"); |
never@2118 | 2751 | alloc->dump(); |
never@2118 | 2752 | head->init_trip()->dump(); |
never@2118 | 2753 | head->limit()->dump(); |
never@2118 | 2754 | length->dump(); |
never@2118 | 2755 | } |
never@2118 | 2756 | #endif |
never@2118 | 2757 | } |
never@2118 | 2758 | } |
never@2118 | 2759 | |
never@2118 | 2760 | // Redirect the old control and memory edges that are outside the loop. |
never@2118 | 2761 | Node* exit = head->loopexit()->proj_out(0); |
never@2168 | 2762 | // Sometimes the memory phi of the head is used as the outgoing |
never@2168 | 2763 | // state of the loop. It's safe in this case to replace it with the |
never@2168 | 2764 | // result_mem. |
never@2168 | 2765 | _igvn.replace_node(store->in(MemNode::Memory), result_mem); |
never@2118 | 2766 | _igvn.replace_node(exit, result_ctrl); |
never@2118 | 2767 | _igvn.replace_node(store, result_mem); |
never@2118 | 2768 |   // Any uses of the increment outside of the loop become the loop limit. |
never@2118 | 2769 | _igvn.replace_node(head->incr(), head->limit()); |
never@2118 | 2770 | |
never@2118 | 2771 | // Disconnect the head from the loop. |
never@2118 | 2772 | for (uint i = 0; i < lpt->_body.size(); i++) { |
never@2118 | 2773 | Node* n = lpt->_body.at(i); |
never@2118 | 2774 | _igvn.replace_node(n, C->top()); |
never@2118 | 2775 | } |
never@2118 | 2776 | |
never@2118 | 2777 | return true; |
never@2118 | 2778 | } |