Mon, 21 Mar 2011 11:28:14 -0700
Summary: Add predicates when loop head bytecode is parsed instead of when back branch bytecode is parsed.
Reviewed-by: never
/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

//------------------------------is_loop_exit-----------------------------------
// Given an IfNode, return the loop-exiting projection or NULL if both
// arms remain in the loop.
Node *IdealLoopTree::is_loop_exit(Node *iff) const {
  if( iff->outcnt() != 2 ) return NULL;  // Ignore partially dead tests
  PhaseIdealLoop *phase = _phase;
  // Test is an IfNode, has 2 projections.  If BOTH are in the loop
  // we need loop unswitching instead of peeling.
  if( !is_member(phase->get_loop( iff->raw_out(0) )) )
    return iff->raw_out(0);
  if( !is_member(phase->get_loop( iff->raw_out(1) )) )
    return iff->raw_out(1);
  return NULL;
}

//=============================================================================

//------------------------------record_for_igvn----------------------------
// Put loop body on igvn work list
void IdealLoopTree::record_for_igvn() {
  for( uint i = 0; i < _body.size(); i++ ) {
    Node *n = _body.at(i);
    _phase->_igvn._worklist.push(n);
  }
}

//------------------------------compute_profile_trip_cnt----------------------------
// Compute loop trip count from profile data as
//    (backedge_count + loop_exit_count) / loop_exit_count
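// For example, a profiled backedge count of 99 with a single exit taken
// once per entry gives (99 + 1) / 1 = 100 expected trips.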
void IdealLoopTree::compute_profile_trip_cnt( PhaseIdealLoop *phase ) {
  if (!_head->is_CountedLoop()) {
    return;
  }
  CountedLoopNode* head = _head->as_CountedLoop();
  if (head->profile_trip_cnt() != COUNT_UNKNOWN) {
    return; // Already computed
  }
  float trip_cnt = (float)max_jint; // default is big

  Node* back = head->in(LoopNode::LoopBackControl);
  while (back != head) {
    if ((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
        back->in(0) &&
        back->in(0)->is_If() &&
        back->in(0)->as_If()->_fcnt != COUNT_UNKNOWN &&
        back->in(0)->as_If()->_prob != PROB_UNKNOWN) {
      break;
    }
    back = phase->idom(back);
  }
  if (back != head) {
    assert((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
           back->in(0), "if-projection exists");
    IfNode* back_if = back->in(0)->as_If();
    float loop_back_cnt = back_if->_fcnt * back_if->_prob;

    // Now compute a loop exit count
    float loop_exit_cnt = 0.0f;
    for( uint i = 0; i < _body.size(); i++ ) {
      Node *n = _body[i];
      if( n->is_If() ) {
        IfNode *iff = n->as_If();
        if( iff->_fcnt != COUNT_UNKNOWN && iff->_prob != PROB_UNKNOWN ) {
          Node *exit = is_loop_exit(iff);
          if( exit ) {
            float exit_prob = iff->_prob;
            if (exit->Opcode() == Op_IfFalse) exit_prob = 1.0 - exit_prob;
            if (exit_prob > PROB_MIN) {
              float exit_cnt = iff->_fcnt * exit_prob;
              loop_exit_cnt += exit_cnt;
            }
          }
        }
      }
    }
    if (loop_exit_cnt > 0.0f) {
      trip_cnt = (loop_back_cnt + loop_exit_cnt) / loop_exit_cnt;
    } else {
      // No exit count, so fall back on the raw backedge count.
      trip_cnt = loop_back_cnt;
    }
  }
#ifndef PRODUCT
  if (TraceProfileTripCount) {
    tty->print_cr("compute_profile_trip_cnt  lp: %d cnt: %f", head->_idx, trip_cnt);
  }
#endif
  head->set_profile_trip_cnt(trip_cnt);
}

//---------------------is_invariant_addition-----------------------------
// Return nonzero index of invariant operand for an Add or Sub
// of (nonconstant) invariant and variant values.  Helper for reassociate_invariants.
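// For example, AddI(inv, x) yields 1, AddI(x, inv) yields 2, and an Add/Sub
// whose operands are both variant (or both invariant) yields 0.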
int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) {
  int op = n->Opcode();
  if (op == Op_AddI || op == Op_SubI) {
    bool in1_invar = this->is_invariant(n->in(1));
    bool in2_invar = this->is_invariant(n->in(2));
    if (in1_invar && !in2_invar) return 1;
    if (!in1_invar && in2_invar) return 2;
  }
  return 0;
}

//---------------------reassociate_add_sub-----------------------------
// Reassociate invariant add and subtract expressions:
//
// inv1 + (x + inv2)  =>  ( inv1 + inv2) + x
// (x + inv2) + inv1  =>  ( inv1 + inv2) + x
// inv1 + (x - inv2)  =>  ( inv1 - inv2) + x
// inv1 - (inv2 - x)  =>  ( inv1 - inv2) + x
// (x + inv2) - inv1  =>  (-inv1 + inv2) + x
// (x - inv2) + inv1  =>  ( inv1 - inv2) + x
// (x - inv2) - inv1  =>  (-inv1 - inv2) + x
// inv1 + (inv2 - x)  =>  ( inv1 + inv2) - x
// inv1 - (x - inv2)  =>  ( inv1 + inv2) - x
// (inv2 - x) + inv1  =>  ( inv1 + inv2) - x
// (inv2 - x) - inv1  =>  (-inv1 + inv2) - x
// inv1 - (x + inv2)  =>  ( inv1 - inv2) - x
//
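// In each case the parenthesized inv1/inv2 term on the right has only
// loop-invariant inputs, so it can be computed once in the pre-header
// rather than on every iteration.
//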
Node* IdealLoopTree::reassociate_add_sub(Node* n1, PhaseIdealLoop *phase) {
  if ((!n1->is_Add() && !n1->is_Sub()) || n1->outcnt() == 0) return NULL;
  if (is_invariant(n1)) return NULL;
  int inv1_idx = is_invariant_addition(n1, phase);
  if (!inv1_idx) return NULL;
  // Don't mess with add of constant (igvn moves them to expression tree root.)
  if (n1->is_Add() && n1->in(2)->is_Con()) return NULL;
  Node* inv1 = n1->in(inv1_idx);
  Node* n2 = n1->in(3 - inv1_idx);
  int inv2_idx = is_invariant_addition(n2, phase);
  if (!inv2_idx) return NULL;
  Node* x    = n2->in(3 - inv2_idx);
  Node* inv2 = n2->in(inv2_idx);

  bool neg_x    = n2->is_Sub() && inv2_idx == 1;
  bool neg_inv2 = n2->is_Sub() && inv2_idx == 2;
  bool neg_inv1 = n1->is_Sub() && inv1_idx == 2;
  if (n1->is_Sub() && inv1_idx == 1) {
    neg_x    = !neg_x;
    neg_inv2 = !neg_inv2;
  }
  Node* inv1_c = phase->get_ctrl(inv1);
  Node* inv2_c = phase->get_ctrl(inv2);
  Node* n_inv1;
  if (neg_inv1) {
    Node *zero = phase->_igvn.intcon(0);
    phase->set_ctrl(zero, phase->C->root());
    n_inv1 = new (phase->C, 3) SubINode(zero, inv1);
    phase->register_new_node(n_inv1, inv1_c);
  } else {
    n_inv1 = inv1;
  }
  Node* inv;
  if (neg_inv2) {
    inv = new (phase->C, 3) SubINode(n_inv1, inv2);
  } else {
    inv = new (phase->C, 3) AddINode(n_inv1, inv2);
  }
  phase->register_new_node(inv, phase->get_early_ctrl(inv));

  Node* addx;
  if (neg_x) {
    addx = new (phase->C, 3) SubINode(inv, x);
  } else {
    addx = new (phase->C, 3) AddINode(x, inv);
  }
  phase->register_new_node(addx, phase->get_ctrl(x));
  phase->_igvn.replace_node(n1, addx);
  assert(phase->get_loop(phase->get_ctrl(n1)) == this, "");
  _body.yank(n1);
  return addx;
}

//---------------------reassociate_invariants-----------------------------
// Reassociate invariant expressions:
void IdealLoopTree::reassociate_invariants(PhaseIdealLoop *phase) {
  for (int i = _body.size() - 1; i >= 0; i--) {
    Node *n = _body.at(i);
    for (int j = 0; j < 5; j++) {
      Node* nn = reassociate_add_sub(n, phase);
      if (nn == NULL) break;
      n = nn; // again
    }
  }
}

//------------------------------policy_peeling---------------------------------
// Return TRUE or FALSE if the loop should be peeled or not.  Peel if we can
// make some loop-invariant test (usually a null-check) happen before the loop.
bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
  Node *test = ((IdealLoopTree*)this)->tail();
  int  body_size = ((IdealLoopTree*)this)->_body.size();
  int  uniq = phase->C->unique();
  // Peeling does loop cloning which can result in O(N^2) node construction
  if( body_size > 255 /* Prevent overflow for large body_size */
      || (body_size * body_size + uniq > MaxNodeLimit) ) {
    return false;           // too large to safely clone
  }
  while( test != _head ) {      // Scan till run off top of loop
    if( test->is_If() ) {       // Test?
      Node *ctrl = phase->get_ctrl(test->in(1));
      if (ctrl->is_top())
        return false;           // Found dead test on live IF?  No peeling!
      // Standard IF only has one input value to check for loop invariance
      assert( test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd, "Check this code when new subtype is added");
      // Condition is not a member of this loop?
      if( !is_member(phase->get_loop(ctrl)) &&
          is_loop_exit(test) )
        return true;            // Found reason to peel!
    }
    // Walk up dominators to loop _head looking for test which is
    // executed on every path thru loop.
    test = phase->idom(test);
  }
  return false;
}

//------------------------------peeled_dom_test_elim---------------------------
// If we got the effect of peeling, either by actually peeling or by making
// a pre-loop which must execute at least once, we can remove all
// loop-invariant dominated tests in the main body.
void PhaseIdealLoop::peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ) {
  bool progress = true;
  while( progress ) {
    progress = false;           // Reset for next iteration
    Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail();
    Node *test = prev->in(0);
    while( test != loop->_head ) { // Scan till run off top of loop

      int p_op = prev->Opcode();
      if( (p_op == Op_IfFalse || p_op == Op_IfTrue) &&
          test->is_If() &&      // Test?
          !test->in(1)->is_Con() && // And not already obvious?
          // Condition is not a member of this loop?
          !loop->is_member(get_loop(get_ctrl(test->in(1))))){
        // Walk loop body looking for instances of this test
        for( uint i = 0; i < loop->_body.size(); i++ ) {
          Node *n = loop->_body.at(i);
          if( n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/ ) {
            // IfNode was dominated by version in peeled loop body
            progress = true;
            dominated_by( old_new[prev->_idx], n );
          }
        }
      }
      prev = test;
      test = idom(test);
    } // End of scan tests in loop

  } // End of while( progress )
}

//------------------------------do_peeling-------------------------------------
// Peel the first iteration of the given loop.
// Step 1: Clone the loop body.  The clone becomes the peeled iteration.
//         The pre-loop illegally has 2 control users (old & new loops).
// Step 2: Make the old-loop fall-in edges point to the peeled iteration.
//         Do this by making the old-loop fall-in edges act as if they came
//         around the loopback from the prior iteration (follow the old-loop
//         backedges) and then map to the new peeled iteration.  This leaves
//         the pre-loop with only 1 user (the new peeled iteration), but the
//         peeled-loop backedge has 2 users.
// Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
//         extra backedge user.
void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {

  C->set_major_progress();
  // Peeling a 'main' loop in a pre/main/post situation obfuscates the
  // 'pre' loop from the main and the 'pre' can no longer have its
  // iterations adjusted.  Therefore, we need to declare this loop as
  // no longer a 'main' loop; it will need new pre and post loops before
  // we can do further RCE.
#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("Peel ");
    loop->dump_head();
  }
#endif
  Node *h = loop->_head;
  if (h->is_CountedLoop()) {
    CountedLoopNode *cl = h->as_CountedLoop();
    assert(cl->trip_count() > 0, "peeling a fully unrolled loop");
    cl->set_trip_count(cl->trip_count() - 1);
    if (cl->is_main_loop()) {
      cl->set_normal_loop();
#ifndef PRODUCT
      if (PrintOpto && VerifyLoopOptimizations) {
        tty->print("Peeling a 'main' loop; resetting to 'normal' ");
        loop->dump_head();
      }
#endif
    }
  }

  // Step 1: Clone the loop body.  The clone becomes the peeled iteration.
  //         The pre-loop illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dom_depth(loop->_head) );

  // Step 2: Make the old-loop fall-in edges point to the peeled iteration.
  //         Do this by making the old-loop fall-in edges act as if they came
  //         around the loopback from the prior iteration (follow the old-loop
  //         backedges) and then map to the new peeled iteration.  This leaves
  //         the pre-loop with only 1 user (the new peeled iteration), but the
  //         peeled-loop backedge has 2 users.
  for (DUIterator_Fast jmax, j = loop->_head->fast_outs(jmax); j < jmax; j++) {
    Node* old = loop->_head->fast_out(j);
    if( old->in(0) == loop->_head && old->req() == 3 &&
        (old->is_Loop() || old->is_Phi()) ) {
      Node *new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx];
      if( !new_exit_value )     // Backedge value is ALSO loop invariant?
        // Then loop body backedge value remains the same.
        new_exit_value = old->in(LoopNode::LoopBackControl);
      _igvn.hash_delete(old);
      old->set_req(LoopNode::EntryControl, new_exit_value);
    }
  }

  // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
  //         extra backedge user.
  Node *nnn = old_new[loop->_head->_idx];
  _igvn.hash_delete(nnn);
  nnn->set_req(LoopNode::LoopBackControl, C->top());
  for (DUIterator_Fast j2max, j2 = nnn->fast_outs(j2max); j2 < j2max; j2++) {
    Node* use = nnn->fast_out(j2);
    if( use->in(0) == nnn && use->req() == 3 && use->is_Phi() ) {
      _igvn.hash_delete(use);
      use->set_req(LoopNode::LoopBackControl, C->top());
    }
  }

  // Step 4: Correct dom-depth info.  Set to loop-head depth.
  int dd = dom_depth(loop->_head);
  set_idom(loop->_head, loop->_head->in(1), dd);
  for (uint j3 = 0; j3 < loop->_body.size(); j3++) {
    Node *old = loop->_body.at(j3);
    Node *nnn = old_new[old->_idx];
    if (!has_ctrl(nnn))
      set_idom(nnn, idom(nnn), dd-1);
    // While we're at it, remove any SafePoints from the peeled code
    if( old->Opcode() == Op_SafePoint ) {
      Node *nnn = old_new[old->_idx];
      lazy_replace(nnn,nnn->in(TypeFunc::Control));
    }
  }

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);

  loop->record_for_igvn();
}

//------------------------------policy_maximally_unroll------------------------
// Return TRUE if we should maximally unroll this loop: the trip count is a
// known, small constant and the fully unrolled body fits the size budget.
bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
  CountedLoopNode *cl = _head->as_CountedLoop();
  assert( cl->is_normal_loop(), "" );

  Node *init_n = cl->init_trip();
  Node *limit_n = cl->limit();

  // Non-constant bounds
  if( init_n   == NULL || !init_n->is_Con()  ||
      limit_n  == NULL || !limit_n->is_Con() ||
      // protect against stride not being a constant
      !cl->stride_is_con() ) {
    return false;
  }
  int init   = init_n->get_int();
  int limit  = limit_n->get_int();
  int span   = limit - init;
  int stride = cl->stride_con();

  if (init >= limit || stride > span) {
    // return a false (no maximally unroll) and the regular unroll/peel
    // route will make a small mess which CCP will fold away.
    return false;
  }
  uint trip_count = span/stride;   // trip_count can be greater than 2 Gig.
  assert( (int)trip_count*stride == span, "must divide evenly" );
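  // e.g. init=0, limit=100, stride=4: span=100 and trip_count=25; the assert
  // relies on the CountedLoop invariant that stride divides (limit-init) exactly.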

  // Real policy: if we maximally unroll, does it get too big?
  // Allow the unrolled mess to get larger than standard loop
  // size.  After all, it will no longer be a loop.
  uint body_size    = _body.size();
  uint unroll_limit = (uint)LoopUnrollLimit * 4;
  assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
  cl->set_trip_count(trip_count);
  if( trip_count <= unroll_limit && body_size <= unroll_limit ) {
    uint new_body_size = body_size * trip_count;
    if (new_body_size <= unroll_limit &&
        body_size == new_body_size / trip_count &&
        // Unrolling can result in a large amount of node construction
        new_body_size < MaxNodeLimit - phase->C->unique()) {
      return true;    // maximally unroll
    }
  }

  return false;               // Do not maximally unroll
}

//------------------------------policy_unroll----------------------------------
// Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
// the loop is a CountedLoop and the body is small enough.
bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {

  CountedLoopNode *cl = _head->as_CountedLoop();
  assert( cl->is_normal_loop() || cl->is_main_loop(), "" );

  // protect against stride not being a constant
  if( !cl->stride_is_con() ) return false;

  // protect against over-unrolling
  if( cl->trip_count() <= 1 ) return false;

  int future_unroll_ct = cl->unrolled_count() * 2;

  // Don't unroll if the next round of unrolling would push us
  // over the expected trip count of the loop.  One is subtracted
  // from the expected trip count because the pre-loop normally
  // executes 1 iteration.
  if (UnrollLimitForProfileCheck > 0 &&
      cl->profile_trip_cnt() != COUNT_UNKNOWN &&
      future_unroll_ct        > UnrollLimitForProfileCheck &&
      (float)future_unroll_ct > cl->profile_trip_cnt() - 1.0) {
    return false;
  }

  // When unroll count is greater than LoopUnrollMin, don't unroll if:
  //   the residual iterations are more than 10% of the trip count
  //   and rounds of "unroll,optimize" are not making significant progress
  //   Progress defined as current size less than 20% larger than previous size.
  if (UseSuperWord && cl->node_count_before_unroll() > 0 &&
      future_unroll_ct > LoopUnrollMin &&
      (future_unroll_ct - 1) * 10.0 > cl->profile_trip_cnt() &&
      1.2 * cl->node_count_before_unroll() < (double)_body.size()) {
    return false;
  }

  Node *init_n = cl->init_trip();
  Node *limit_n = cl->limit();
  // Non-constant bounds.
  // Protect against over-unrolling when init or/and limit are not constant
  // (so that trip_count's init value is maxint) but iv range is known.
  if( init_n   == NULL || !init_n->is_Con()  ||
      limit_n  == NULL || !limit_n->is_Con() ) {
    Node* phi = cl->phi();
    if( phi != NULL ) {
      assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi.");
      const TypeInt* iv_type = phase->_igvn.type(phi)->is_int();
      int next_stride = cl->stride_con() * 2; // stride after this unroll
      if( next_stride > 0 ) {
        if( iv_type->_lo + next_stride <= iv_type->_lo || // overflow
            iv_type->_lo + next_stride >  iv_type->_hi ) {
          return false;  // over-unrolling
        }
      } else if( next_stride < 0 ) {
        if( iv_type->_hi + next_stride >= iv_type->_hi || // overflow
            iv_type->_hi + next_stride <  iv_type->_lo ) {
          return false;  // over-unrolling
        }
      }
    }
  }

  // Adjust body_size to determine if we unroll or not
  uint body_size = _body.size();
  // Key test to unroll CaffeineMark's Logic test
  int xors_in_loop = 0;
  // Also count ModL, DivL and MulL which expand mightily
  for( uint k = 0; k < _body.size(); k++ ) {
    switch( _body.at(k)->Opcode() ) {
    case Op_XorI: xors_in_loop++; break; // CaffeineMark's Logic test
    case Op_ModL: body_size += 30; break;
    case Op_DivL: body_size += 30; break;
    case Op_MulL: body_size += 10; break;
    }
  }

  // Check for being too big
  if( body_size > (uint)LoopUnrollLimit ) {
    if( xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true;
    // Normal case: loop too big
    return false;
  }

  // Check for stride being a small enough constant
  if( abs(cl->stride_con()) > (1<<3) ) return false;

  // Unroll once!  (Each trip will soon do double iterations)
  return true;
}

//------------------------------policy_align-----------------------------------
// Return TRUE or FALSE if the loop should be cache-line aligned.  Gather the
// expression that does the alignment.  Note that only one array base can be
// aligned in a loop (unless the VM guarantees mutual alignment).  Note that
// if we vectorize short memory ops into longer memory ops, we may want to
// increase alignment.
bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {
  return false;
}

//------------------------------policy_range_check-----------------------------
// Return TRUE or FALSE if the loop should be range-check-eliminated.
// Actually we do iteration-splitting, a more powerful form of RCE.
bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
  if( !RangeCheckElimination ) return false;

  CountedLoopNode *cl = _head->as_CountedLoop();
  // If we unrolled with no intention of doing RCE and we later
  // changed our minds, we got no pre-loop.  Either we need to
  // make a new pre-loop, or we gotta disallow RCE.
  if( cl->is_main_no_pre_loop() ) return false; // Disallowed for now.
  Node *trip_counter = cl->phi();

  // Check loop body for tests of trip-counter plus loop-invariant vs
  // loop-invariant.
  for( uint i = 0; i < _body.size(); i++ ) {
    Node *iff = _body[i];
    if( iff->Opcode() == Op_If ) { // Test?

      // Comparing trip+off vs limit
      Node *bol = iff->in(1);
      if( bol->req() != 2 ) continue; // dead constant test
      if (!bol->is_Bool()) {
        assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only");
        continue;
      }
      Node *cmp = bol->in(1);

      Node *rc_exp = cmp->in(1);
      Node *limit  = cmp->in(2);

      Node *limit_c = phase->get_ctrl(limit);
      if( limit_c == phase->C->top() )
        return false;           // Found dead test on live IF?  No RCE!
      if( is_member(phase->get_loop(limit_c) ) ) {
        // Compare might have operands swapped; commute them
        rc_exp = cmp->in(2);
        limit  = cmp->in(1);
        limit_c = phase->get_ctrl(limit);
        if( is_member(phase->get_loop(limit_c) ) )
          continue;             // Both inputs are loop varying; cannot RCE
      }

      if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, NULL, NULL)) {
        continue;
      }
      // Yeah!  Found a test like 'trip+off vs limit'
      // Test is an IfNode, has 2 projections.  If BOTH are in the loop
      // we need loop unswitching instead of iteration splitting.
      if( is_loop_exit(iff) )
        return true;            // Found reason to split iterations
    } // End of is IF
  }

  return false;
}

//------------------------------policy_peel_only-------------------------------
// Return TRUE or FALSE if the loop should NEVER be RCE'd or aligned.  Useful
// for unrolling loops with NO array accesses.
bool IdealLoopTree::policy_peel_only( PhaseIdealLoop *phase ) const {

  for( uint i = 0; i < _body.size(); i++ )
    if( _body[i]->is_Mem() )
      return false;

  // No memory accesses at all!
  return true;
}

//------------------------------clone_up_backedge_goo--------------------------
// If Node n lives in the back_ctrl block and cannot float, we clone a private
// version of n in preheader_ctrl block and return that, otherwise return n.
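// (For example, an AddI pinned on the backedge that feeds a loop phi: the
// pre/post-loop transforms below need the same value on a fall-in path, so
// a private clone is made that the pre-header control can supply.)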
Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n ) {
  if( get_ctrl(n) != back_ctrl ) return n;

  Node *x = NULL;               // If required, a clone of 'n'
  // Check for 'n' being pinned in the backedge.
  if( n->in(0) && n->in(0) == back_ctrl ) {
    x = n->clone();             // Clone a copy of 'n' to preheader
    x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader
  }

  // Recursively fix up any other input edges into x.
  // If there are no changes we can just return 'n', otherwise
  // we need to clone a private copy and change it.
  for( uint i = 1; i < n->req(); i++ ) {
    Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i) );
    if( g != n->in(i) ) {
      if( !x )
        x = n->clone();
      x->set_req(i, g);
    }
  }
  if( x ) {                     // x can legally float to pre-header location
    register_new_node( x, preheader_ctrl );
    return x;
  } else {                      // raise n to cover LCA of uses
    set_ctrl( n, find_non_split_ctrl(back_ctrl->in(0)) );
  }
  return n;
}

//------------------------------insert_pre_post_loops--------------------------
// Insert pre and post loops.  If peel_only is set, the pre-loop can not have
// more iterations added.  It acts as a 'peel' only, no lower-bound RCE, no
// alignment.  Useful to unroll loops that do no array accesses.
void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) {

#ifndef PRODUCT
  if (TraceLoopOpts) {
    if (peel_only)
      tty->print("PeelMainPost ");
    else
      tty->print("PreMainPost ");
    loop->dump_head();
  }
#endif
  C->set_major_progress();

  // Find common pieces of the loop being guarded with pre & post loops
  CountedLoopNode *main_head = loop->_head->as_CountedLoop();
  assert( main_head->is_normal_loop(), "" );
  CountedLoopEndNode *main_end = main_head->loopexit();
  assert( main_end->outcnt() == 2, "1 true, 1 false path only" );
  uint dd_main_head = dom_depth(main_head);
  uint max = main_head->outcnt();

  Node *pre_header= main_head->in(LoopNode::EntryControl);
  Node *init      = main_head->init_trip();
  Node *incr      = main_end ->incr();
  Node *limit     = main_end ->limit();
  Node *stride    = main_end ->stride();
  Node *cmp       = main_end ->cmp_node();
  BoolTest::mask b_test = main_end->test_trip();

  // Need only 1 user of 'bol' because I will be hacking the loop bounds.
  Node *bol = main_end->in(CountedLoopEndNode::TestValue);
  if( bol->outcnt() != 1 ) {
    bol = bol->clone();
    register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.hash_delete(main_end);
    main_end->set_req(CountedLoopEndNode::TestValue, bol);
  }
  // Need only 1 user of 'cmp' because I will be hacking the loop bounds.
  if( cmp->outcnt() != 1 ) {
    cmp = cmp->clone();
    register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.hash_delete(bol);
    bol->set_req(1, cmp);
  }

  //------------------------------
  // Step A: Create Post-Loop.
  Node* main_exit = main_end->proj_out(false);
  assert( main_exit->Opcode() == Op_IfFalse, "" );
  int dd_main_exit = dom_depth(main_exit);

  // Step A1: Clone the loop body.  The clone becomes the post-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_exit );
  assert( old_new[main_end ->_idx]->Opcode() == Op_CountedLoopEnd, "" );
  CountedLoopNode *post_head = old_new[main_head->_idx]->as_CountedLoop();
  post_head->set_post_loop(main_head);

  // Reduce the post-loop trip count.
  CountedLoopEndNode* post_end = old_new[main_end ->_idx]->as_CountedLoopEnd();
  post_end->_prob = PROB_FAIR;

  // Build the main-loop normal exit.
  IfFalseNode *new_main_exit = new (C, 1) IfFalseNode(main_end);
  _igvn.register_new_node_with_optimizer( new_main_exit );
  set_idom(new_main_exit, main_end, dd_main_exit );
  set_loop(new_main_exit, loop->_parent);

  // Step A2: Build a zero-trip guard for the post-loop.  After leaving the
  // main-loop, the post-loop may not execute at all.  We 'opaque' the incr
  // (the main-loop trip-counter exit value) because we will be changing
  // the exit value (via unrolling) so we cannot constant-fold away the zero
  // trip guard until all unrolling is done.
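  // (An Opaque1 node is opaque to igvn: it hides 'incr' so this guard's CmpI
  // cannot be commoned up or folded with the loop-exit compare while the
  // limits are still being rewritten.)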
  Node *zer_opaq = new (C, 2) Opaque1Node(C, incr);
  Node *zer_cmp  = new (C, 3) CmpINode( zer_opaq, limit );
  Node *zer_bol  = new (C, 2) BoolNode( zer_cmp, b_test );
  register_new_node( zer_opaq, new_main_exit );
  register_new_node( zer_cmp , new_main_exit );
  register_new_node( zer_bol , new_main_exit );

  // Build the IfNode
  IfNode *zer_iff = new (C, 2) IfNode( new_main_exit, zer_bol, PROB_FAIR, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( zer_iff );
  set_idom(zer_iff, new_main_exit, dd_main_exit);
  set_loop(zer_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip post-loop
  _igvn.hash_delete( main_exit );
  main_exit->set_req(0, zer_iff);
  _igvn._worklist.push(main_exit);
  set_idom(main_exit, zer_iff, dd_main_exit);
  set_idom(main_exit->unique_out(), zer_iff, dd_main_exit);
  // Make the true-path, must enter the post loop
  Node *zer_taken = new (C, 1) IfTrueNode( zer_iff );
  _igvn.register_new_node_with_optimizer( zer_taken );
  set_idom(zer_taken, zer_iff, dd_main_exit);
  set_loop(zer_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( post_head );
  post_head->set_req(LoopNode::EntryControl, zer_taken);
  set_idom(post_head, zer_taken, dd_main_exit);

  // Step A3: Make the fall-in values to the post-loop come from the
  // fall-out values of the main-loop.
  for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) {
    Node* main_phi = main_head->fast_out(i);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
      Node *post_phi = old_new[main_phi->_idx];
      Node *fallmain = clone_up_backedge_goo(main_head->back_control(),
                                             post_head->init_control(),
                                             main_phi->in(LoopNode::LoopBackControl));
      _igvn.hash_delete(post_phi);
      post_phi->set_req( LoopNode::EntryControl, fallmain );
    }
  }

  // Update local caches for next stanza
  main_exit = new_main_exit;

  //------------------------------
  // Step B: Create Pre-Loop.

  // Step B1: Clone the loop body.  The clone becomes the pre-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_head );
  CountedLoopNode*    pre_head = old_new[main_head->_idx]->as_CountedLoop();
  CountedLoopEndNode* pre_end  = old_new[main_end ->_idx]->as_CountedLoopEnd();
  pre_head->set_pre_loop(main_head);
  Node *pre_incr = old_new[incr->_idx];

  // Reduce the pre-loop trip count.
  pre_end->_prob = PROB_FAIR;

  // Find the pre-loop normal exit.
  Node* pre_exit = pre_end->proj_out(false);
  assert( pre_exit->Opcode() == Op_IfFalse, "" );
  IfFalseNode *new_pre_exit = new (C, 1) IfFalseNode(pre_end);
  _igvn.register_new_node_with_optimizer( new_pre_exit );
  set_idom(new_pre_exit, pre_end, dd_main_head);
  set_loop(new_pre_exit, loop->_parent);

  // Step B2: Build a zero-trip guard for the main-loop.  After leaving the
  // pre-loop, the main-loop may not execute at all.  Later in life this
  // zero-trip guard will become the minimum-trip guard when we unroll
  // the main-loop.
  Node *min_opaq = new (C, 2) Opaque1Node(C, limit);
  Node *min_cmp  = new (C, 3) CmpINode( pre_incr, min_opaq );
  Node *min_bol  = new (C, 2) BoolNode( min_cmp, b_test );
  register_new_node( min_opaq, new_pre_exit );
  register_new_node( min_cmp , new_pre_exit );
  register_new_node( min_bol , new_pre_exit );

  // Build the IfNode (assume the main-loop is executed always).
  IfNode *min_iff = new (C, 2) IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( min_iff );
  set_idom(min_iff, new_pre_exit, dd_main_head);
  set_loop(min_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip main-loop
  _igvn.hash_delete( pre_exit );
  pre_exit->set_req(0, min_iff);
  set_idom(pre_exit, min_iff, dd_main_head);
  set_idom(pre_exit->unique_out(), min_iff, dd_main_head);
  // Make the true-path, must enter the main loop
  Node *min_taken = new (C, 1) IfTrueNode( min_iff );
  _igvn.register_new_node_with_optimizer( min_taken );
  set_idom(min_taken, min_iff, dd_main_head);
  set_loop(min_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( main_head );
  main_head->set_req(LoopNode::EntryControl, min_taken);
  set_idom(main_head, min_taken, dd_main_head);

  // Step B3: Make the fall-in values to the main-loop come from the
  // fall-out values of the pre-loop.
  for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) {
    Node* main_phi = main_head->fast_out(i2);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
      Node *pre_phi = old_new[main_phi->_idx];
      Node *fallpre = clone_up_backedge_goo(pre_head->back_control(),
                                            main_head->init_control(),
                                            pre_phi->in(LoopNode::LoopBackControl));
      _igvn.hash_delete(main_phi);
      main_phi->set_req( LoopNode::EntryControl, fallpre );
    }
  }

  // Step B4: Shorten the pre-loop to run only 1 iteration (for now).
  // RCE and alignment may change this later.
  Node *cmp_end = pre_end->cmp_node();
  assert( cmp_end->in(2) == limit, "" );
  Node *pre_limit = new (C, 3) AddINode( init, stride );

  // Save the original loop limit in this Opaque1 node for
  // use by range check elimination.
  Node *pre_opaq  = new (C, 3) Opaque1Node(C, pre_limit, limit);

  register_new_node( pre_limit, pre_head->in(0) );
  register_new_node( pre_opaq , pre_head->in(0) );

  // Since no other users of pre-loop compare, I can hack limit directly
  assert( cmp_end->outcnt() == 1, "no other users" );
  _igvn.hash_delete(cmp_end);
  cmp_end->set_req(2, peel_only ? pre_limit : pre_opaq);

  // Special case for not-equal loop bounds:
  // Change pre loop test, main loop test, and the
  // main loop guard test to use lt or gt depending on stride
  // direction:
  // positive stride use <
  // negative stride use >

  if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) {

    BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt;
    // Modify pre loop end condition
    Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol0 = new (C, 2) BoolNode(pre_bol->in(1), new_test);
    register_new_node( new_bol0, pre_head->in(0) );
    _igvn.hash_delete(pre_end);
    pre_end->set_req(CountedLoopEndNode::TestValue, new_bol0);
    // Modify main loop guard condition
    assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay");
    BoolNode* new_bol1 = new (C, 2) BoolNode(min_bol->in(1), new_test);
    register_new_node( new_bol1, new_pre_exit );
    _igvn.hash_delete(min_iff);
    min_iff->set_req(CountedLoopEndNode::TestValue, new_bol1);
    // Modify main loop end condition
    BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol2 = new (C, 2) BoolNode(main_bol->in(1), new_test);
    register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) );
    _igvn.hash_delete(main_end);
    main_end->set_req(CountedLoopEndNode::TestValue, new_bol2);
  }

  // Flag main loop
  main_head->set_main_loop();
  if( peel_only ) main_head->set_main_no_pre_loop();

  // It's difficult to be precise about the trip-counts
  // for the pre/post loops.  They are usually very short,
  // so guess that 4 trips is a reasonable value.
  post_head->set_profile_trip_cnt(4.0);
  pre_head->set_profile_trip_cnt(4.0);

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);
}

//------------------------------is_invariant-----------------------------
// Return true if n is invariant
bool IdealLoopTree::is_invariant(Node* n) const {
  Node *n_c = _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n;
  if (n_c->is_top()) return false;
  return !is_member(_phase->get_loop(n_c));
}

//------------------------------do_unroll--------------------------------------
// Unroll the loop body one step - make each trip do 2 iterations.
void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) {
  assert(LoopUnrollLimit, "");
  CountedLoopNode *loop_head = loop->_head->as_CountedLoop();
  CountedLoopEndNode *loop_end = loop_head->loopexit();
  assert(loop_end, "");
#ifndef PRODUCT
  if (PrintOpto && VerifyLoopOptimizations) {
    tty->print("Unrolling ");
    loop->dump_head();
  } else if (TraceLoopOpts) {
    tty->print("Unroll %d ", loop_head->unrolled_count()*2);
    loop->dump_head();
  }
#endif

  // Remember loop node count before unrolling to detect
  // if rounds of unroll,optimize are making progress
  loop_head->set_node_count_before_unroll(loop->_body.size());

  Node *ctrl   = loop_head->in(LoopNode::EntryControl);
  Node *limit  = loop_head->limit();
  Node *init   = loop_head->init_trip();
  Node *stride = loop_head->stride();

  Node *opaq = NULL;
  if( adjust_min_trip ) {       // If not maximally unrolling, need adjustment
    assert( loop_head->is_main_loop(), "" );
    assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" );
    Node *iff = ctrl->in(0);
    assert( iff->Opcode() == Op_If, "" );
    Node *bol = iff->in(1);
    assert( bol->Opcode() == Op_Bool, "" );
    Node *cmp = bol->in(1);
    assert( cmp->Opcode() == Op_CmpI, "" );
    opaq = cmp->in(2);
    // Occasionally it's possible for a pre-loop Opaque1 node to be
    // optimized away and then another round of loop opts attempted.
    // We can not optimize this particular loop in that case.
    if( opaq->Opcode() != Op_Opaque1 )
      return;                   // Cannot find pre-loop!  Bail out!
  }

  C->set_major_progress();

  // Adjust max trip count. The trip count is intentionally rounded
  // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
  // the main, unrolled, part of the loop will never execute as it is protected
  // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
  // and later determined that part of the unrolled loop was dead.
  loop_head->set_trip_count(loop_head->trip_count() / 2);

  // Double the count of original iterations in the unrolled loop body.
  loop_head->double_unrolled_count();

  // -----------
  // Step 2: Cut back the trip counter for an unroll amount of 2.
  // Loop will normally trip (limit - init)/stride_con.  Since it's a
  // CountedLoop this is exact (stride divides limit-init exactly).
  // We are going to double the loop body, so we want to knock off any
  // odd iteration: (trip_cnt & ~1).  Then back compute a new limit.
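  // For example, with init=0, limit=15, stride=1: trip=15, and masking with
  // ~1 gives 14, so lim2 below becomes 0 + 14*1 = 14; the doubled body then
  // runs 7 times and the knocked-off odd iteration falls to the post-loop.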
  Node *span = new (C, 3) SubINode( limit, init );
  register_new_node( span, ctrl );
  Node *trip = new (C, 3) DivINode( 0, span, stride );
  register_new_node( trip, ctrl );
  Node *mtwo = _igvn.intcon(-2);
  set_ctrl(mtwo, C->root());
  Node *rond = new (C, 3) AndINode( trip, mtwo );
  register_new_node( rond, ctrl );
  Node *spn2 = new (C, 3) MulINode( rond, stride );
  register_new_node( spn2, ctrl );
  Node *lim2 = new (C, 3) AddINode( spn2, init );
  register_new_node( lim2, ctrl );

  // Hammer in the new limit
  Node *ctrl2 = loop_end->in(0);
  Node *cmp2 = new (C, 3) CmpINode( loop_head->incr(), lim2 );
  register_new_node( cmp2, ctrl2 );
  Node *bol2 = new (C, 2) BoolNode( cmp2, loop_end->test_trip() );
  register_new_node( bol2, ctrl2 );
  _igvn.hash_delete(loop_end);
  loop_end->set_req(CountedLoopEndNode::TestValue, bol2);

  // Step 3: Find the min-trip test guaranteed before a 'main' loop.
  // Make it a 1-trip test (means at least 2 trips).
  if( adjust_min_trip ) {
    // Guard test uses an 'opaque' node which is not shared.  Hence I
    // can edit its inputs directly.  Hammer in the new limit for the
    // minimum-trip guard.
    assert( opaq->outcnt() == 1, "" );
    _igvn.hash_delete(opaq);
    opaq->set_req(1, lim2);
  }

  // ---------
  // Step 4: Clone the loop body.  Move it inside the loop.  This loop body
  // represents the odd iterations; since the loop trips an even number of
  // times its backedge is never taken.  Kill the backedge.
  uint dd = dom_depth(loop_head);
  clone_loop( loop, old_new, dd );

  // Make backedges of the clone equal to backedges of the original.
  // Make the fall-in from the original come from the fall-out of the clone.
  for (DUIterator_Fast jmax, j = loop_head->fast_outs(jmax); j < jmax; j++) {
    Node* phi = loop_head->fast_out(j);
    if( phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0 ) {
      Node *newphi = old_new[phi->_idx];
      _igvn.hash_delete( phi );
      _igvn.hash_delete( newphi );

      phi   ->set_req(LoopNode::   EntryControl, newphi->in(LoopNode::LoopBackControl));
      newphi->set_req(LoopNode::LoopBackControl, phi   ->in(LoopNode::LoopBackControl));
      phi   ->set_req(LoopNode::LoopBackControl, C->top());
    }
  }
  Node *clone_head = old_new[loop_head->_idx];
  _igvn.hash_delete( clone_head );
  loop_head ->set_req(LoopNode::   EntryControl, clone_head->in(LoopNode::LoopBackControl));
  clone_head->set_req(LoopNode::LoopBackControl, loop_head ->in(LoopNode::LoopBackControl));
  loop_head ->set_req(LoopNode::LoopBackControl, C->top());
  loop->_head = clone_head;     // New loop header

  set_idom(loop_head,  loop_head ->in(LoopNode::EntryControl), dd);
  set_idom(clone_head, clone_head->in(LoopNode::EntryControl), dd);

  // Kill the clone's backedge
  Node *newcle = old_new[loop_end->_idx];
  _igvn.hash_delete( newcle );
  Node *one = _igvn.intcon(1);
  set_ctrl(one, C->root());
  newcle->set_req(1, one);
  // Force clone into same loop body
  uint max = loop->_body.size();
  for( uint k = 0; k < max; k++ ) {
    Node *old = loop->_body.at(k);
    Node *nnn = old_new[old->_idx];
    loop->_body.push(nnn);
    if (!has_ctrl(old))
      set_loop(nnn, loop);
  }

  loop->record_for_igvn();
}

//------------------------------do_maximally_unroll----------------------------

void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) {
  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  assert(cl->trip_count() > 0, "");
#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("MaxUnroll %d ", cl->trip_count());
    loop->dump_head();
  }
#endif

  // If loop is tripping an odd number of times, peel odd iteration
  if ((cl->trip_count() & 1) == 1) {
    do_peeling(loop, old_new);
  }

  // Now it's tripping an even number of times remaining.  Double loop body.
  // Do not adjust pre-guards; they are not needed and do not exist.
  if (cl->trip_count() > 0) {
    do_unroll(loop, old_new, false);
  }
}

//------------------------------dominates_backedge---------------------------------
// Returns true if ctrl is executed on every complete iteration
bool IdealLoopTree::dominates_backedge(Node* ctrl) {
  assert(ctrl->is_CFG(), "must be control");
  Node* backedge = _head->as_Loop()->in(LoopNode::LoopBackControl);
  return _phase->dom_lca_internal(ctrl, backedge) == ctrl;
}

//------------------------------add_constraint---------------------------------
// Constrain the main loop iterations so the condition:
//    scale_con * I + offset  <  limit
// always holds true.  That is, either increase the number of iterations in
// the pre-loop or the post-loop until the condition holds true in the main
// loop.  Stride, scale, offset and limit are all loop invariant.  Further,
// stride and scale are constants (offset and limit often are).
void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) {

  // Compute "I :: (limit-offset)/scale_con"
  Node *con = new (C, 3) SubINode( limit, offset );
  register_new_node( con, pre_ctrl );
  Node *scale = _igvn.intcon(scale_con);
  set_ctrl(scale, C->root());
  Node *X = new (C, 3) DivINode( 0, con, scale );
  register_new_node( X, pre_ctrl );

  // For positive stride, the pre-loop limit always uses a MAX function
  // and the main loop a MIN function.  For negative stride these are
  // reversed.

  // Also for positive stride*scale the affine function is increasing, so the
  // pre-loop must check for underflow and the post-loop for overflow.
  // Negative stride*scale reverses this; pre-loop checks for overflow and
  // post-loop for underflow.
  if( stride_con*scale_con > 0 ) {
    // Compute I < (limit-offset)/scale_con
    // Adjust main-loop last iteration to be MIN/MAX(main_loop,X)
    *main_limit = (stride_con > 0)
      ? (Node*)(new (C, 3) MinINode( *main_limit, X ))
      : (Node*)(new (C, 3) MaxINode( *main_limit, X ));
    register_new_node( *main_limit, pre_ctrl );

  } else {
    // Compute (limit-offset)/scale_con + SGN(-scale_con) <= I
    // Add the negation of the main-loop constraint to the pre-loop.
    // See footnote [++] below for a derivation of the limit expression.
    Node *incr = _igvn.intcon(scale_con > 0 ? -1 : 1);
    set_ctrl(incr, C->root());
    Node *adj = new (C, 3) AddINode( X, incr );
    register_new_node( adj, pre_ctrl );
    *pre_limit = (scale_con > 0)
      ? (Node*)new (C, 3) MinINode( *pre_limit, adj )
      : (Node*)new (C, 3) MaxINode( *pre_limit, adj );
    register_new_node( *pre_limit, pre_ctrl );

    // [++] Here's the algebra that justifies the pre-loop limit expression:
    //
    //   NOT( scale_con * I + offset  <  limit )
    //     ==
    //   scale_con * I + offset  >=  limit
    //     ==
    //   SGN(scale_con) * I  >=  (limit-offset)/|scale_con|
    //     ==
    //   (limit-offset)/|scale_con|  <=  I * SGN(scale_con)
    //     ==
    //   (limit-offset)/|scale_con|-1  <  I * SGN(scale_con)
    //     ==
    //   ( if (scale_con > 0) /*common case*/
    //       (limit-offset)/scale_con - 1  <  I
    //     else
    //       (limit-offset)/scale_con + 1  >  I
    //   )
    //   ( if (scale_con > 0) /*common case*/
    //       (limit-offset)/scale_con + SGN(-scale_con)  <  I
    //     else
    //       (limit-offset)/scale_con + SGN(-scale_con)  >  I
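    //   )
    //
    // Numeric check (scale_con = 2, offset = 0, limit = 8): the main-loop
    // constraint 2*I < 8 fails exactly when I >= 4, and the derived
    // pre-loop bound 8/2 - 1 = 3 < I picks out exactly those iterations.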
  }
}

//------------------------------is_scaled_iv---------------------------------
// Return true if exp is a constant times an induction var
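// (e.g. iv*5, 5*iv, or iv << 2, the last reporting a scale of 4)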
bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, int* p_scale) {
  if (exp == iv) {
    if (p_scale != NULL) {
      *p_scale = 1;
    }
    return true;
  }
  int opc = exp->Opcode();
  if (opc == Op_MulI) {
    if (exp->in(1) == iv && exp->in(2)->is_Con()) {
      if (p_scale != NULL) {
        *p_scale = exp->in(2)->get_int();
      }
      return true;
    }
    if (exp->in(2) == iv && exp->in(1)->is_Con()) {
      if (p_scale != NULL) {
        *p_scale = exp->in(1)->get_int();
      }
      return true;
    }
  } else if (opc == Op_LShiftI) {
    if (exp->in(1) == iv && exp->in(2)->is_Con()) {
      if (p_scale != NULL) {
        *p_scale = 1 << exp->in(2)->get_int();
      }
      return true;
    }
  }
  return false;
}

//-----------------------------is_scaled_iv_plus_offset------------------------------
// Return true if exp is a simple induction variable expression: k1*iv + (invar + k2)
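// (e.g. (4*iv + invar) + 3 gives k1 = 4 and builds a new AddI(invar, 3)
// offset outside the loop; a SubI form such as invar - 4*iv flips the
// scale to -4.)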
bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth) {
  if (is_scaled_iv(exp, iv, p_scale)) {
    if (p_offset != NULL) {
      Node *zero = _igvn.intcon(0);
      set_ctrl(zero, C->root());
      *p_offset = zero;
    }
    return true;
  }
  int opc = exp->Opcode();
  if (opc == Op_AddI) {
    if (is_scaled_iv(exp->in(1), iv, p_scale)) {
      if (p_offset != NULL) {
        *p_offset = exp->in(2);
      }
      return true;
    }
    if (exp->in(2)->is_Con()) {
      Node* offset2 = NULL;
      if (depth < 2 &&
          is_scaled_iv_plus_offset(exp->in(1), iv, p_scale,
                                   p_offset != NULL ? &offset2 : NULL, depth+1)) {
        if (p_offset != NULL) {
          Node *ctrl_off2 = get_ctrl(offset2);
          Node* offset = new (C, 3) AddINode(offset2, exp->in(2));
          register_new_node(offset, ctrl_off2);
          *p_offset = offset;
        }
        return true;
      }
    }
  } else if (opc == Op_SubI) {
    if (is_scaled_iv(exp->in(1), iv, p_scale)) {
      if (p_offset != NULL) {
        Node *zero = _igvn.intcon(0);
        set_ctrl(zero, C->root());
        Node *ctrl_off = get_ctrl(exp->in(2));
        Node* offset = new (C, 3) SubINode(zero, exp->in(2));
        register_new_node(offset, ctrl_off);
        *p_offset = offset;
      }
      return true;
    }
    if (is_scaled_iv(exp->in(2), iv, p_scale)) {
      if (p_offset != NULL) {
        *p_scale *= -1;
        *p_offset = exp->in(1);
      }
      return true;
    }
  }
  return false;
}

//------------------------------do_range_check---------------------------------
// Eliminate range-checks and other trip-counter vs loop-invariant tests.
void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
#ifndef PRODUCT
  if (PrintOpto && VerifyLoopOptimizations) {
    tty->print("Range Check Elimination ");
    loop->dump_head();
  } else if (TraceLoopOpts) {
    tty->print("RangeCheck ");
    loop->dump_head();
  }
#endif
  assert(RangeCheckElimination, "");
  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  assert(cl->is_main_loop(), "");

  // protect against stride not being a constant
  if (!cl->stride_is_con())
    return;

  // Find the trip counter; we are iteration splitting based on it
  Node *trip_counter = cl->phi();
  // Find the main loop limit; we will trim its iterations
  // so they never trip the end tests
  Node *main_limit = cl->limit();

  // Need to find the main-loop zero-trip guard
  Node *ctrl = cl->in(LoopNode::EntryControl);
  assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "");
  Node *iffm = ctrl->in(0);
  assert(iffm->Opcode() == Op_If, "");
  Node *bolzm = iffm->in(1);
  assert(bolzm->Opcode() == Op_Bool, "");
  Node *cmpzm = bolzm->in(1);
  assert(cmpzm->is_Cmp(), "");
  Node *opqzm = cmpzm->in(2);
  // Can not optimize a loop if pre-loop Opaque1 node is optimized
  // away and then another round of loop opts attempted.
  if (opqzm->Opcode() != Op_Opaque1)
    return;
  assert(opqzm->in(1) == main_limit, "do not understand situation");

  // Find the pre-loop limit; we will expand its iterations so
  // they never trip the low tests.
  Node *p_f = iffm->in(0);
  assert(p_f->Opcode() == Op_IfFalse, "");
  CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
  assert(pre_end->loopnode()->is_pre_loop(), "");
  Node *pre_opaq1 = pre_end->limit();
  // Occasionally it's possible for a pre-loop Opaque1 node to be
  // optimized away and then another round of loop opts attempted.
  // We can not optimize this particular loop in that case.
  if (pre_opaq1->Opcode() != Op_Opaque1)
    return;
  Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1;
  Node *pre_limit = pre_opaq->in(1);

  // Where do we put new limit calculations
  Node *pre_ctrl = pre_end->loopnode()->in(LoopNode::EntryControl);

  // Ensure the original loop limit is available from the
  // pre-loop Opaque1 node.
  Node *orig_limit = pre_opaq->original_loop_limit();
  if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP)
    return;

  // Must know if it's a count-up or count-down loop

  int stride_con = cl->stride_con();
  Node *zero = _igvn.intcon(0);
  Node *one  = _igvn.intcon(1);
  set_ctrl(zero, C->root());
  set_ctrl(one,  C->root());

  // Range checks that do not dominate the loop backedge (i.e.
  // conditionally executed) can lengthen the pre loop limit beyond
  // the original loop limit.  To prevent this, the pre limit is
  // (for stride > 0) MINed with the original loop limit (MAXed for
  // stride < 0) when some range check (rc) is conditionally
  // executed.
  bool conditional_rc = false;

  // Check loop body for tests of trip-counter plus loop-invariant vs
  // loop-invariant.
  for( uint i = 0; i < loop->_body.size(); i++ ) {
    Node *iff = loop->_body[i];
    if( iff->Opcode() == Op_If ) { // Test?

      // Test is an IfNode, has 2 projections.  If BOTH are in the loop
      // we need loop unswitching instead of iteration splitting.
      Node *exit = loop->is_loop_exit(iff);
      if( !exit ) continue;
      int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0;

      // Get boolean condition to test
      Node *i1 = iff->in(1);
      if( !i1->is_Bool() ) continue;
      BoolNode *bol = i1->as_Bool();
      BoolTest b_test = bol->_test;
      // Flip sense of test if exit condition is flipped
      if( flip )
        b_test = b_test.negate();

      // Get compare
      Node *cmp = bol->in(1);

      // Look for trip_counter + offset vs limit
      Node *rc_exp = cmp->in(1);
      Node *limit  = cmp->in(2);
      jint scale_con = 1;        // Assume trip counter not scaled

      Node *limit_c = get_ctrl(limit);
      if( loop->is_member(get_loop(limit_c) ) ) {
        // Compare might have operands swapped; commute them
        b_test = b_test.commute();
        rc_exp = cmp->in(2);
        limit  = cmp->in(1);
        limit_c = get_ctrl(limit);
        if( loop->is_member(get_loop(limit_c) ) )
          continue;             // Both inputs are loop varying; cannot RCE
      }
      // Here we know 'limit' is loop invariant

      // 'limit' may be pinned below the zero trip test (probably from a
      // previous round of rce), in which case, it can't be used in the
      // zero trip test expression which must occur before the zero test's if.
      if( limit_c == ctrl ) {
        continue;  // Don't rce this check but continue looking for other candidates.
      }

      // Check for scaled induction variable plus an offset
      Node *offset = NULL;

      if (!is_scaled_iv_plus_offset(rc_exp, trip_counter, &scale_con, &offset)) {
        continue;
      }

      Node *offset_c = get_ctrl(offset);
      if( loop->is_member( get_loop(offset_c) ) )
        continue;               // Offset is not really loop invariant
      // Here we know 'offset' is loop invariant.

      // As above for the 'limit', the 'offset' may be pinned below the
      // zero trip test.
      if( offset_c == ctrl ) {
        continue;  // Don't rce this check but continue looking for other candidates.
      }

      // At this point we have the expression as:
      //   scale_con * trip_counter + offset :: limit
      // where scale_con, offset and limit are loop invariant.  Trip_counter
      // monotonically increases by stride_con, a constant.  Both (or either)
      // stride_con and scale_con can be negative which will flip about the
      // sense of the test.

      // Adjust pre and main loop limits to guard the correct iteration set
      if( cmp->Opcode() == Op_CmpU ) { // Unsigned compare is really 2 tests
        if( b_test._test == BoolTest::lt ) { // Range checks always use lt
          // The overflow limit: scale*I+offset < limit
          add_constraint( stride_con, scale_con, offset, limit, pre_ctrl, &pre_limit, &main_limit );
          // The underflow limit: 0 <= scale*I+offset.
          // Some math yields: -scale*I-(offset+1) < 0
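          // (0 <= E  ==  -E <= 0  ==  -E-1 < 0 over the integers,
          //  where E = scale*I+offset)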
          Node *plus_one = new (C, 3) AddINode( offset, one );
          register_new_node( plus_one, pre_ctrl );
          Node *neg_offset = new (C, 3) SubINode( zero, plus_one );
          register_new_node( neg_offset, pre_ctrl );
          add_constraint( stride_con, -scale_con, neg_offset, zero, pre_ctrl, &pre_limit, &main_limit );
          if (!conditional_rc) {
            conditional_rc = !loop->dominates_backedge(iff);
          }
        } else {
#ifndef PRODUCT
          if( PrintOpto )
            tty->print_cr("missed RCE opportunity");
#endif
          continue;             // In release mode, ignore it
        }
      } else {                  // Otherwise work on normal compares
        switch( b_test._test ) {
        case BoolTest::ge:      // Convert X >= Y to -X <= -Y
          scale_con = -scale_con;
          offset = new (C, 3) SubINode( zero, offset );
          register_new_node( offset, pre_ctrl );
          limit  = new (C, 3) SubINode( zero, limit );
          register_new_node( limit, pre_ctrl );
          // Fall into LE case
        case BoolTest::le:      // Convert X <= Y to X < Y+1
          limit = new (C, 3) AddINode( limit, one );
          register_new_node( limit, pre_ctrl );
          // Fall into LT case
        case BoolTest::lt:
          add_constraint( stride_con, scale_con, offset, limit, pre_ctrl, &pre_limit, &main_limit );
          if (!conditional_rc) {
            conditional_rc = !loop->dominates_backedge(iff);
          }
          break;
        default:
#ifndef PRODUCT
          if( PrintOpto )
            tty->print_cr("missed RCE opportunity");
#endif
          continue;             // Unhandled case
        }
      }

      // Kill the eliminated test
      C->set_major_progress();
      Node *kill_con = _igvn.intcon( 1-flip );
      set_ctrl(kill_con, C->root());
      _igvn.hash_delete(iff);
      iff->set_req(1, kill_con);
      _igvn._worklist.push(iff);
      // Find surviving projection
      assert(iff->is_If(), "");
      ProjNode* dp = ((IfNode*)iff)->proj_out(1-flip);
      // Find loads off the surviving projection; remove their control edge
      for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
        Node* cd = dp->fast_out(i); // Control-dependent node
        if( cd->is_Load() ) {   // Loads can now float around in the loop
          _igvn.hash_delete(cd);
          // Allow the load to float around in the loop, or before it
          // but NOT before the pre-loop.
          cd->set_req(0, ctrl); // ctrl, not NULL
          _igvn._worklist.push(cd);
          --i;
          --imax;
        }
      }

    } // End of is IF

  }
1485 // Update loop limits
1486 if (conditional_rc) {
1487 pre_limit = (stride_con > 0) ? (Node*)new (C,3) MinINode(pre_limit, orig_limit)
1488 : (Node*)new (C,3) MaxINode(pre_limit, orig_limit);
1489 register_new_node(pre_limit, pre_ctrl);
1490 }
1491 _igvn.hash_delete(pre_opaq);
1492 pre_opaq->set_req(1, pre_limit);
1494 // Note: we are making the main loop limit no longer precise;
1495 // need to round up based on stride.
1496 if( stride_con != 1 && stride_con != -1 ) { // Cutout for common case
1497 // "Standard" round-up logic: ([main_limit-init+(y-1)]/y)*y+init
1498 // Hopefully, the compiler will optimize for powers of 2.
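// E.g. init == 0, main_limit == 10, stride_con == 3 gives
// ((10-0+2)/3)*3+0 == 12; trips are still 0,3,6,9 since the counter
// steps from 9 to 12.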
1499 Node *ctrl = get_ctrl(main_limit);
1500 Node *stride = cl->stride();
1501 Node *init = cl->init_trip();
1502 Node *span = new (C, 3) SubINode(main_limit,init);
1503 register_new_node(span,ctrl);
1504 Node *rndup = _igvn.intcon(stride_con + ((stride_con>0)?-1:1));
1505 Node *add = new (C, 3) AddINode(span,rndup);
1506 register_new_node(add,ctrl);
1507 Node *div = new (C, 3) DivINode(0,add,stride);
1508 register_new_node(div,ctrl);
1509 Node *mul = new (C, 3) MulINode(div,stride);
1510 register_new_node(mul,ctrl);
1511 Node *newlim = new (C, 3) AddINode(mul,init);
1512 register_new_node(newlim,ctrl);
1513 main_limit = newlim;
1514 }
1516 Node *main_cle = cl->loopexit();
1517 Node *main_bol = main_cle->in(1);
1518 // Hacking loop bounds; need private copies of exit test
1519 if( main_bol->outcnt() > 1 ) {// BoolNode shared?
1520 _igvn.hash_delete(main_cle);
1521 main_bol = main_bol->clone();// Clone a private BoolNode
1522 register_new_node( main_bol, main_cle->in(0) );
1523 main_cle->set_req(1,main_bol);
1524 }
1525 Node *main_cmp = main_bol->in(1);
1526 if( main_cmp->outcnt() > 1 ) { // CmpNode shared?
1527 _igvn.hash_delete(main_bol);
1528 main_cmp = main_cmp->clone();// Clone a private CmpNode
1529 register_new_node( main_cmp, main_cle->in(0) );
1530 main_bol->set_req(1,main_cmp);
1531 }
1532 // Hack the now-private loop bounds
1533 _igvn.hash_delete(main_cmp);
1534 main_cmp->set_req(2, main_limit);
1535 _igvn._worklist.push(main_cmp);
1536 // The OpaqueNode is unshared by design
1537 _igvn.hash_delete(opqzm);
1538 assert( opqzm->outcnt() == 1, "cannot hack shared node" );
1539 opqzm->set_req(1,main_limit);
1540 _igvn._worklist.push(opqzm);
1541 }
1543 //------------------------------DCE_loop_body----------------------------------
1544 // Remove simplistic dead code from loop body
1545 void IdealLoopTree::DCE_loop_body() {
1546 for( uint i = 0; i < _body.size(); i++ )
1547 if( _body.at(i)->outcnt() == 0 )
1548 _body.map( i--, _body.pop() );
1549 }
1552 //------------------------------adjust_loop_exit_prob--------------------------
1553 // Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage.
1554 // Replace with a 1-in-10 exit guess.
1555 void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) {
1556 Node *test = tail();
1557 while( test != _head ) {
1558 uint top = test->Opcode();
1559 if( top == Op_IfTrue || top == Op_IfFalse ) {
1560 int test_con = ((ProjNode*)test)->_con;
1561 assert(top == (uint)(test_con? Op_IfTrue: Op_IfFalse), "sanity");
1562 IfNode *iff = test->in(0)->as_If();
1563 if( iff->outcnt() == 2 ) { // Ignore dead tests
1564 Node *bol = iff->in(1);
1565 if( bol && bol->req() > 1 && bol->in(1) &&
1566 ((bol->in(1)->Opcode() == Op_StorePConditional ) ||
1567 (bol->in(1)->Opcode() == Op_StoreIConditional ) ||
1568 (bol->in(1)->Opcode() == Op_StoreLConditional ) ||
1569 (bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
1570 (bol->in(1)->Opcode() == Op_CompareAndSwapL ) ||
1571 (bol->in(1)->Opcode() == Op_CompareAndSwapP ) ||
1572 (bol->in(1)->Opcode() == Op_CompareAndSwapN )))
1573 return; // Allocation loops RARELY take backedge
1574 // Find the OTHER exit path from the IF
1575 Node* ex = iff->proj_out(1-test_con);
1576 float p = iff->_prob;
1577 if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) {
1578 if( top == Op_IfTrue ) {
1579 if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) {
1580 iff->_prob = PROB_STATIC_FREQUENT;
1581 }
1582 } else {
1583 if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) {
1584 iff->_prob = PROB_STATIC_INFREQUENT;
1585 }
1586 }
1587 }
1588 }
1589 }
1590 test = phase->idom(test);
1591 }
1592 }
1595 //------------------------------policy_do_remove_empty_loop--------------------
1596 // Micro-benchmark spamming. Policy is to always remove empty loops.
1597 // The 'DO' part is to replace the trip counter with the value it will
1598 // have on the last iteration. This will break the loop.
1599 bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
1600 // Minimum size must be empty loop
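// (the CountedLoop, its phi, the incr AddI, the CmpI, the Bool, the
// CountedLoopEnd and the backedge IfTrue roughly account for those 7 nodes)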
1601 if (_body.size() > 7/*number of nodes in an empty loop*/)
1602 return false;
1604 if (!_head->is_CountedLoop())
1605 return false; // Dead loop
1606 CountedLoopNode *cl = _head->as_CountedLoop();
1607 if (!cl->loopexit())
1608 return false; // Malformed loop
1609 if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
1610 return false; // Infinite loop
1611 #ifndef PRODUCT
1612 if (PrintOpto) {
1613 tty->print("Removing empty loop");
1614 this->dump_head();
1615 } else if (TraceLoopOpts) {
1616 tty->print("Empty ");
1617 this->dump_head();
1618 }
1619 #endif
1620 #ifdef ASSERT
1621 // Ensure there is only one phi, which is the iv.
1622 Node* iv = NULL;
1623 for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) {
1624 Node* n = cl->fast_out(i);
1625 if (n->Opcode() == Op_Phi) {
1626 assert(iv == NULL, "Too many phis" );
1627 iv = n;
1628 }
1629 }
1630 assert(iv == cl->phi(), "Wrong phi" );
1631 #endif
1632 // Replace the phi at loop head with the final value of the last
1633 // iteration. Then the CountedLoopEnd will collapse (backedge never
1634 // taken) and all loop-invariant uses of the exit values will be correct.
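// E.g. with a unit stride, for (int i = init; i < limit; i++) { } :
// the phi becomes limit - stride, its value on the final iteration.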
1635 Node *phi = cl->phi();
1636 Node *final = new (phase->C, 3) SubINode( cl->limit(), cl->stride() );
1637 phase->register_new_node(final,cl->in(LoopNode::EntryControl));
1638 phase->_igvn.replace_node(phi,final);
1639 phase->C->set_major_progress();
1640 return true;
1641 }
1644 //=============================================================================
1645 //------------------------------iteration_split_impl---------------------------
1646 bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) {
1647 // Check and remove empty loops (spam micro-benchmarks)
1648 if( policy_do_remove_empty_loop(phase) )
1649 return true; // Here we removed an empty loop
1651 bool should_peel = policy_peeling(phase); // Should we peel?
1653 bool should_unswitch = policy_unswitching(phase);
1655 // Non-counted loops may be peeled; exactly 1 iteration is peeled.
1656 // This removes loop-invariant tests (usually null checks).
1657 if( !_head->is_CountedLoop() ) { // Non-counted loop
1658 if (PartialPeelLoop && phase->partial_peel(this, old_new)) {
1659 // Partial peel succeeded so terminate this round of loop opts
1660 return false;
1661 }
1662 if( should_peel ) { // Should we peel?
1663 #ifndef PRODUCT
1664 if (PrintOpto) tty->print_cr("should_peel");
1665 #endif
1666 phase->do_peeling(this,old_new);
1667 } else if( should_unswitch ) {
1668 phase->do_unswitching(this, old_new);
1669 }
1670 return true;
1671 }
1672 CountedLoopNode *cl = _head->as_CountedLoop();
1674 if( !cl->loopexit() ) return true; // Ignore various kinds of broken loops
1676 // Do nothing special to pre- and post- loops
1677 if( cl->is_pre_loop() || cl->is_post_loop() ) return true;
1679 // Compute loop trip count from profile data
1680 compute_profile_trip_cnt(phase);
1682 // Before attempting fancy unrolling, RCE or alignment, see if we want
1683 // to completely unroll this loop or do loop unswitching.
1684 if( cl->is_normal_loop() ) {
1685 if (should_unswitch) {
1686 phase->do_unswitching(this, old_new);
1687 return true;
1688 }
1689 bool should_maximally_unroll = policy_maximally_unroll(phase);
1690 if( should_maximally_unroll ) {
1691 // Here we did some unrolling and peeling. Eventually we will
1692 // completely unroll this loop and it will no longer be a loop.
1693 phase->do_maximally_unroll(this,old_new);
1694 return true;
1695 }
1696 }
1699 // Counted loops may be peeled, may need some iterations run up
1700 // front for RCE, and may want to align loop refs to a cache
1701 // line. Thus we clone a full loop up front whose trip count is
1702 // at least 1 (if peeling), but may be several more.
1704 // The main loop will start cache-line aligned with at least 1
1705 // iteration of the unrolled body (zero-trip test required) and
1706 // will have some range checks removed.
1708 // A post-loop will finish any odd iterations (leftover after
1709 // unrolling), plus any needed for RCE purposes.
1711 bool should_unroll = policy_unroll(phase);
1713 bool should_rce = policy_range_check(phase);
1715 bool should_align = policy_align(phase);
1717 // If not RCE'ing (iteration splitting) or Aligning, then we do not
1718 // need a pre-loop. We may still need to peel an initial iteration but
1719 // we will not be needing an unknown number of pre-iterations.
1720 //
1721 // Basically, if may_rce_align reports FALSE the first time through,
1722 // we will not be able to do RCE or alignment on this loop later.
1723 bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align;
1725 // If we have any of these conditions (RCE, alignment, unrolling) met, then
1726 // we switch to the pre-/main-/post-loop model. This model also covers
1727 // peeling.
1728 if( should_rce || should_align || should_unroll ) {
1729 if( cl->is_normal_loop() ) // Convert to 'pre/main/post' loops
1730 phase->insert_pre_post_loops(this,old_new, !may_rce_align);
1732 // Adjust the pre- and main-loop limits to let the pre and post loops run
1733 // with full checks, but the main-loop with no checks. Remove said
1734 // checks from the main body.
1735 if( should_rce )
1736 phase->do_range_check(this,old_new);
1738 // Double loop body for unrolling. Adjust the minimum-trip test (will do
1739 // twice as many iterations as before) and the main body limit (only do
1740 // an even number of trips). If we are peeling, we might enable some RCE
1741 // and we'd rather unroll the post-RCE'd loop SO... do not unroll if
1742 // peeling.
1743 if( should_unroll && !should_peel )
1744 phase->do_unroll(this,old_new, true);
1746 // Adjust the pre-loop limits to align the main body
1747 // iterations.
1748 if( should_align )
1749 Unimplemented();
1751 } else { // Else we have an unchanged counted loop
1752 if( should_peel ) // Might want to peel but do nothing else
1753 phase->do_peeling(this,old_new);
1754 }
1755 return true;
1756 }
1759 //=============================================================================
1760 //------------------------------iteration_split--------------------------------
1761 bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) {
1762 // Recursively iteration split nested loops
1763 if (_child && !_child->iteration_split(phase, old_new))
1764 return false;
1766 // Clean out prior deadwood
1767 DCE_loop_body();
1770 // Look for loop-exit tests with the 50/50 guesses from the parsing stage.
1771 // Replace with a 1-in-10 exit guess.
1772 if (_parent /*not the root loop*/ &&
1773 !_irreducible &&
1774 // Also ignore the occasional dead backedge
1775 !tail()->is_top()) {
1776 adjust_loop_exit_prob(phase);
1777 }
1779 // Gate unrolling, RCE and peeling efforts.
1780 if (!_child && // If not an inner loop, do not split
1781 !_irreducible &&
1782 _allow_optimizations &&
1783 !tail()->is_top()) { // Also ignore the occasional dead backedge
1784 if (!_has_call) {
1785 if (!iteration_split_impl(phase, old_new)) {
1786 return false;
1787 }
1788 } else if (policy_unswitching(phase)) {
1789 phase->do_unswitching(this, old_new);
1790 }
1791 }
1793 // Minor offset re-organization to remove loop-fallout uses of
1794 // the trip counter when there was no major reshaping.
1795 phase->reorg_offsets(this);
1797 if (_next && !_next->iteration_split(phase, old_new))
1798 return false;
1799 return true;
1800 }
1802 //-------------------------------is_uncommon_trap_proj----------------------------
1803 // Return true if proj is of the form "proj->[region->..]call_uct"
1804 bool PhaseIdealLoop::is_uncommon_trap_proj(ProjNode* proj, Deoptimization::DeoptReason reason) {
1805 int path_limit = 10;
1806 assert(proj, "invalid argument");
1807 Node* out = proj;
1808 for (int ct = 0; ct < path_limit; ct++) {
1809 out = out->unique_ctrl_out();
1810 if (out == NULL || out->is_Root() || out->is_Start())
1811 return false;
1812 if (out->is_CallStaticJava()) {
1813 int req = out->as_CallStaticJava()->uncommon_trap_request();
1814 if (req != 0) {
1815 Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req);
1816 if (trap_reason == reason || reason == Deoptimization::Reason_none) {
1817 return true;
1818 }
1819 }
1820 return false; // don't do further after call
1821 }
1822 }
1823 return false;
1824 }
1826 //-------------------------------is_uncommon_trap_if_pattern-------------------------
1827 // Return true for "if(test)-> proj -> ...
1828 // |
1829 // V
1830 // other_proj->[region->..]call_uct"
1831 //
1832 // "must_reason_predicate" means the uct reason must be Reason_predicate
1833 bool PhaseIdealLoop::is_uncommon_trap_if_pattern(ProjNode *proj, Deoptimization::DeoptReason reason) {
1834 Node *in0 = proj->in(0);
1835 if (!in0->is_If()) return false;
1836 // Variation of a dead If node.
1837 if (in0->outcnt() < 2) return false;
1838 IfNode* iff = in0->as_If();
1840 // We need the "If(Conv2B(Opaque1(...)))" pattern for reason_predicate
1841 if (reason != Deoptimization::Reason_none) {
1842 if (iff->in(1)->Opcode() != Op_Conv2B ||
1843 iff->in(1)->in(1)->Opcode() != Op_Opaque1) {
1844 return false;
1845 }
1846 }
1848 ProjNode* other_proj = iff->proj_out(1-proj->_con)->as_Proj();
1849 return is_uncommon_trap_proj(other_proj, reason);
1850 }
1852 //-------------------------------register_control-------------------------
1853 void PhaseIdealLoop::register_control(Node* n, IdealLoopTree *loop, Node* pred) {
1854 assert(n->is_CFG(), "must be control node");
1855 _igvn.register_new_node_with_optimizer(n);
1856 loop->_body.push(n);
1857 set_loop(n, loop);
1858 // When called from beautify_loops() idom is not constructed yet.
1859 if (_idom != NULL) {
1860 set_idom(n, pred, dom_depth(pred));
1861 }
1862 }
1864 //------------------------------create_new_if_for_predicate------------------------
1865 // Create a new if above the uct_if_pattern for the predicate to be promoted.
1866 //
1867 // before after
1868 // ---------- ----------
1869 // ctrl ctrl
1870 // | |
1871 // | |
1872 // v v
1873 // iff new_iff
1874 // / \ / \
1875 // / \ / \
1876 // v v v v
1877 // uncommon_proj cont_proj if_uct if_cont
1878 // \ | | | |
1879 // \ | | | |
1880 // v v v | v
1881 // rgn loop | iff
1882 // | | / \
1883 // | | / \
1884 // v | v v
1885 // uncommon_trap | uncommon_proj cont_proj
1886 // \ \ | |
1887 // \ \ | |
1888 // v v v v
1889 // rgn loop
1890 // |
1891 // |
1892 // v
1893 // uncommon_trap
1894 //
1895 //
1896 // We will create a region to guard the uct call if there is none there.
1897 // The true projection (if_cont) of the new_iff is returned.
1898 // This code is also used to clone predicates to cloned loops.
1899 ProjNode* PhaseIdealLoop::create_new_if_for_predicate(ProjNode* cont_proj, Node* new_entry,
1900 Deoptimization::DeoptReason reason) {
1901 assert(is_uncommon_trap_if_pattern(cont_proj, reason), "must be a uct if pattern!");
1902 IfNode* iff = cont_proj->in(0)->as_If();
1904 ProjNode *uncommon_proj = iff->proj_out(1 - cont_proj->_con);
1905 Node *rgn = uncommon_proj->unique_ctrl_out();
1906 assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct");
1908 if (!rgn->is_Region()) { // create a region to guard the call
1909 assert(rgn->is_Call(), "must be call uct");
1910 CallNode* call = rgn->as_Call();
1911 IdealLoopTree* loop = get_loop(call);
1912 rgn = new (C, 1) RegionNode(1);
1913 rgn->add_req(uncommon_proj);
1914 register_control(rgn, loop, uncommon_proj);
1915 _igvn.hash_delete(call);
1916 call->set_req(0, rgn);
1917 // When called from beautify_loops() idom is not constructed yet.
1918 if (_idom != NULL) {
1919 set_idom(call, rgn, dom_depth(rgn));
1920 }
1921 }
1923 Node* entry = iff->in(0);
1924 if (new_entry != NULL) {
1925 // Cloning the predicate to a new location.
1926 entry = new_entry;
1927 }
1928 // Create new_iff
1929 IdealLoopTree* lp = get_loop(entry);
1930 IfNode *new_iff = new (C, 2) IfNode(entry, NULL, iff->_prob, iff->_fcnt);
1931 register_control(new_iff, lp, entry);
1932 Node *if_cont = new (C, 1) IfTrueNode(new_iff);
1933 Node *if_uct = new (C, 1) IfFalseNode(new_iff);
1934 if (cont_proj->is_IfFalse()) {
1935 // Swap
1936 Node* tmp = if_uct; if_uct = if_cont; if_cont = tmp;
1937 }
1938 register_control(if_cont, lp, new_iff);
1939 register_control(if_uct, get_loop(rgn), new_iff);
1941 // if_uct to rgn
1942 _igvn.hash_delete(rgn);
1943 rgn->add_req(if_uct);
1944 // When called from beautify_loops() idom is not constructed yet.
1945 if (_idom != NULL) {
1946 Node* ridom = idom(rgn);
1947 Node* nrdom = dom_lca(ridom, new_iff);
1948 set_idom(rgn, nrdom, dom_depth(rgn));
1949 }
1950 // rgn must have no phis
1951 assert(!rgn->as_Region()->has_phi(), "region must have no phis");
1953 if (new_entry == NULL) {
1954 // Attach if_cont to iff
1955 _igvn.hash_delete(iff);
1956 iff->set_req(0, if_cont);
1957 if (_idom != NULL) {
1958 set_idom(iff, if_cont, dom_depth(iff));
1959 }
1960 }
1961 return if_cont->as_Proj();
1962 }
1964 //--------------------------find_predicate_insertion_point-------------------
1965 // Find a good location to insert a predicate
1966 ProjNode* PhaseIdealLoop::find_predicate_insertion_point(Node* start_c, Deoptimization::DeoptReason reason) {
1967 if (start_c == NULL || !start_c->is_Proj())
1968 return NULL;
1969 if (is_uncommon_trap_if_pattern(start_c->as_Proj(), reason)) {
1970 return start_c->as_Proj();
1971 }
1972 return NULL;
1973 }
1975 //--------------------------find_predicate------------------------------------
1976 // Find a predicate
1977 Node* PhaseIdealLoop::find_predicate(Node* entry) {
1978 Node* predicate = NULL;
1979 if (UseLoopPredicate) {
1980 predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
1981 if (predicate != NULL) { // right pattern that can be used by loop predication
1982 assert(entry->in(0)->in(1)->in(1)->Opcode()==Op_Opaque1, "must be");
1983 return entry;
1984 }
1985 }
1986 return NULL;
1987 }
1989 //------------------------------Invariance-----------------------------------
1990 // Helper class for loop_predication_impl to compute invariance on the fly and
1991 // clone invariants.
1992 class Invariance : public StackObj {
1993 VectorSet _visited, _invariant;
1994 Node_Stack _stack;
1995 VectorSet _clone_visited;
1996 Node_List _old_new; // map of old to new (clone)
1997 IdealLoopTree* _lpt;
1998 PhaseIdealLoop* _phase;
2000 // Helper function to set up state for the invariance computation.
2001 // If n is a known invariant, record it directly. Otherwise, check
2002 // whether n can be pushed onto the stack for further processing.
2003 void visit(Node* use, Node* n) {
2004 if (_lpt->is_invariant(n)) { // known invariant
2005 _invariant.set(n->_idx);
2006 } else if (!n->is_CFG()) {
2007 Node *n_ctrl = _phase->ctrl_or_self(n);
2008 Node *u_ctrl = _phase->ctrl_or_self(use); // self if use is a CFG
2009 if (_phase->is_dominator(n_ctrl, u_ctrl)) {
2010 _stack.push(n, n->in(0) == NULL ? 1 : 0);
2011 }
2012 }
2013 }
2015 // Compute invariance for "n" and (possibly) all its inputs recursively
2016 // on the fly
2017 void compute_invariance(Node* n) {
2018 assert(_visited.test(n->_idx), "must be");
2019 visit(n, n);
2020 while (_stack.is_nonempty()) {
2021 Node* n = _stack.node();
2022 uint idx = _stack.index();
2023 if (idx == n->req()) { // all inputs are processed
2024 _stack.pop();
2025 // n is invariant if its inputs are all invariant
2026 bool all_inputs_invariant = true;
2027 for (uint i = 0; i < n->req(); i++) {
2028 Node* in = n->in(i);
2029 if (in == NULL) continue;
2030 assert(_visited.test(in->_idx), "must have visited input");
2031 if (!_invariant.test(in->_idx)) { // bad guy
2032 all_inputs_invariant = false;
2033 break;
2034 }
2035 }
2036 if (all_inputs_invariant) {
2037 _invariant.set(n->_idx); // I am an invariant too
2038 }
2039 } else { // process next input
2040 _stack.set_index(idx + 1);
2041 Node* m = n->in(idx);
2042 if (m != NULL && !_visited.test_set(m->_idx)) {
2043 visit(n, m);
2044 }
2045 }
2046 }
2047 }
2049 // Helper function to set up _old_new map for clone_nodes.
2050 // If n is a known invariant, set up directly ("clone" of n == n).
2051 // Otherwise, push n onto the stack for real cloning.
2052 void clone_visit(Node* n) {
2053 assert(_invariant.test(n->_idx), "must be invariant");
2054 if (_lpt->is_invariant(n)) { // known invariant
2055 _old_new.map(n->_idx, n);
2056 } else{ // to be cloned
2057 assert (!n->is_CFG(), "should not see CFG here");
2058 _stack.push(n, n->in(0) == NULL ? 1 : 0);
2059 }
2060 }
2062 // Clone "n" and (possibly) all its inputs recursively
2063 void clone_nodes(Node* n, Node* ctrl) {
2064 clone_visit(n);
2065 while (_stack.is_nonempty()) {
2066 Node* n = _stack.node();
2067 uint idx = _stack.index();
2068 if (idx == n->req()) { // all inputs processed, clone n!
2069 _stack.pop();
2070 // clone invariant node
2071 Node* n_cl = n->clone();
2072 _old_new.map(n->_idx, n_cl);
2073 _phase->register_new_node(n_cl, ctrl);
2074 for (uint i = 0; i < n->req(); i++) {
2075 Node* in = n_cl->in(i);
2076 if (in == NULL) continue;
2077 n_cl->set_req(i, _old_new[in->_idx]);
2078 }
2079 } else { // process next input
2080 _stack.set_index(idx + 1);
2081 Node* m = n->in(idx);
2082 if (m != NULL && !_clone_visited.test_set(m->_idx)) {
2083 clone_visit(m); // visit the input
2084 }
2085 }
2086 }
2087 }
2089 public:
2090 Invariance(Arena* area, IdealLoopTree* lpt) :
2091 _lpt(lpt), _phase(lpt->_phase),
2092 _visited(area), _invariant(area), _stack(area, 10 /* guess */),
2093 _clone_visited(area), _old_new(area)
2094 {}
2096 // Map old to n for invariance computation and clone
2097 void map_ctrl(Node* old, Node* n) {
2098 assert(old->is_CFG() && n->is_CFG(), "must be");
2099 _old_new.map(old->_idx, n); // "clone" of old is n
2100 _invariant.set(old->_idx); // old is invariant
2101 _clone_visited.set(old->_idx);
2102 }
2104 // Driver function to compute invariance
2105 bool is_invariant(Node* n) {
2106 if (!_visited.test_set(n->_idx))
2107 compute_invariance(n);
2108 return (_invariant.test(n->_idx) != 0);
2109 }
2111 // Driver function to clone invariant
2112 Node* clone(Node* n, Node* ctrl) {
2113 assert(ctrl->is_CFG(), "must be");
2114 assert(_invariant.test(n->_idx), "must be an invariant");
2115 if (!_clone_visited.test(n->_idx))
2116 clone_nodes(n, ctrl);
2117 return _old_new[n->_idx];
2118 }
2119 };
2121 //------------------------------is_range_check_if -----------------------------------
2122 // Returns true if the predicate of iff is in "scale*iv + offset u< load_range(ptr)" format
2123 // Note: this function is particularly designed for loop predication. We require load_range
2124 // and offset to be loop invariant, computed on the fly by "invar".
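// (The single unsigned compare folds the "0 <= index" and
// "index < length" halves of a range check into one test.)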
2125 bool IdealLoopTree::is_range_check_if(IfNode *iff, PhaseIdealLoop *phase, Invariance& invar) const {
2126 if (!is_loop_exit(iff)) {
2127 return false;
2128 }
2129 if (!iff->in(1)->is_Bool()) {
2130 return false;
2131 }
2132 const BoolNode *bol = iff->in(1)->as_Bool();
2133 if (bol->_test._test != BoolTest::lt) {
2134 return false;
2135 }
2136 if (!bol->in(1)->is_Cmp()) {
2137 return false;
2138 }
2139 const CmpNode *cmp = bol->in(1)->as_Cmp();
2140 if (cmp->Opcode() != Op_CmpU ) {
2141 return false;
2142 }
2143 Node* range = cmp->in(2);
2144 if (range->Opcode() != Op_LoadRange) {
2145 const TypeInt* tint = phase->_igvn.type(range)->isa_int();
2146 if (!OptimizeFill || tint == NULL || tint->empty() || tint->_lo < 0) {
2147 // Allow predication on positive values that aren't LoadRanges.
2148 // This allows optimization of loops where the length of the
2149 // array is a known value and doesn't need to be loaded back
2150 // from the array.
2151 return false;
2152 }
2153 }
2154 if (!invar.is_invariant(range)) {
2155 return false;
2156 }
2157 Node *iv = _head->as_CountedLoop()->phi();
2158 int scale = 0;
2159 Node *offset = NULL;
2160 if (!phase->is_scaled_iv_plus_offset(cmp->in(1), iv, &scale, &offset)) {
2161 return false;
2162 }
2163 if(offset && !invar.is_invariant(offset)) { // offset must be invariant
2164 return false;
2165 }
2166 return true;
2167 }
2169 //------------------------------rc_predicate-----------------------------------
2170 // Create a range check predicate
2171 //
2172 // for (i = init; i < limit; i += stride) {
2173 // a[scale*i+offset]
2174 // }
2175 //
2176 // Compute max(scale*i + offset) for init <= i < limit and build the predicate
2177 // as "max(scale*i + offset) u< a.length".
2178 //
2179 // There are two cases for max(scale*i + offset):
2180 // (1) stride*scale > 0
2181 // max(scale*i + offset) = scale*(limit-stride) + offset
2182 // (2) stride*scale < 0
2183 // max(scale*i + offset) = scale*init + offset
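// E.g. scale == 1, stride == 1 (case 1): max = limit - 1 + offset,
// the index used on the final iteration.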
2184 BoolNode* PhaseIdealLoop::rc_predicate(Node* ctrl,
2185 int scale, Node* offset,
2186 Node* init, Node* limit, Node* stride,
2187 Node* range, bool upper) {
2188 DEBUG_ONLY(ttyLocker ttyl);
2189 if (TraceLoopPredicate) tty->print("rc_predicate ");
2191 Node* max_idx_expr = init;
2192 int stride_con = stride->get_int();
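// The relevant extreme (max when upper, min when !upper) falls on the
// last iteration exactly when the test below succeeds; otherwise at init.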
2193 if ((stride_con > 0) == (scale > 0) == upper) {
2194 max_idx_expr = new (C, 3) SubINode(limit, stride);
2195 register_new_node(max_idx_expr, ctrl);
2196 if (TraceLoopPredicate) tty->print("(limit - stride) ");
2197 } else {
2198 if (TraceLoopPredicate) tty->print("init ");
2199 }
2201 if (scale != 1) {
2202 ConNode* con_scale = _igvn.intcon(scale);
2203 max_idx_expr = new (C, 3) MulINode(max_idx_expr, con_scale);
2204 register_new_node(max_idx_expr, ctrl);
2205 if (TraceLoopPredicate) tty->print("* %d ", scale);
2206 }
2208 if (offset && (!offset->is_Con() || offset->get_int() != 0)){
2209 max_idx_expr = new (C, 3) AddINode(max_idx_expr, offset);
2210 register_new_node(max_idx_expr, ctrl);
2211 if (TraceLoopPredicate)
2212 if (offset->is_Con()) tty->print("+ %d ", offset->get_int());
2213 else tty->print("+ offset ");
2214 }
2216 CmpUNode* cmp = new (C, 3) CmpUNode(max_idx_expr, range);
2217 register_new_node(cmp, ctrl);
2218 BoolNode* bol = new (C, 2) BoolNode(cmp, BoolTest::lt);
2219 register_new_node(bol, ctrl);
2221 if (TraceLoopPredicate) tty->print_cr("<u range");
2222 return bol;
2223 }
2225 //------------------------------ loop_predication_impl--------------------------
2226 // Insert loop predicates for null checks and range checks
2227 bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
2228 if (!UseLoopPredicate) return false;
2230 if (!loop->_head->is_Loop()) {
2231 // Could be a simple region when irreducible loops are present.
2232 return false;
2233 }
2235 if (loop->_head->unique_ctrl_out()->Opcode() == Op_NeverBranch) {
2236 // do nothing for infinite loops
2237 return false;
2238 }
2240 CountedLoopNode *cl = NULL;
2241 if (loop->_head->is_CountedLoop()) {
2242 cl = loop->_head->as_CountedLoop();
2243 // do nothing for iteration-split loops
2244 if (!cl->is_normal_loop()) return false;
2245 }
2247 LoopNode *lpn = loop->_head->as_Loop();
2248 Node* entry = lpn->in(LoopNode::EntryControl);
2250 ProjNode *predicate_proj = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
2251 if (!predicate_proj) {
2252 #ifndef PRODUCT
2253 if (TraceLoopPredicate) {
2254 tty->print("missing predicate:");
2255 loop->dump_head();
2256 lpn->dump(1);
2257 }
2258 #endif
2259 return false;
2260 }
2261 ConNode* zero = _igvn.intcon(0);
2262 set_ctrl(zero, C->root());
2264 ResourceArea *area = Thread::current()->resource_area();
2265 Invariance invar(area, loop);
2267 // Create list of if-projs such that a newer proj dominates all older
2268 // projs in the list, and they all dominate loop->tail()
2269 Node_List if_proj_list(area);
2270 LoopNode *head = loop->_head->as_Loop();
2271 Node *current_proj = loop->tail(); //start from tail
2272 while ( current_proj != head ) {
2273 if (loop == get_loop(current_proj) && // still in the loop ?
2274 current_proj->is_Proj() && // is a projection ?
2275 current_proj->in(0)->Opcode() == Op_If) { // is an if projection ?
2276 if_proj_list.push(current_proj);
2277 }
2278 current_proj = idom(current_proj);
2279 }
2281 bool hoisted = false; // true if at least one proj is promoted
2282 while (if_proj_list.size() > 0) {
2283 // The following is changed to nonnull when a predicate can be hoisted
2284 ProjNode* new_predicate_proj = NULL;
2286 ProjNode* proj = if_proj_list.pop()->as_Proj();
2287 IfNode* iff = proj->in(0)->as_If();
2289 if (!is_uncommon_trap_if_pattern(proj, Deoptimization::Reason_none)) {
2290 if (loop->is_loop_exit(iff)) {
2291 // Stop processing the remaining projs in the list because their
2292 // execution depends on the condition of "iff" (iff->in(1)).
2293 break;
2294 } else {
2295 // Both arms are inside the loop. There are two cases:
2296 // (1) there is one backward branch. In this case, any remaining proj
2297 // in the if_proj list post-dominates "iff". So, the condition of "iff"
2298 // does not directly determine the execution of the remaining projs, and we
2299 // can safely continue.
2300 // (2) both arms are forward branches, i.e. a diamond shape. In this case, "proj"
2301 // does not dominate loop->tail(), so it cannot be in the if_proj list.
2302 continue;
2303 }
2304 }
2306 Node* test = iff->in(1);
2307 if (!test->is_Bool()){ //Conv2B, ...
2308 continue;
2309 }
2310 BoolNode* bol = test->as_Bool();
2311 if (invar.is_invariant(bol)) {
2312 // Invariant test
2313 new_predicate_proj = create_new_if_for_predicate(predicate_proj, NULL,
2314 Deoptimization::Reason_predicate);
2315 Node* ctrl = new_predicate_proj->in(0)->as_If()->in(0);
2316 BoolNode* new_predicate_bol = invar.clone(bol, ctrl)->as_Bool();
2318 // Negate test if necessary
2319 bool negated = false;
2320 if (proj->_con != predicate_proj->_con) {
2321 new_predicate_bol = new (C, 2) BoolNode(new_predicate_bol->in(1), new_predicate_bol->_test.negate());
2322 register_new_node(new_predicate_bol, ctrl);
2323 negated = true;
2324 }
2325 IfNode* new_predicate_iff = new_predicate_proj->in(0)->as_If();
2326 _igvn.hash_delete(new_predicate_iff);
2327 new_predicate_iff->set_req(1, new_predicate_bol);
2328 #ifndef PRODUCT
2329 if (TraceLoopPredicate) {
2330 tty->print("Predicate invariant if%s: %d ", negated ? " negated" : "", new_predicate_iff->_idx);
2331 loop->dump_head();
2332 } else if (TraceLoopOpts) {
2333 tty->print("Predicate IC ");
2334 loop->dump_head();
2335 }
2336 #endif
2337 } else if (cl != NULL && loop->is_range_check_if(iff, this, invar)) {
2338 assert(proj->_con == predicate_proj->_con, "must match");
2340 // Range check for counted loops
2341 const Node* cmp = bol->in(1)->as_Cmp();
2342 Node* idx = cmp->in(1);
2343 assert(!invar.is_invariant(idx), "index is variant");
2344 assert(cmp->in(2)->Opcode() == Op_LoadRange || OptimizeFill, "must be");
2345 Node* rng = cmp->in(2);
2346 assert(invar.is_invariant(rng), "range must be invariant");
2347 int scale = 1;
2348 Node* offset = zero;
2349 bool ok = is_scaled_iv_plus_offset(idx, cl->phi(), &scale, &offset);
2350 assert(ok, "must be index expression");
2352 Node* init = cl->init_trip();
2353 Node* limit = cl->limit();
2354 Node* stride = cl->stride();
2356 // Build if's for the upper and lower bound tests. The
2357 // lower_bound test will dominate the upper bound test and all
2358 // cloned or created nodes will use the lower bound test as
2359 // their declared control.
2360 ProjNode* lower_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, Deoptimization::Reason_predicate);
2361 ProjNode* upper_bound_proj = create_new_if_for_predicate(predicate_proj, NULL, Deoptimization::Reason_predicate);
2362 assert(upper_bound_proj->in(0)->as_If()->in(0) == lower_bound_proj, "should dominate");
2363 Node *ctrl = lower_bound_proj->in(0)->as_If()->in(0);
2365 // Perform cloning to keep Invariance state correct since the
2366 // late schedule will place invariant things in the loop.
2367 rng = invar.clone(rng, ctrl);
2368 if (offset && offset != zero) {
2369 assert(invar.is_invariant(offset), "offset must be loop invariant");
2370 offset = invar.clone(offset, ctrl);
2371 }
2373 // Test the lower bound
2374 Node* lower_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, false);
2375 IfNode* lower_bound_iff = lower_bound_proj->in(0)->as_If();
2376 _igvn.hash_delete(lower_bound_iff);
2377 lower_bound_iff->set_req(1, lower_bound_bol);
2378 if (TraceLoopPredicate) tty->print_cr("lower bound check if: %d", lower_bound_iff->_idx);
2380 // Test the upper bound
2381 Node* upper_bound_bol = rc_predicate(ctrl, scale, offset, init, limit, stride, rng, true);
2382 IfNode* upper_bound_iff = upper_bound_proj->in(0)->as_If();
2383 _igvn.hash_delete(upper_bound_iff);
2384 upper_bound_iff->set_req(1, upper_bound_bol);
2385 if (TraceLoopPredicate) tty->print_cr("upper bound check if: %d", upper_bound_iff->_idx);
2387 // Fall through into rest of the clean up code which will move
2388 // any dependent nodes onto the upper bound test.
2389 new_predicate_proj = upper_bound_proj;
2391 #ifndef PRODUCT
2392 if (TraceLoopOpts && !TraceLoopPredicate) {
2393 tty->print("Predicate RC ");
2394 loop->dump_head();
2395 }
2396 #endif
2397 } else {
2398 // Loop variant check (for example, range check in non-counted loop)
2399 // with uncommon trap.
2400 continue;
2401 }
2402 assert(new_predicate_proj != NULL, "sanity");
2403 // Success - attach condition (new_predicate_bol) to predicate if
2404 invar.map_ctrl(proj, new_predicate_proj); // so that later invariance tests see the new control
2406 // Eliminate the old If in the loop body
2407 dominated_by( new_predicate_proj, iff, proj->_con != new_predicate_proj->_con );
2409 hoisted = true;
2410 C->set_major_progress();
2411 } // end while
2413 #ifndef PRODUCT
2414 // Report that loop predication has actually been performed
2415 // for this loop
2416 if (TraceLoopPredicate && hoisted) {
2417 tty->print("Loop Predication Performed:");
2418 loop->dump_head();
2419 }
2420 #endif
2422 return hoisted;
2423 }
2425 //------------------------------loop_predication--------------------------------
2426 // driver routine for loop predication optimization
2427 bool IdealLoopTree::loop_predication( PhaseIdealLoop *phase) {
2428 bool hoisted = false;
2429 // Recursively promote predicates
2430 if ( _child ) {
2431 hoisted = _child->loop_predication( phase);
2432 }
2434 // self
2435 if (!_irreducible && !tail()->is_top()) {
2436 hoisted |= phase->loop_predication_impl(this);
2437 }
2439 if ( _next ) { //sibling
2440 hoisted |= _next->loop_predication( phase);
2441 }
2443 return hoisted;
2444 }
2447 // Process all the loops in the loop tree and replace any fill
2448 // patterns with an intrinsic version.
2449 bool PhaseIdealLoop::do_intrinsify_fill() {
2450 bool changed = false;
2451 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
2452 IdealLoopTree* lpt = iter.current();
2453 changed |= intrinsify_fill(lpt);
2454 }
2455 return changed;
2456 }
2459 // Examine an inner loop looking for a single store of an invariant
2460 // value in a unit stride loop.
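// i.e. loops of the shape: for (int i = init; i < limit; i++) a[i] = v;
// where v is loop invariant.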
2461 bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
2462 Node*& shift, Node*& con) {
2463 const char* msg = NULL;
2464 Node* msg_node = NULL;
2466 store_value = NULL;
2467 con = NULL;
2468 shift = NULL;
2470 // Process the loop looking for stores. If there are multiple
2471 // stores or extra control flow, give up at this point.
2472 CountedLoopNode* head = lpt->_head->as_CountedLoop();
2473 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2474 Node* n = lpt->_body.at(i);
2475 if (n->outcnt() == 0) continue; // Ignore dead
2476 if (n->is_Store()) {
2477 if (store != NULL) {
2478 msg = "multiple stores";
2479 break;
2480 }
2481 int opc = n->Opcode();
2482 if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreCM) {
2483 msg = "oop fills not handled";
2484 break;
2485 }
2486 Node* value = n->in(MemNode::ValueIn);
2487 if (!lpt->is_invariant(value)) {
2488 msg = "variant store value";
2489 } else if (!_igvn.type(n->in(MemNode::Address))->isa_aryptr()) {
2490 msg = "not array address";
2491 }
2492 store = n;
2493 store_value = value;
2494 } else if (n->is_If() && n != head->loopexit()) {
2495 msg = "extra control flow";
2496 msg_node = n;
2497 }
2498 }
2500 if (store == NULL) {
2501 // No store in loop
2502 return false;
2503 }
2505 if (msg == NULL && head->stride_con() != 1) {
2506 // could handle negative strides too
2507 if (head->stride_con() < 0) {
2508 msg = "negative stride";
2509 } else {
2510 msg = "non-unit stride";
2511 }
2512 }
2514 if (msg == NULL && !store->in(MemNode::Address)->is_AddP()) {
2515 msg = "can't handle store address";
2516 msg_node = store->in(MemNode::Address);
2517 }
2519 if (msg == NULL &&
2520 (!store->in(MemNode::Memory)->is_Phi() ||
2521 store->in(MemNode::Memory)->in(LoopNode::LoopBackControl) != store)) {
2522 msg = "store memory isn't proper phi";
2523 msg_node = store->in(MemNode::Memory);
2524 }
2526 // Make sure there is an appropriate fill routine
2527 BasicType t = store->as_Mem()->memory_type();
2528 const char* fill_name;
2529 if (msg == NULL &&
2530 StubRoutines::select_fill_function(t, false, fill_name) == NULL) {
2531 msg = "unsupported store";
2532 msg_node = store;
2533 }
2535 if (msg != NULL) {
2536 #ifndef PRODUCT
2537 if (TraceOptimizeFill) {
2538 tty->print_cr("not fill intrinsic candidate: %s", msg);
2539 if (msg_node != NULL) msg_node->dump();
2540 }
2541 #endif
2542 return false;
2543 }
2545 // Make sure the address expression can be handled. It should be
2546 // head->phi * elsize + con. head->phi might have a ConvI2L.
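// E.g. for an int-array store the AddP typically unpacks into a constant
// header-offset Con plus LShiftX(ConvI2L(phi), 2) on 64-bit VMs.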
2547 Node* elements[4];
2548 Node* conv = NULL;
2549 bool found_index = false;
2550 int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
2551 for (int e = 0; e < count; e++) {
2552 Node* n = elements[e];
2553 if (n->is_Con() && con == NULL) {
2554 con = n;
2555 } else if (n->Opcode() == Op_LShiftX && shift == NULL) {
2556 Node* value = n->in(1);
2557 #ifdef _LP64
2558 if (value->Opcode() == Op_ConvI2L) {
2559 conv = value;
2560 value = value->in(1);
2561 }
2562 #endif
2563 if (value != head->phi()) {
2564 msg = "unhandled shift in address";
2565 } else {
2566 found_index = true;
2567 shift = n;
2568 assert(type2aelembytes(store->as_Mem()->memory_type(), true) == 1 << shift->in(2)->get_int(), "scale should match");
2569 }
2570 } else if (n->Opcode() == Op_ConvI2L && conv == NULL) {
2571 if (n->in(1) == head->phi()) {
2572 found_index = true;
2573 conv = n;
2574 } else {
2575 msg = "unhandled input to ConvI2L";
2576 }
2577 } else if (n == head->phi()) {
2578 // no shift, check below for allowed cases
2579 found_index = true;
2580 } else {
2581 msg = "unhandled node in address";
2582 msg_node = n;
2583 }
2584 }
2586 if (count == -1) {
2587 msg = "malformed address expression";
2588 msg_node = store;
2589 }
2591 if (!found_index) {
2592 msg = "missing use of index";
2593 }
2595 // byte sized items won't have a shift
2596 if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) {
2597 msg = "can't find shift";
2598 msg_node = store;
2599 }
2601 if (msg != NULL) {
2602 #ifndef PRODUCT
2603 if (TraceOptimizeFill) {
2604 tty->print_cr("not fill intrinsic: %s", msg);
2605 if (msg_node != NULL) msg_node->dump();
2606 }
2607 #endif
2608 return false;
2609 }
2611 // Now make sure all the other nodes in the loop can be handled
2612 VectorSet ok(Thread::current()->resource_area());
2614 // store related values are ok
2615 ok.set(store->_idx);
2616 ok.set(store->in(MemNode::Memory)->_idx);
2618 // Loop structure is ok
2619 ok.set(head->_idx);
2620 ok.set(head->loopexit()->_idx);
2621 ok.set(head->phi()->_idx);
2622 ok.set(head->incr()->_idx);
2623 ok.set(head->loopexit()->cmp_node()->_idx);
2624 ok.set(head->loopexit()->in(1)->_idx);
2626 // Address elements are ok
2627 if (con) ok.set(con->_idx);
2628 if (shift) ok.set(shift->_idx);
2629 if (conv) ok.set(conv->_idx);
2631 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2632 Node* n = lpt->_body.at(i);
2633 if (n->outcnt() == 0) continue; // Ignore dead
2634 if (ok.test(n->_idx)) continue;
2635 // Backedge projection is ok
2636 if (n->is_IfTrue() && n->in(0) == head->loopexit()) continue;
2637 if (!n->is_AddP()) {
2638 msg = "unhandled node";
2639 msg_node = n;
2640 break;
2641 }
2642 }
2644 // Make sure no unexpected values are used outside the loop
2645 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2646 Node* n = lpt->_body.at(i);
2647 // These values can be replaced with other nodes if they are used
2648 // outside the loop.
2649 if (n == store || n == head->loopexit() || n == head->incr() || n == store->in(MemNode::Memory)) continue;
2650 for (SimpleDUIterator iter(n); iter.has_next(); iter.next()) {
2651 Node* use = iter.get();
2652 if (!lpt->_body.contains(use)) {
2653 msg = "node is used outside loop";
2654 // lpt->_body.dump();
2655 msg_node = n;
2656 break;
2657 }
2658 }
2659 }
2661 #ifdef ASSERT
2662 if (TraceOptimizeFill) {
2663 if (msg != NULL) {
2664 tty->print_cr("no fill intrinsic: %s", msg);
2665 if (msg_node != NULL) msg_node->dump();
2666 } else {
2667 tty->print_cr("fill intrinsic for:");
2668 }
2669 store->dump();
2670 if (Verbose) {
2671 lpt->_body.dump();
2672 }
2673 }
2674 #endif
2676 return msg == NULL;
2677 }
2681 bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
2682 // Only for counted inner loops
2683 if (!lpt->is_counted() || !lpt->is_inner()) {
2684 return false;
2685 }
2687 // Must have constant stride
2688 CountedLoopNode* head = lpt->_head->as_CountedLoop();
2689 if (!head->stride_is_con() || !head->is_normal_loop()) {
2690 return false;
2691 }
2693 // Check that the body only contains a store of a loop invariant
2694 // value that is indexed by the loop phi.
2695 Node* store = NULL;
2696 Node* store_value = NULL;
2697 Node* shift = NULL;
2698 Node* offset = NULL;
2699 if (!match_fill_loop(lpt, store, store_value, shift, offset)) {
2700 return false;
2701 }
2703 // Now replace the whole loop body by a call to a fill routine that
2704 // covers the same region as the loop.
2705 Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base);
2707 // Build an expression for the beginning of the copy region
2708 Node* index = head->init_trip();
2709 #ifdef _LP64
2710 index = new (C, 2) ConvI2LNode(index);
2711 _igvn.register_new_node_with_optimizer(index);
2712 #endif
2713 if (shift != NULL) {
2714 // byte arrays don't require a shift but others do.
2715 index = new (C, 3) LShiftXNode(index, shift->in(2));
2716 _igvn.register_new_node_with_optimizer(index);
2717 }
2718 index = new (C, 4) AddPNode(base, base, index);
2719 _igvn.register_new_node_with_optimizer(index);
2720 Node* from = new (C, 4) AddPNode(base, index, offset);
2721 _igvn.register_new_node_with_optimizer(from);
2722 // Compute the number of elements to copy
2723 Node* len = new (C, 3) SubINode(head->limit(), head->init_trip());
2724 _igvn.register_new_node_with_optimizer(len);
2726 BasicType t = store->as_Mem()->memory_type();
2727 bool aligned = false;
2728 if (offset != NULL && head->init_trip()->is_Con()) {
2729 int element_size = type2aelembytes(t);
2730 aligned = (offset->find_intptr_t_type()->get_con() + head->init_trip()->get_int() * element_size) % HeapWordSize == 0;
2731 }
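// (aligned means the first filled element starts on a HeapWord boundary,
// so an aligned fill stub can be selected below.)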
2733 // Build a call to the fill routine
2734 const char* fill_name;
2735 address fill = StubRoutines::select_fill_function(t, aligned, fill_name);
2736 assert(fill != NULL, "what?");
2738 // Convert float/double to int/long for fill routines
2739 if (t == T_FLOAT) {
2740 store_value = new (C, 2) MoveF2INode(store_value);
2741 _igvn.register_new_node_with_optimizer(store_value);
2742 } else if (t == T_DOUBLE) {
2743 store_value = new (C, 2) MoveD2LNode(store_value);
2744 _igvn.register_new_node_with_optimizer(store_value);
2745 }
2747 Node* mem_phi = store->in(MemNode::Memory);
2748 Node* result_ctrl;
2749 Node* result_mem;
2750 const TypeFunc* call_type = OptoRuntime::array_fill_Type();
2751 int size = call_type->domain()->cnt();
2752 CallLeafNode *call = new (C, size) CallLeafNoFPNode(call_type, fill,
2753 fill_name, TypeAryPtr::get_array_body_type(t));
2754 call->init_req(TypeFunc::Parms+0, from);
2755 call->init_req(TypeFunc::Parms+1, store_value);
2756 #ifdef _LP64
2757 len = new (C, 2) ConvI2LNode(len);
2758 _igvn.register_new_node_with_optimizer(len);
2759 #endif
2760 call->init_req(TypeFunc::Parms+2, len);
2761 #ifdef _LP64
2762 call->init_req(TypeFunc::Parms+3, C->top());
2763 #endif
2764 call->init_req( TypeFunc::Control, head->init_control());
2765 call->init_req( TypeFunc::I_O , C->top() ) ; // does no i/o
2766 call->init_req( TypeFunc::Memory , mem_phi->in(LoopNode::EntryControl) );
2767 call->init_req( TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr) );
2768 call->init_req( TypeFunc::FramePtr, C->start()->proj_out(TypeFunc::FramePtr) );
2769 _igvn.register_new_node_with_optimizer(call);
2770 result_ctrl = new (C, 1) ProjNode(call,TypeFunc::Control);
2771 _igvn.register_new_node_with_optimizer(result_ctrl);
2772 result_mem = new (C, 1) ProjNode(call,TypeFunc::Memory);
2773 _igvn.register_new_node_with_optimizer(result_mem);
2775 // If this fill is tightly coupled to an allocation and overwrites
2776 // the whole body, allow it to take over the zeroing.
2777 AllocateNode* alloc = AllocateNode::Ideal_allocation(base, this);
2778 if (alloc != NULL && alloc->is_AllocateArray()) {
2779 Node* length = alloc->as_AllocateArray()->Ideal_length();
2780 if (head->limit() == length &&
2781 head->init_trip() == _igvn.intcon(0)) {
2782 if (TraceOptimizeFill) {
2783 tty->print_cr("Eliminated zeroing in allocation");
2784 }
2785 alloc->maybe_set_complete(&_igvn);
2786 } else {
2787 #ifdef ASSERT
2788 if (TraceOptimizeFill) {
2789 tty->print_cr("filling array but bounds don't match");
2790 alloc->dump();
2791 head->init_trip()->dump();
2792 head->limit()->dump();
2793 length->dump();
2794 }
2795 #endif
2796 }
2797 }
2799 // Redirect the old control and memory edges that are outside the loop.
2800 Node* exit = head->loopexit()->proj_out(0);
2801 // Sometimes the memory phi of the head is used as the outgoing
2802 // state of the loop. It's safe in this case to replace it with the
2803 // result_mem.
2804 _igvn.replace_node(store->in(MemNode::Memory), result_mem);
2805 _igvn.replace_node(exit, result_ctrl);
2806 _igvn.replace_node(store, result_mem);
2807 // Any uses of the increment outside of the loop become the loop limit.
2808 _igvn.replace_node(head->incr(), head->limit());
2810 // Disconnect the head from the loop.
2811 for (uint i = 0; i < lpt->_body.size(); i++) {
2812 Node* n = lpt->_body.at(i);
2813 _igvn.replace_node(n, C->top());
2814 }
2816 return true;
2817 }