Sat, 02 Apr 2011 10:54:15 -0700
7004535: Clone loop predicate during loop unswitch
Summary: Clone loop predicate for cloned loops
Reviewed-by: never

/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

//------------------------------is_loop_exit-----------------------------------
// Given an IfNode, return the loop-exiting projection or NULL if both
// arms remain in the loop.
Node *IdealLoopTree::is_loop_exit(Node *iff) const {
  if( iff->outcnt() != 2 ) return NULL;  // Ignore partially dead tests
  PhaseIdealLoop *phase = _phase;
  // Test is an IfNode, has 2 projections.  If BOTH are in the loop
  // we need loop unswitching instead of peeling.
  if( !is_member(phase->get_loop( iff->raw_out(0) )) )
    return iff->raw_out(0);
  if( !is_member(phase->get_loop( iff->raw_out(1) )) )
    return iff->raw_out(1);
  return NULL;
}


//=============================================================================

//------------------------------record_for_igvn----------------------------
// Put loop body on igvn work list
void IdealLoopTree::record_for_igvn() {
  for( uint i = 0; i < _body.size(); i++ ) {
    Node *n = _body.at(i);
    _phase->_igvn._worklist.push(n);
  }
}

//------------------------------compute_profile_trip_cnt----------------------------
// Compute loop trip count from profile data as
//    (backedge_count + loop_exit_count) / loop_exit_count
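// For illustration with made-up profile counts: if the backedge was taken
// 99 times and the single loop exit was taken once, the estimate is
// (99 + 1) / 1 = 100 trips per loop entry.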
void IdealLoopTree::compute_profile_trip_cnt( PhaseIdealLoop *phase ) {
  if (!_head->is_CountedLoop()) {
    return;
  }
  CountedLoopNode* head = _head->as_CountedLoop();
  if (head->profile_trip_cnt() != COUNT_UNKNOWN) {
    return; // Already computed
  }
  float trip_cnt = (float)max_jint; // default is big

  Node* back = head->in(LoopNode::LoopBackControl);
  while (back != head) {
    if ((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
        back->in(0) &&
        back->in(0)->is_If() &&
        back->in(0)->as_If()->_fcnt != COUNT_UNKNOWN &&
        back->in(0)->as_If()->_prob != PROB_UNKNOWN) {
      break;
    }
    back = phase->idom(back);
  }
  if (back != head) {
    assert((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
           back->in(0), "if-projection exists");
    IfNode* back_if = back->in(0)->as_If();
    float loop_back_cnt = back_if->_fcnt * back_if->_prob;

    // Now compute a loop exit count
    float loop_exit_cnt = 0.0f;
    for( uint i = 0; i < _body.size(); i++ ) {
      Node *n = _body[i];
      if( n->is_If() ) {
        IfNode *iff = n->as_If();
        if( iff->_fcnt != COUNT_UNKNOWN && iff->_prob != PROB_UNKNOWN ) {
          Node *exit = is_loop_exit(iff);
          if( exit ) {
            float exit_prob = iff->_prob;
            if (exit->Opcode() == Op_IfFalse) exit_prob = 1.0 - exit_prob;
            if (exit_prob > PROB_MIN) {
              float exit_cnt = iff->_fcnt * exit_prob;
              loop_exit_cnt += exit_cnt;
            }
          }
        }
      }
    }
    if (loop_exit_cnt > 0.0f) {
      trip_cnt = (loop_back_cnt + loop_exit_cnt) / loop_exit_cnt;
    } else {
      // No exit count, so fall back on the raw backedge count.
      trip_cnt = loop_back_cnt;
    }
  }
#ifndef PRODUCT
  if (TraceProfileTripCount) {
    tty->print_cr("compute_profile_trip_cnt  lp: %d cnt: %f", head->_idx, trip_cnt);
  }
#endif
  head->set_profile_trip_cnt(trip_cnt);
}

//---------------------is_invariant_addition-----------------------------
// Return nonzero index of invariant operand for an Add or Sub
// of (nonconstant) invariant and variant values. Helper for reassociate_invariants.
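// For example, with loop-invariant 'inv' and trip counter 'i':
// AddI(inv, i) yields 1, AddI(i, inv) yields 2, and AddI(inv1, inv2)
// yields 0 (the whole expression is invariant, nothing to reassociate).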
int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) {
  int op = n->Opcode();
  if (op == Op_AddI || op == Op_SubI) {
    bool in1_invar = this->is_invariant(n->in(1));
    bool in2_invar = this->is_invariant(n->in(2));
    if (in1_invar && !in2_invar) return 1;
    if (!in1_invar && in2_invar) return 2;
  }
  return 0;
}

//---------------------reassociate_add_sub-----------------------------
// Reassociate invariant add and subtract expressions:
//
// inv1 + (x + inv2)  =>  ( inv1 + inv2) + x
// (x + inv2) + inv1  =>  ( inv1 + inv2) + x
// inv1 + (x - inv2)  =>  ( inv1 - inv2) + x
// inv1 - (inv2 - x)  =>  ( inv1 - inv2) + x
// (x + inv2) - inv1  =>  (-inv1 + inv2) + x
// (x - inv2) + inv1  =>  ( inv1 - inv2) + x
// (x - inv2) - inv1  =>  (-inv1 - inv2) + x
// inv1 + (inv2 - x)  =>  ( inv1 + inv2) - x
// inv1 - (x - inv2)  =>  ( inv1 + inv2) - x
// (inv2 - x) + inv1  =>  ( inv1 + inv2) - x
// (inv2 - x) - inv1  =>  (-inv1 + inv2) - x
// inv1 - (x + inv2)  =>  ( inv1 - inv2) - x
//
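// The payoff in every row is that (inv1 op inv2) is loop invariant and is
// computed once at its early control, leaving a single add/sub of x in the
// loop.  E.g. inv1 + (i + inv2) with trip counter i becomes (inv1+inv2) + i.
//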
Node* IdealLoopTree::reassociate_add_sub(Node* n1, PhaseIdealLoop *phase) {
  if ((!n1->is_Add() && !n1->is_Sub()) || n1->outcnt() == 0) return NULL;
  if (is_invariant(n1)) return NULL;
  int inv1_idx = is_invariant_addition(n1, phase);
  if (!inv1_idx) return NULL;
  // Don't mess with add of constant (igvn moves them to expression tree root.)
  if (n1->is_Add() && n1->in(2)->is_Con()) return NULL;
  Node* inv1 = n1->in(inv1_idx);
  Node* n2 = n1->in(3 - inv1_idx);
  int inv2_idx = is_invariant_addition(n2, phase);
  if (!inv2_idx) return NULL;
  Node* x    = n2->in(3 - inv2_idx);
  Node* inv2 = n2->in(inv2_idx);

  bool neg_x    = n2->is_Sub() && inv2_idx == 1;
  bool neg_inv2 = n2->is_Sub() && inv2_idx == 2;
  bool neg_inv1 = n1->is_Sub() && inv1_idx == 2;
  if (n1->is_Sub() && inv1_idx == 1) {
    neg_x    = !neg_x;
    neg_inv2 = !neg_inv2;
  }
  Node* inv1_c = phase->get_ctrl(inv1);
  Node* inv2_c = phase->get_ctrl(inv2);
  Node* n_inv1;
  if (neg_inv1) {
    Node *zero = phase->_igvn.intcon(0);
    phase->set_ctrl(zero, phase->C->root());
    n_inv1 = new (phase->C, 3) SubINode(zero, inv1);
    phase->register_new_node(n_inv1, inv1_c);
  } else {
    n_inv1 = inv1;
  }
  Node* inv;
  if (neg_inv2) {
    inv = new (phase->C, 3) SubINode(n_inv1, inv2);
  } else {
    inv = new (phase->C, 3) AddINode(n_inv1, inv2);
  }
  phase->register_new_node(inv, phase->get_early_ctrl(inv));

  Node* addx;
  if (neg_x) {
    addx = new (phase->C, 3) SubINode(inv, x);
  } else {
    addx = new (phase->C, 3) AddINode(x, inv);
  }
  phase->register_new_node(addx, phase->get_ctrl(x));
  phase->_igvn.replace_node(n1, addx);
  assert(phase->get_loop(phase->get_ctrl(n1)) == this, "");
  _body.yank(n1);
  return addx;
}

//---------------------reassociate_invariants-----------------------------
// Reassociate invariant expressions:
void IdealLoopTree::reassociate_invariants(PhaseIdealLoop *phase) {
  for (int i = _body.size() - 1; i >= 0; i--) {
    Node *n = _body.at(i);
    for (int j = 0; j < 5; j++) {
      Node* nn = reassociate_add_sub(n, phase);
      if (nn == NULL) break;
      n = nn; // again
    }
  }
}

//------------------------------policy_peeling---------------------------------
// Return TRUE if the loop should be peeled, FALSE otherwise.  Peel if we can
// make some loop-invariant test (usually a null-check) happen before the loop.
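// A sketch of the canonical case:
//   for (...) { if (p != NULL) sum += p->x; }   // p loop-invariant
// Peeling one iteration places a copy of the test before the loop; the
// copy left inside is then dominated by it and is removed by
// peeled_dom_test_elim() below.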
bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
  Node *test = ((IdealLoopTree*)this)->tail();
  int  body_size = ((IdealLoopTree*)this)->_body.size();
  int  uniq      = phase->C->unique();
  // Peeling does loop cloning which can result in O(N^2) node construction
  if( body_size > 255 /* Prevent overflow for large body_size */
      || (body_size * body_size + uniq > MaxNodeLimit) ) {
    return false;           // too large to safely clone
  }
  while( test != _head ) {      // Scan till run off top of loop
    if( test->is_If() ) {       // Test?
      Node *ctrl = phase->get_ctrl(test->in(1));
      if (ctrl->is_top())
        return false;           // Found dead test on live IF?  No peeling!
      // Standard IF only has one input value to check for loop invariance
      assert( test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd, "Check this code when new subtype is added");
      // Condition is not a member of this loop?
      if( !is_member(phase->get_loop(ctrl)) &&
          is_loop_exit(test) )
        return true;            // Found reason to peel!
    }
    // Walk up dominators to loop _head looking for test which is
    // executed on every path thru loop.
    test = phase->idom(test);
  }
  return false;
}

//------------------------------peeled_dom_test_elim---------------------------
// If we got the effect of peeling, either by actually peeling or by making
// a pre-loop which must execute at least once, we can remove all
// loop-invariant dominated tests in the main body.
void PhaseIdealLoop::peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ) {
  bool progress = true;
  while( progress ) {
    progress = false;           // Reset for next iteration
    Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail();
    Node *test = prev->in(0);
    while( test != loop->_head ) { // Scan till run off top of loop

      int p_op = prev->Opcode();
      if( (p_op == Op_IfFalse || p_op == Op_IfTrue) &&
          test->is_If() &&      // Test?
          !test->in(1)->is_Con() && // And not already obvious?
          // Condition is not a member of this loop?
          !loop->is_member(get_loop(get_ctrl(test->in(1))))){
        // Walk loop body looking for instances of this test
        for( uint i = 0; i < loop->_body.size(); i++ ) {
          Node *n = loop->_body.at(i);
          if( n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/ ) {
            // IfNode was dominated by version in peeled loop body
            progress = true;
            dominated_by( old_new[prev->_idx], n );
          }
        }
      }
      prev = test;
      test = idom(test);
    } // End of scan tests in loop

  } // End of while( progress )
}

//------------------------------do_peeling-------------------------------------
// Peel the first iteration of the given loop.
// Step 1: Clone the loop body.  The clone becomes the peeled iteration.
//         The pre-loop illegally has 2 control users (old & new loops).
// Step 2: Make the old-loop fall-in edges point to the peeled iteration.
//         Do this by making the old-loop fall-in edges act as if they came
//         around the loopback from the prior iteration (follow the old-loop
//         backedges) and then map to the new peeled iteration.  This leaves
//         the pre-loop with only 1 user (the new peeled iteration), but the
//         peeled-loop backedge has 2 users.
// Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
//         extra backedge user.
//
//                   orig
//
//                  stmt1
//                    |
//                    v
//              loop predicate
//                    |
//                    v
//                  loop<----+
//                    |      |
//                  stmt2    |
//                    |      |
//                    v      |
//                   if      ^
//                  /  \     |
//                 /    \    |
//                v      v   |
//             false    true |
//               |        \  |
//               |         --+
//               v
//              exit
//
//
//              after clone loop
//
//                      stmt1
//                        |
//                        v
//                  loop predicate
//                      /   \
//           clone     /     \     orig
//                    /       \
//                   /         \
//                  v           v
//      +---->loop clone       loop<----+
//      |        |               |      |
//      |   stmt2 clone        stmt2    |
//      |        |               |      |
//      |        v               v      |
//      ^    if clone           if      ^
//      |      /  \            /  \     |
//      |     /    \          /    \    |
//      |    v      v        v      v   |
//      |  true   false    false   true |
//      |   /        \      /       \   |
//      +--           \    /         ---+
//                     \  /
//                    1v  v2
//                     region
//                        |
//                        v
//                      exit
//
//
//          after peel and predicate move
//
//                     stmt1
//                      /
//                     /
//          clone     /     orig
//                   /
//                  /       +----------+
//                 /        |          |
//                /     loop predicate |
//               /          |          |
//              v           v          |
//    TOP-->loop clone     loop<----+  |
//            |              |      |  |
//       stmt2 clone       stmt2    |  |
//            |              |      ^  |
//            v              v      |  |
//        if clone          if      ^  |
//          /  \           /  \     |  |
//         /    \         /    \    |  |
//        v      v       v      v   |  |
//      true   false   false   true |  |
//        |       \      /       \  |  |
//        |        \    /         --+  ^
//        |         \  /               |
//        |        1v  v2              |
//        v         region             |
//        |            |               |
//        |            v               |
//        |           exit             |
//        |                            |
//        +------------>---------------+
//
//
//              final graph
//
//                 stmt1
//                   |
//                   v
//              stmt2 clone
//                   |
//                   v
//               if clone
//                /    \
//               /      \
//              v        v
//            false     true
//              |         |
//              |         v
//              |  loop predicate
//              |         |
//              |         v
//              |       loop<----+
//              |         |      |
//              |       stmt2    |
//              |         |      |
//              |         v      |
//              v        if      ^
//              |       /  \     |
//              |      /    \    |
//              |     v      v   |
//              |   false   true |
//              |     |       \  |
//              v     v        --+
//              region
//                |
//                v
//               exit
//
void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {

  C->set_major_progress();
  // Peeling a 'main' loop in a pre/main/post situation obfuscates the
  // 'pre' loop from the main and the 'pre' can no longer have its
  // iterations adjusted.  Therefore, we need to declare this loop as
  // no longer a 'main' loop; it will need new pre and post loops before
  // we can do further RCE.
#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("Peel ");
    loop->dump_head();
  }
#endif
  Node* head = loop->_head;
  bool counted_loop = head->is_CountedLoop();
  if (counted_loop) {
    CountedLoopNode *cl = head->as_CountedLoop();
    assert(cl->trip_count() > 0, "peeling a fully unrolled loop");
    cl->set_trip_count(cl->trip_count() - 1);
    if (cl->is_main_loop()) {
      cl->set_normal_loop();
#ifndef PRODUCT
      if (PrintOpto && VerifyLoopOptimizations) {
        tty->print("Peeling a 'main' loop; resetting to 'normal' ");
        loop->dump_head();
      }
#endif
    }
  }
  Node* entry = head->in(LoopNode::EntryControl);

  // Step 1: Clone the loop body.  The clone becomes the peeled iteration.
  //         The pre-loop illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dom_depth(head) );

  // Step 2: Make the old-loop fall-in edges point to the peeled iteration.
  //         Do this by making the old-loop fall-in edges act as if they came
  //         around the loopback from the prior iteration (follow the old-loop
  //         backedges) and then map to the new peeled iteration.  This leaves
  //         the pre-loop with only 1 user (the new peeled iteration), but the
  //         peeled-loop backedge has 2 users.
  Node* new_exit_value = old_new[head->in(LoopNode::LoopBackControl)->_idx];
  new_exit_value = move_loop_predicates(entry, new_exit_value);
  _igvn.hash_delete(head);
  head->set_req(LoopNode::EntryControl, new_exit_value);
  for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
    Node* old = head->fast_out(j);
    if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) {
      new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx];
      if (!new_exit_value)      // Backedge value is ALSO loop invariant?
        // Then loop body backedge value remains the same.
        new_exit_value = old->in(LoopNode::LoopBackControl);
      _igvn.hash_delete(old);
      old->set_req(LoopNode::EntryControl, new_exit_value);
    }
  }

  // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
  //         extra backedge user.
  Node* new_head = old_new[head->_idx];
  _igvn.hash_delete(new_head);
  new_head->set_req(LoopNode::LoopBackControl, C->top());
  for (DUIterator_Fast j2max, j2 = new_head->fast_outs(j2max); j2 < j2max; j2++) {
    Node* use = new_head->fast_out(j2);
    if (use->in(0) == new_head && use->req() == 3 && use->is_Phi()) {
      _igvn.hash_delete(use);
      use->set_req(LoopNode::LoopBackControl, C->top());
    }
  }

  // Step 4: Correct dom-depth info.  Set to loop-head depth.
  int dd = dom_depth(head);
  set_idom(head, head->in(1), dd);
  for (uint j3 = 0; j3 < loop->_body.size(); j3++) {
    Node *old = loop->_body.at(j3);
    Node *nnn = old_new[old->_idx];
    if (!has_ctrl(nnn))
      set_idom(nnn, idom(nnn), dd-1);
    // While we're at it, remove any SafePoints from the peeled code
    if (old->Opcode() == Op_SafePoint) {
      Node *nnn = old_new[old->_idx];
      lazy_replace(nnn,nnn->in(TypeFunc::Control));
    }
  }

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);

  loop->record_for_igvn();
}

//------------------------------policy_maximally_unroll------------------------
// Return TRUE if the loop should be fully unrolled (the trip count is a
// small known constant), FALSE otherwise.
bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
  CountedLoopNode *cl = _head->as_CountedLoop();
  assert(cl->is_normal_loop(), "");

  Node *init_n = cl->init_trip();
  Node *limit_n = cl->limit();

  // Non-constant bounds
  if (init_n  == NULL || !init_n->is_Con()  ||
      limit_n == NULL || !limit_n->is_Con() ||
      // protect against stride not being a constant
      !cl->stride_is_con()) {
    return false;
  }
  int init   = init_n->get_int();
  int limit  = limit_n->get_int();
  int span   = limit - init;
  int stride = cl->stride_con();

  if (init >= limit || stride > span) {
    // Return false (no maximal unroll); the regular unroll/peel
    // route will make a small mess which CCP will fold away.
    return false;
  }
  uint trip_count = span/stride;   // trip_count can be greater than 2 Gig.
  assert( (int)trip_count*stride == span, "must divide evenly" );

  // Real policy: if we maximally unroll, does it get too big?
  // Allow the unrolled mess to get larger than standard loop
  // size.  After all, it will no longer be a loop.
  uint body_size    = _body.size();
  uint unroll_limit = (uint)LoopUnrollLimit * 4;
  assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
  cl->set_trip_count(trip_count);
  if (trip_count > unroll_limit || body_size > unroll_limit) {
    return false;
  }

  // Currently we don't have a policy to optimize one-iteration loops.
  // The maximally-unrolling transformation is used for that:
  // the loop is peeled and the original loop becomes unreachable (dead).
  if (trip_count == 1)
    return true;

  // Do not unroll a loop with String intrinsics code.
  // String intrinsics are large and have loops.
  for (uint k = 0; k < _body.size(); k++) {
    Node* n = _body.at(k);
    switch (n->Opcode()) {
      case Op_StrComp:
      case Op_StrEquals:
      case Op_StrIndexOf:
      case Op_AryEq: {
        return false;
      }
    } // switch
  }

  if (body_size <= unroll_limit) {
    uint new_body_size = body_size * trip_count;
    if (new_body_size <= unroll_limit &&
        body_size == new_body_size / trip_count &&
        // Unrolling can result in a large amount of node construction
        new_body_size < MaxNodeLimit - phase->C->unique()) {
      return true;    // maximally unroll
    }
  }

  return false;   // Do not maximally unroll
}


//------------------------------policy_unroll----------------------------------
// Return TRUE if the loop should be unrolled, FALSE otherwise.  Unroll if
// the loop is a CountedLoop and the body is small enough.
bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {

  CountedLoopNode *cl = _head->as_CountedLoop();
  assert(cl->is_normal_loop() || cl->is_main_loop(), "");

  // protect against stride not being a constant
  if (!cl->stride_is_con()) return false;

  // protect against over-unrolling
  if (cl->trip_count() <= 1) return false;

  int future_unroll_ct = cl->unrolled_count() * 2;

  // Don't unroll if the next round of unrolling would push us
  // over the expected trip count of the loop.  One is subtracted
  // from the expected trip count because the pre-loop normally
  // executes 1 iteration.
  if (UnrollLimitForProfileCheck > 0 &&
      cl->profile_trip_cnt() != COUNT_UNKNOWN &&
      future_unroll_ct        > UnrollLimitForProfileCheck &&
      (float)future_unroll_ct > cl->profile_trip_cnt() - 1.0) {
    return false;
  }

  // When unroll count is greater than LoopUnrollMin, don't unroll if:
  //   the residual iterations are more than 10% of the trip count
  //   and rounds of "unroll,optimize" are not making significant progress
  //   Progress defined as current size less than 20% larger than previous size.
  if (UseSuperWord && cl->node_count_before_unroll() > 0 &&
      future_unroll_ct > LoopUnrollMin &&
      (future_unroll_ct - 1) * 10.0 > cl->profile_trip_cnt() &&
      1.2 * cl->node_count_before_unroll() < (double)_body.size()) {
    return false;
  }

  Node *init_n = cl->init_trip();
  Node *limit_n = cl->limit();
  // Non-constant bounds.
  // Protect against over-unrolling when init and/or limit are not constant
  // (so that trip_count's init value is maxint) but iv range is known.
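  // For illustration with made-up numbers: if stride_con == 4, the next
  // unroll would step by 8; with a known iv range of [0, 5] we'd have
  // lo + 8 > hi, i.e. a doubled step could never complete, so we refuse
  // to unroll further.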
  if (init_n == NULL || !init_n->is_Con() ||
      limit_n == NULL || !limit_n->is_Con()) {
    Node* phi = cl->phi();
    if (phi != NULL) {
      assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi.");
      const TypeInt* iv_type = phase->_igvn.type(phi)->is_int();
      int next_stride = cl->stride_con() * 2; // stride after this unroll
      if (next_stride > 0) {
        if (iv_type->_lo + next_stride <= iv_type->_lo || // overflow
            iv_type->_lo + next_stride >  iv_type->_hi) {
          return false;  // over-unrolling
        }
      } else if (next_stride < 0) {
        if (iv_type->_hi + next_stride >= iv_type->_hi || // overflow
            iv_type->_hi + next_stride <  iv_type->_lo) {
          return false;  // over-unrolling
        }
      }
    }
  }

  // Adjust body_size to determine if we unroll or not
  uint body_size = _body.size();
  // Key test to unroll CaffeineMark's Logic test
  int xors_in_loop = 0;
  // Also count ModL, DivL and MulL which expand mightily
  for (uint k = 0; k < _body.size(); k++) {
    Node* n = _body.at(k);
    switch (n->Opcode()) {
      case Op_XorI: xors_in_loop++; break; // CaffeineMark's Logic test
      case Op_ModL: body_size += 30; break;
      case Op_DivL: body_size += 30; break;
      case Op_MulL: body_size += 10; break;
      case Op_StrComp:
      case Op_StrEquals:
      case Op_StrIndexOf:
      case Op_AryEq: {
        // Do not unroll a loop with String intrinsics code.
        // String intrinsics are large and have loops.
        return false;
      }
    } // switch
  }

  // Check for being too big
  if (body_size > (uint)LoopUnrollLimit) {
    if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true;
    // Normal case: loop too big
    return false;
  }

  // Check for stride being a small enough constant
  if (abs(cl->stride_con()) > (1<<3)) return false;

  // Unroll once!  (Each trip will soon do double iterations)
  return true;
}

//------------------------------policy_align-----------------------------------
// Return TRUE if the loop should be cache-line aligned, FALSE otherwise.
// Gather the expression that does the alignment.  Note that only one array
// base can be aligned in a loop (unless the VM guarantees mutual alignment).
// Note that if we vectorize short memory ops into longer memory ops, we may
// want to increase alignment.
bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {
  return false;
}

//------------------------------policy_range_check-----------------------------
// Return TRUE if the loop should be range-check-eliminated, FALSE otherwise.
// Actually we do iteration-splitting, a more powerful form of RCE.
bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
  if( !RangeCheckElimination ) return false;

  CountedLoopNode *cl = _head->as_CountedLoop();
  // If we unrolled with no intention of doing RCE and we later
  // changed our minds, we have no pre-loop.  Either we need to
  // make a new pre-loop, or we have to disallow RCE.
  if( cl->is_main_no_pre_loop() ) return false; // Disallowed for now.
  Node *trip_counter = cl->phi();

  // Check loop body for tests of trip-counter plus loop-invariant vs
  // loop-invariant.
  for( uint i = 0; i < _body.size(); i++ ) {
    Node *iff = _body[i];
    if( iff->Opcode() == Op_If ) { // Test?

      // Comparing trip+off vs limit
      Node *bol = iff->in(1);
      if( bol->req() != 2 ) continue; // dead constant test
      if (!bol->is_Bool()) {
        assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only");
        continue;
      }
      Node *cmp = bol->in(1);

      Node *rc_exp = cmp->in(1);
      Node *limit  = cmp->in(2);

      Node *limit_c = phase->get_ctrl(limit);
      if( limit_c == phase->C->top() )
        return false;           // Found dead test on live IF?  No RCE!
      if( is_member(phase->get_loop(limit_c) ) ) {
        // Compare might have operands swapped; commute them
        rc_exp = cmp->in(2);
        limit  = cmp->in(1);
        limit_c = phase->get_ctrl(limit);
        if( is_member(phase->get_loop(limit_c) ) )
          continue;             // Both inputs are loop varying; cannot RCE
      }

      if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, NULL, NULL)) {
        continue;
      }
      // Yeah!  Found a test like 'trip+off vs limit'
      // Test is an IfNode, has 2 projections.  If BOTH are in the loop
      // we need loop unswitching instead of iteration splitting.
      if( is_loop_exit(iff) )
        return true;            // Found reason to split iterations
    } // End of is IF
  }

  return false;
}

//------------------------------policy_peel_only-------------------------------
// Return TRUE if the loop should NEVER be RCE'd or aligned, FALSE otherwise.
// Useful for unrolling loops with NO array accesses.
bool IdealLoopTree::policy_peel_only( PhaseIdealLoop *phase ) const {

  for( uint i = 0; i < _body.size(); i++ )
    if( _body[i]->is_Mem() )
      return false;

  // No memory accesses at all!
  return true;
}

//------------------------------clone_up_backedge_goo--------------------------
// If Node n lives in the back_ctrl block and cannot float, we clone a private
// version of n in preheader_ctrl block and return that, otherwise return n.
Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n ) {
  if( get_ctrl(n) != back_ctrl ) return n;

  Node *x = NULL;               // If required, a clone of 'n'
  // Check for 'n' being pinned in the backedge.
  if( n->in(0) && n->in(0) == back_ctrl ) {
    x = n->clone();             // Clone a copy of 'n' to preheader
    x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader
  }

  // Recursively fix up any other input edges into x.
  // If there are no changes we can just return 'n', otherwise
  // we need to clone a private copy and change it.
  for( uint i = 1; i < n->req(); i++ ) {
    Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i) );
    if( g != n->in(i) ) {
      if( !x )
        x = n->clone();
      x->set_req(i, g);
    }
  }
  if( x ) {                     // x can legally float to pre-header location
    register_new_node( x, preheader_ctrl );
    return x;
  } else {                      // raise n to cover LCA of uses
    set_ctrl( n, find_non_split_ctrl(back_ctrl->in(0)) );
  }
  return n;
}

//------------------------------insert_pre_post_loops--------------------------
// Insert pre and post loops.  If peel_only is set, the pre-loop cannot have
// more iterations added.  It acts as a 'peel' only, no lower-bound RCE, no
// alignment.  Useful to unroll loops that do no array accesses.
void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) {

#ifndef PRODUCT
  if (TraceLoopOpts) {
    if (peel_only)
      tty->print("PeelMainPost ");
    else
      tty->print("PreMainPost ");
    loop->dump_head();
  }
#endif
  C->set_major_progress();

  // Find common pieces of the loop being guarded with pre & post loops
  CountedLoopNode *main_head = loop->_head->as_CountedLoop();
  assert( main_head->is_normal_loop(), "" );
  CountedLoopEndNode *main_end = main_head->loopexit();
  assert( main_end->outcnt() == 2, "1 true, 1 false path only" );
  uint dd_main_head = dom_depth(main_head);
  uint max = main_head->outcnt();

  Node *pre_header= main_head->in(LoopNode::EntryControl);
  Node *init      = main_head->init_trip();
  Node *incr      = main_end ->incr();
  Node *limit     = main_end ->limit();
  Node *stride    = main_end ->stride();
  Node *cmp       = main_end ->cmp_node();
  BoolTest::mask b_test = main_end->test_trip();

  // Need only 1 user of 'bol' because I will be hacking the loop bounds.
  Node *bol = main_end->in(CountedLoopEndNode::TestValue);
  if( bol->outcnt() != 1 ) {
    bol = bol->clone();
    register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.hash_delete(main_end);
    main_end->set_req(CountedLoopEndNode::TestValue, bol);
  }
  // Need only 1 user of 'cmp' because I will be hacking the loop bounds.
  if( cmp->outcnt() != 1 ) {
    cmp = cmp->clone();
    register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.hash_delete(bol);
    bol->set_req(1, cmp);
  }

  //------------------------------
  // Step A: Create Post-Loop.
  Node* main_exit = main_end->proj_out(false);
  assert( main_exit->Opcode() == Op_IfFalse, "" );
  int dd_main_exit = dom_depth(main_exit);

  // Step A1: Clone the loop body.  The clone becomes the post-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_exit );
  assert( old_new[main_end ->_idx]->Opcode() == Op_CountedLoopEnd, "" );
  CountedLoopNode *post_head = old_new[main_head->_idx]->as_CountedLoop();
  post_head->set_post_loop(main_head);

  // Reduce the post-loop trip count.
  CountedLoopEndNode* post_end = old_new[main_end ->_idx]->as_CountedLoopEnd();
  post_end->_prob = PROB_FAIR;

  // Build the main-loop normal exit.
  IfFalseNode *new_main_exit = new (C, 1) IfFalseNode(main_end);
  _igvn.register_new_node_with_optimizer( new_main_exit );
  set_idom(new_main_exit, main_end, dd_main_exit );
  set_loop(new_main_exit, loop->_parent);

  // Step A2: Build a zero-trip guard for the post-loop.  After leaving the
  // main-loop, the post-loop may not execute at all.  We 'opaque' the incr
  // (the main-loop trip-counter exit value) because we will be changing
  // the exit value (via unrolling) so we cannot constant-fold away the zero
  // trip guard until all unrolling is done.
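  // Shape of the guard being built (sketch):
  //   if (Opaque1(incr) <b_test> limit)  enter the post-loop
  //   else                               take the old main_exit path, skipping it.
  // The Opaque1 hides incr from igvn, so the guard cannot constant-fold
  // until all unrolling is done and the Opaque1 nodes are removed.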
  Node *zer_opaq = new (C, 2) Opaque1Node(C, incr);
  Node *zer_cmp  = new (C, 3) CmpINode( zer_opaq, limit );
  Node *zer_bol  = new (C, 2) BoolNode( zer_cmp, b_test );
  register_new_node( zer_opaq, new_main_exit );
  register_new_node( zer_cmp , new_main_exit );
  register_new_node( zer_bol , new_main_exit );

  // Build the IfNode
  IfNode *zer_iff = new (C, 2) IfNode( new_main_exit, zer_bol, PROB_FAIR, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( zer_iff );
  set_idom(zer_iff, new_main_exit, dd_main_exit);
  set_loop(zer_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip post-loop
  _igvn.hash_delete( main_exit );
  main_exit->set_req(0, zer_iff);
  _igvn._worklist.push(main_exit);
  set_idom(main_exit, zer_iff, dd_main_exit);
  set_idom(main_exit->unique_out(), zer_iff, dd_main_exit);
  // Make the true-path, must enter the post loop
  Node *zer_taken = new (C, 1) IfTrueNode( zer_iff );
  _igvn.register_new_node_with_optimizer( zer_taken );
  set_idom(zer_taken, zer_iff, dd_main_exit);
  set_loop(zer_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( post_head );
  post_head->set_req(LoopNode::EntryControl, zer_taken);
  set_idom(post_head, zer_taken, dd_main_exit);

  // Step A3: Make the fall-in values to the post-loop come from the
  // fall-out values of the main-loop.
  for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) {
    Node* main_phi = main_head->fast_out(i);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
      Node *post_phi = old_new[main_phi->_idx];
      Node *fallmain = clone_up_backedge_goo(main_head->back_control(),
                                             post_head->init_control(),
                                             main_phi->in(LoopNode::LoopBackControl));
      _igvn.hash_delete(post_phi);
      post_phi->set_req( LoopNode::EntryControl, fallmain );
    }
  }

  // Update local caches for next stanza
  main_exit = new_main_exit;


  //------------------------------
  // Step B: Create Pre-Loop.

  // Step B1: Clone the loop body.  The clone becomes the pre-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_head );
  CountedLoopNode*    pre_head = old_new[main_head->_idx]->as_CountedLoop();
  CountedLoopEndNode* pre_end  = old_new[main_end ->_idx]->as_CountedLoopEnd();
  pre_head->set_pre_loop(main_head);
  Node *pre_incr = old_new[incr->_idx];

  // Reduce the pre-loop trip count.
  pre_end->_prob = PROB_FAIR;

  // Find the pre-loop normal exit.
  Node* pre_exit = pre_end->proj_out(false);
  assert( pre_exit->Opcode() == Op_IfFalse, "" );
  IfFalseNode *new_pre_exit = new (C, 1) IfFalseNode(pre_end);
  _igvn.register_new_node_with_optimizer( new_pre_exit );
  set_idom(new_pre_exit, pre_end, dd_main_head);
  set_loop(new_pre_exit, loop->_parent);

  // Step B2: Build a zero-trip guard for the main-loop.  After leaving the
  // pre-loop, the main-loop may not execute at all.  Later in life this
  // zero-trip guard will become the minimum-trip guard when we unroll
  // the main-loop.
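  // Shape of the guard being built (sketch):
  //   if (pre_incr <b_test> Opaque1(limit))  fall into the main-loop
  //   else                                   skip it via the pre-loop's exit.
  // do_unroll() later hammers a reduced limit into this Opaque1 to turn it
  // into the minimum-trip guard.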
  Node *min_opaq = new (C, 2) Opaque1Node(C, limit);
  Node *min_cmp  = new (C, 3) CmpINode( pre_incr, min_opaq );
  Node *min_bol  = new (C, 2) BoolNode( min_cmp, b_test );
  register_new_node( min_opaq, new_pre_exit );
  register_new_node( min_cmp , new_pre_exit );
  register_new_node( min_bol , new_pre_exit );

  // Build the IfNode (assume the main-loop is executed always).
  IfNode *min_iff = new (C, 2) IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( min_iff );
  set_idom(min_iff, new_pre_exit, dd_main_head);
  set_loop(min_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip main-loop
  _igvn.hash_delete( pre_exit );
  pre_exit->set_req(0, min_iff);
  set_idom(pre_exit, min_iff, dd_main_head);
  set_idom(pre_exit->unique_out(), min_iff, dd_main_head);
  // Make the true-path, must enter the main loop
  Node *min_taken = new (C, 1) IfTrueNode( min_iff );
  _igvn.register_new_node_with_optimizer( min_taken );
  set_idom(min_taken, min_iff, dd_main_head);
  set_loop(min_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( main_head );
  main_head->set_req(LoopNode::EntryControl, min_taken);
  set_idom(main_head, min_taken, dd_main_head);

  // Step B3: Make the fall-in values to the main-loop come from the
  // fall-out values of the pre-loop.
  for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) {
    Node* main_phi = main_head->fast_out(i2);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
      Node *pre_phi = old_new[main_phi->_idx];
      Node *fallpre = clone_up_backedge_goo(pre_head->back_control(),
                                            main_head->init_control(),
                                            pre_phi->in(LoopNode::LoopBackControl));
      _igvn.hash_delete(main_phi);
      main_phi->set_req( LoopNode::EntryControl, fallpre );
    }
  }

  // Step B4: Shorten the pre-loop to run only 1 iteration (for now).
  // RCE and alignment may change this later.
  Node *cmp_end = pre_end->cmp_node();
  assert( cmp_end->in(2) == limit, "" );
  Node *pre_limit = new (C, 3) AddINode( init, stride );

  // Save the original loop limit in this Opaque1 node for
  // use by range check elimination.
  Node *pre_opaq  = new (C, 3) Opaque1Node(C, pre_limit, limit);

  register_new_node( pre_limit, pre_head->in(0) );
  register_new_node( pre_opaq , pre_head->in(0) );

  // Since no other users of pre-loop compare, I can hack limit directly
  assert( cmp_end->outcnt() == 1, "no other users" );
  _igvn.hash_delete(cmp_end);
  cmp_end->set_req(2, peel_only ? pre_limit : pre_opaq);

  // Special case for not-equal loop bounds:
  // Change pre loop test, main loop test, and the
  // main loop guard test to use lt or gt depending on stride
  // direction:
  // positive stride use <
  // negative stride use >

  if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) {

    BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt;
    // Modify pre loop end condition
    Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol0 = new (C, 2) BoolNode(pre_bol->in(1), new_test);
    register_new_node( new_bol0, pre_head->in(0) );
    _igvn.hash_delete(pre_end);
    pre_end->set_req(CountedLoopEndNode::TestValue, new_bol0);
    // Modify main loop guard condition
    assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay");
    BoolNode* new_bol1 = new (C, 2) BoolNode(min_bol->in(1), new_test);
    register_new_node( new_bol1, new_pre_exit );
    _igvn.hash_delete(min_iff);
    min_iff->set_req(CountedLoopEndNode::TestValue, new_bol1);
    // Modify main loop end condition
    BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol2 = new (C, 2) BoolNode(main_bol->in(1), new_test);
    register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) );
    _igvn.hash_delete(main_end);
    main_end->set_req(CountedLoopEndNode::TestValue, new_bol2);
  }

  // Flag main loop
  main_head->set_main_loop();
  if( peel_only ) main_head->set_main_no_pre_loop();

  // It's difficult to be precise about the trip-counts
  // for the pre/post loops.  They are usually very short,
  // so guess that 4 trips is a reasonable value.
  post_head->set_profile_trip_cnt(4.0);
  pre_head->set_profile_trip_cnt(4.0);

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);
}

//------------------------------is_invariant-----------------------------
// Return true if n is invariant
bool IdealLoopTree::is_invariant(Node* n) const {
  Node *n_c = _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n;
  if (n_c->is_top()) return false;
  return !is_member(_phase->get_loop(n_c));
}


//------------------------------do_unroll--------------------------------------
// Unroll the loop body one step - make each trip do 2 iterations.
void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) {
  assert(LoopUnrollLimit, "");
  CountedLoopNode *loop_head = loop->_head->as_CountedLoop();
  CountedLoopEndNode *loop_end = loop_head->loopexit();
  assert(loop_end, "");
#ifndef PRODUCT
  if (PrintOpto && VerifyLoopOptimizations) {
    tty->print("Unrolling ");
    loop->dump_head();
  } else if (TraceLoopOpts) {
    tty->print("Unroll %d ", loop_head->unrolled_count()*2);
    loop->dump_head();
  }
#endif

  // Remember loop node count before unrolling to detect
  // if rounds of unroll,optimize are making progress
  loop_head->set_node_count_before_unroll(loop->_body.size());

  Node *ctrl   = loop_head->in(LoopNode::EntryControl);
  Node *limit  = loop_head->limit();
  Node *init   = loop_head->init_trip();
  Node *stride = loop_head->stride();

  Node *opaq = NULL;
  if( adjust_min_trip ) {       // If not maximally unrolling, need adjustment
    assert( loop_head->is_main_loop(), "" );
    assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" );
    Node *iff = ctrl->in(0);
    assert( iff->Opcode() == Op_If, "" );
    Node *bol = iff->in(1);
    assert( bol->Opcode() == Op_Bool, "" );
    Node *cmp = bol->in(1);
    assert( cmp->Opcode() == Op_CmpI, "" );
    opaq = cmp->in(2);
    // Occasionally it's possible for a pre-loop Opaque1 node to be
    // optimized away and then another round of loop opts attempted.
    // We cannot optimize this particular loop in that case.
    if( opaq->Opcode() != Op_Opaque1 )
      return;                   // Cannot find pre-loop!  Bail out!
  }

  C->set_major_progress();

  // Adjust max trip count. The trip count is intentionally rounded
  // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
  // the main, unrolled, part of the loop will never execute as it is protected
  // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
  // and later determined that part of the unrolled loop was dead.
  loop_head->set_trip_count(loop_head->trip_count() / 2);

  // Double the count of original iterations in the unrolled loop body.
  loop_head->double_unrolled_count();

  // -----------
  // Step 2: Cut back the trip counter for an unroll amount of 2.
  // Loop will normally trip (limit - init)/stride_con.  Since it's a
  // CountedLoop this is exact (stride divides limit-init exactly).
  // We are going to double the loop body, so we want to knock off any
  // odd iteration: (trip_cnt & ~1).  Then back compute a new limit.
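  // For illustration with made-up bounds: init = 0, limit = 15, stride = 1
  // gives trip = 15; trip & ~1 = 14, so the new limit is 14*1 + 0 = 14.
  // The doubled body then runs 7 times and the leftover odd iteration is
  // picked up by the post-loop.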
  Node *span = new (C, 3) SubINode( limit, init );
  register_new_node( span, ctrl );
  Node *trip = new (C, 3) DivINode( 0, span, stride );
  register_new_node( trip, ctrl );
  Node *mtwo = _igvn.intcon(-2);
  set_ctrl(mtwo, C->root());
  Node *rond = new (C, 3) AndINode( trip, mtwo );
  register_new_node( rond, ctrl );
  Node *spn2 = new (C, 3) MulINode( rond, stride );
  register_new_node( spn2, ctrl );
  Node *lim2 = new (C, 3) AddINode( spn2, init );
  register_new_node( lim2, ctrl );

  // Hammer in the new limit
  Node *ctrl2 = loop_end->in(0);
  Node *cmp2 = new (C, 3) CmpINode( loop_head->incr(), lim2 );
  register_new_node( cmp2, ctrl2 );
  Node *bol2 = new (C, 2) BoolNode( cmp2, loop_end->test_trip() );
  register_new_node( bol2, ctrl2 );
  _igvn.hash_delete(loop_end);
  loop_end->set_req(CountedLoopEndNode::TestValue, bol2);

  // Step 3: Find the min-trip test guaranteed before a 'main' loop.
  // Make it a 1-trip test (means at least 2 trips).
  if( adjust_min_trip ) {
    // Guard test uses an 'opaque' node which is not shared.  Hence I
    // can edit its inputs directly.  Hammer in the new limit for the
    // minimum-trip guard.
    assert( opaq->outcnt() == 1, "" );
    _igvn.hash_delete(opaq);
    opaq->set_req(1, lim2);
  }

  // ---------
  // Step 4: Clone the loop body.  Move it inside the loop.  This loop body
  // represents the odd iterations; since the loop trips an even number of
  // times its backedge is never taken.  Kill the backedge.
  uint dd = dom_depth(loop_head);
  clone_loop( loop, old_new, dd );

  // Make backedges of the clone equal to backedges of the original.
  // Make the fall-in from the original come from the fall-out of the clone.
  for (DUIterator_Fast jmax, j = loop_head->fast_outs(jmax); j < jmax; j++) {
    Node* phi = loop_head->fast_out(j);
    if( phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0 ) {
      Node *newphi = old_new[phi->_idx];
      _igvn.hash_delete( phi );
      _igvn.hash_delete( newphi );

      phi   ->set_req(LoopNode::   EntryControl, newphi->in(LoopNode::LoopBackControl));
      newphi->set_req(LoopNode::LoopBackControl, phi   ->in(LoopNode::LoopBackControl));
      phi   ->set_req(LoopNode::LoopBackControl, C->top());
    }
  }
  Node *clone_head = old_new[loop_head->_idx];
  _igvn.hash_delete( clone_head );
  loop_head ->set_req(LoopNode::   EntryControl, clone_head->in(LoopNode::LoopBackControl));
  clone_head->set_req(LoopNode::LoopBackControl, loop_head ->in(LoopNode::LoopBackControl));
  loop_head ->set_req(LoopNode::LoopBackControl, C->top());
  loop->_head = clone_head;     // New loop header

  set_idom(loop_head,  loop_head ->in(LoopNode::EntryControl), dd);
  set_idom(clone_head, clone_head->in(LoopNode::EntryControl), dd);

  // Kill the clone's backedge
  Node *newcle = old_new[loop_end->_idx];
  _igvn.hash_delete( newcle );
  Node *one = _igvn.intcon(1);
  set_ctrl(one, C->root());
  newcle->set_req(1, one);
  // Force clone into same loop body
  uint max = loop->_body.size();
  for( uint k = 0; k < max; k++ ) {
    Node *old = loop->_body.at(k);
    Node *nnn = old_new[old->_idx];
    loop->_body.push(nnn);
    if (!has_ctrl(old))
      set_loop(nnn, loop);
  }

  loop->record_for_igvn();
}

//------------------------------do_maximally_unroll----------------------------

void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) {
  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  assert(cl->trip_count() > 0, "");
#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("MaxUnroll %d ", cl->trip_count());
    loop->dump_head();
  }
#endif

  // If loop is tripping an odd number of times, peel odd iteration
  if ((cl->trip_count() & 1) == 1) {
    do_peeling(loop, old_new);
  }

  // Now it's tripping an even number of times.  Double loop body.
  // Do not adjust pre-guards; they are not needed and do not exist.
  if (cl->trip_count() > 0) {
    do_unroll(loop, old_new, false);
  }
}

//------------------------------dominates_backedge---------------------------------
// Returns true if ctrl is executed on every complete iteration
bool IdealLoopTree::dominates_backedge(Node* ctrl) {
  assert(ctrl->is_CFG(), "must be control");
  Node* backedge = _head->as_Loop()->in(LoopNode::LoopBackControl);
  return _phase->dom_lca_internal(ctrl, backedge) == ctrl;
}

//------------------------------add_constraint---------------------------------
// Constrain the main loop iterations so the condition:
//    scale_con * I + offset  <  limit
// always holds true.  That is, either increase the number of iterations in
// the pre-loop or the post-loop until the condition holds true in the main
// loop.  Stride, scale, offset and limit are all loop invariant.  Further,
// stride and scale are constants (offset and limit often are).
void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) {

  // Compute "I :: (limit-offset)/scale_con"
  Node *con = new (C, 3) SubINode( limit, offset );
  register_new_node( con, pre_ctrl );
  Node *scale = _igvn.intcon(scale_con);
  set_ctrl(scale, C->root());
  Node *X = new (C, 3) DivINode( 0, con, scale );
  register_new_node( X, pre_ctrl );

  // For positive stride, the pre-loop limit always uses a MAX function
  // and the main loop a MIN function.  For negative stride these are
  // reversed.

  // Also for positive stride*scale the affine function is increasing, so the
  // pre-loop must check for underflow and the post-loop for overflow.
  // Negative stride*scale reverses this; pre-loop checks for overflow and
  // post-loop for underflow.
  if( stride_con*scale_con > 0 ) {
    // Compute I < (limit-offset)/scale_con
    // Adjust main-loop last iteration to be MIN/MAX(main_loop,X)
    *main_limit = (stride_con > 0)
      ? (Node*)(new (C, 3) MinINode( *main_limit, X ))
      : (Node*)(new (C, 3) MaxINode( *main_limit, X ));
    register_new_node( *main_limit, pre_ctrl );

  } else {
    // Compute (limit-offset)/scale_con + SGN(-scale_con) <= I
    // Add the negation of the main-loop constraint to the pre-loop.
    // See footnote [++] below for a derivation of the limit expression.
    Node *incr = _igvn.intcon(scale_con > 0 ? -1 : 1);
    set_ctrl(incr, C->root());
    Node *adj = new (C, 3) AddINode( X, incr );
    register_new_node( adj, pre_ctrl );
    *pre_limit = (scale_con > 0)
      ? (Node*)new (C, 3) MinINode( *pre_limit, adj )
      : (Node*)new (C, 3) MaxINode( *pre_limit, adj );
    register_new_node( *pre_limit, pre_ctrl );

    // [++] Here's the algebra that justifies the pre-loop limit expression:
    //
    // NOT( scale_con * I + offset  <  limit )
    //      ==
    // scale_con * I + offset  >=  limit
    //      ==
    // SGN(scale_con) * I  >=  (limit-offset)/|scale_con|
    //      ==
    // (limit-offset)/|scale_con|  <=  I * SGN(scale_con)
    //      ==
    // (limit-offset)/|scale_con| - 1  <  I * SGN(scale_con)
    //      ==
    // ( if (scale_con > 0) /*common case*/
    //   (limit-offset)/scale_con - 1  <  I
    // else
    //   (limit-offset)/scale_con + 1  >  I
    // )
    //      ==   (since SGN(-scale_con) is -1 for scale_con > 0, +1 otherwise)
    // ( if (scale_con > 0) /*common case*/
    //   (limit-offset)/scale_con + SGN(-scale_con)  <  I
    // else
    //   (limit-offset)/scale_con + SGN(-scale_con)  >  I
    // )
  }
}
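
// A worked instance of add_constraint (made-up values): with stride_con = 1
// and scale_con = 1 we take the stride*scale > 0 branch, and the condition
// I + offset < limit holds exactly when I < (limit-offset)/1 = X; clamping
// main_limit to MIN(main_limit, X) therefore enforces it for every main-loop
// iteration, with the remaining iterations running in the post-loop.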


//------------------------------is_scaled_iv---------------------------------
// Return true if exp is a constant times an induction var
bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, int* p_scale) {
  if (exp == iv) {
    if (p_scale != NULL) {
      *p_scale = 1;
    }
    return true;
  }
  int opc = exp->Opcode();
  if (opc == Op_MulI) {
    if (exp->in(1) == iv && exp->in(2)->is_Con()) {
      if (p_scale != NULL) {
        *p_scale = exp->in(2)->get_int();
      }
      return true;
    }
    if (exp->in(2) == iv && exp->in(1)->is_Con()) {
      if (p_scale != NULL) {
        *p_scale = exp->in(1)->get_int();
      }
      return true;
    }
  } else if (opc == Op_LShiftI) {
    if (exp->in(1) == iv && exp->in(2)->is_Con()) {
      if (p_scale != NULL) {
        *p_scale = 1 << exp->in(2)->get_int();
      }
      return true;
    }
  }
  return false;
}
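
// For example, for trip counter i: 'i' itself matches with scale 1,
// MulI(i, 4) and MulI(4, i) match with scale 4, and LShiftI(i, 2) also
// matches with scale 4 (1 << 2); any other shape returns false.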

//-----------------------------is_scaled_iv_plus_offset------------------------------
// Return true if exp is a simple induction variable expression: k1*iv + (invar + k2)
bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth) {
  if (is_scaled_iv(exp, iv, p_scale)) {
    if (p_offset != NULL) {
      Node *zero = _igvn.intcon(0);
      set_ctrl(zero, C->root());
      *p_offset = zero;
    }
    return true;
  }
  int opc = exp->Opcode();
  if (opc == Op_AddI) {
    if (is_scaled_iv(exp->in(1), iv, p_scale)) {
      if (p_offset != NULL) {
        *p_offset = exp->in(2);
      }
      return true;
    }
    if (exp->in(2)->is_Con()) {
      Node* offset2 = NULL;
      if (depth < 2 &&
          is_scaled_iv_plus_offset(exp->in(1), iv, p_scale,
                                   p_offset != NULL ? &offset2 : NULL, depth+1)) {
        if (p_offset != NULL) {
          Node *ctrl_off2 = get_ctrl(offset2);
          Node* offset = new (C, 3) AddINode(offset2, exp->in(2));
          register_new_node(offset, ctrl_off2);
          *p_offset = offset;
        }
        return true;
      }
    }
  } else if (opc == Op_SubI) {
    if (is_scaled_iv(exp->in(1), iv, p_scale)) {
      if (p_offset != NULL) {
        Node *zero = _igvn.intcon(0);
        set_ctrl(zero, C->root());
        Node *ctrl_off = get_ctrl(exp->in(2));
        Node* offset = new (C, 3) SubINode(zero, exp->in(2));
        register_new_node(offset, ctrl_off);
        *p_offset = offset;
      }
      return true;
    }
    if (is_scaled_iv(exp->in(2), iv, p_scale)) {
      if (p_offset != NULL) {
        *p_scale *= -1;
        *p_offset = exp->in(1);
      }
      return true;
    }
  }
  return false;
}
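
// For example, AddI(MulI(i, 4), invar) yields scale 4 with offset invar;
// SubI(invar, i) yields scale -1 with offset invar; and the depth-limited
// recursion accepts shapes like AddI(AddI(MulI(i, 4), invar), 7), folding
// the constant into the returned offset.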

//------------------------------do_range_check---------------------------------
// Eliminate range-checks and other trip-counter vs loop-invariant tests.
void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
#ifndef PRODUCT
  if (PrintOpto && VerifyLoopOptimizations) {
    tty->print("Range Check Elimination ");
    loop->dump_head();
  } else if (TraceLoopOpts) {
    tty->print("RangeCheck ");
    loop->dump_head();
  }
#endif
  assert(RangeCheckElimination, "");
  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  assert(cl->is_main_loop(), "");

  // protect against stride not being a constant
  if (!cl->stride_is_con())
    return;

  // Find the trip counter; we are iteration splitting based on it
  Node *trip_counter = cl->phi();
  // Find the main loop limit; we will trim its iterations
  // to not ever trip end tests
  Node *main_limit = cl->limit();

  // Need to find the main-loop zero-trip guard
  Node *ctrl  = cl->in(LoopNode::EntryControl);
  assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "");
  Node *iffm = ctrl->in(0);
  assert(iffm->Opcode() == Op_If, "");
  Node *bolzm = iffm->in(1);
  assert(bolzm->Opcode() == Op_Bool, "");
  Node *cmpzm = bolzm->in(1);
  assert(cmpzm->is_Cmp(), "");
  Node *opqzm = cmpzm->in(2);
  // Cannot optimize a loop if the pre-loop Opaque1 node is optimized
  // away and then another round of loop opts attempted.
  if (opqzm->Opcode() != Op_Opaque1)
    return;
  assert(opqzm->in(1) == main_limit, "do not understand situation");

  // Find the pre-loop limit; we will expand its iterations to
  // not ever trip low tests.
  Node *p_f = iffm->in(0);
  assert(p_f->Opcode() == Op_IfFalse, "");
  CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
  assert(pre_end->loopnode()->is_pre_loop(), "");
  Node *pre_opaq1 = pre_end->limit();
  // Occasionally it's possible for a pre-loop Opaque1 node to be
  // optimized away and then another round of loop opts attempted.
  // We cannot optimize this particular loop in that case.
  if (pre_opaq1->Opcode() != Op_Opaque1)
    return;
  Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1;
  Node *pre_limit = pre_opaq->in(1);

  // Where do we put new limit calculations
  Node *pre_ctrl = pre_end->loopnode()->in(LoopNode::EntryControl);

  // Ensure the original loop limit is available from the
  // pre-loop Opaque1 node.
  Node *orig_limit = pre_opaq->original_loop_limit();
  if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP)
    return;

  // Must know if it's a count-up or count-down loop

  int stride_con = cl->stride_con();
  Node *zero = _igvn.intcon(0);
  Node *one  = _igvn.intcon(1);
  set_ctrl(zero, C->root());
  set_ctrl(one,  C->root());

  // Range checks that do not dominate the loop backedge (i.e. are
  // conditionally executed) can lengthen the pre loop limit beyond
  // the original loop limit.  To prevent this, the pre limit is
  // MINed with the original loop limit (for stride > 0; MAXed for
  // stride < 0) when some range check (rc) is conditionally executed.
  bool conditional_rc = false;
1497 // Check loop body for tests of trip-counter plus loop-invariant vs
1498 // loop-invariant.
1499 for( uint i = 0; i < loop->_body.size(); i++ ) {
1500 Node *iff = loop->_body[i];
1501 if( iff->Opcode() == Op_If ) { // Test?
1503 // Test is an IfNode, has 2 projections. If BOTH are in the loop
1504 // we need loop unswitching instead of iteration splitting.
1505 Node *exit = loop->is_loop_exit(iff);
1506 if( !exit ) continue;
1507 int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0;
1509 // Get boolean condition to test
1510 Node *i1 = iff->in(1);
1511 if( !i1->is_Bool() ) continue;
1512 BoolNode *bol = i1->as_Bool();
1513 BoolTest b_test = bol->_test;
1514 // Flip sense of test if exit condition is flipped
1515 if( flip )
1516 b_test = b_test.negate();
1518 // Get compare
1519 Node *cmp = bol->in(1);
1521 // Look for trip_counter + offset vs limit
1522 Node *rc_exp = cmp->in(1);
1523 Node *limit = cmp->in(2);
1524 jint scale_con = 1; // Assume trip counter not scaled
1526 Node *limit_c = get_ctrl(limit);
1527 if( loop->is_member(get_loop(limit_c) ) ) {
1528 // Compare might have operands swapped; commute them
1529 b_test = b_test.commute();
1530 rc_exp = cmp->in(2);
1531 limit = cmp->in(1);
1532 limit_c = get_ctrl(limit);
1533 if( loop->is_member(get_loop(limit_c) ) )
1534 continue; // Both inputs are loop varying; cannot RCE
1535 }
1536 // Here we know 'limit' is loop invariant
1538 // 'limit' may be pinned below the zero trip test (probably from a
1539 // previous round of RCE), in which case it can't be used in the
1540 // zero trip test expression, which must occur before the zero test's If.
1541 if( limit_c == ctrl ) {
1542 continue; // Don't rce this check but continue looking for other candidates.
1543 }
1545 // Check for scaled induction variable plus an offset
1546 Node *offset = NULL;
1548 if (!is_scaled_iv_plus_offset(rc_exp, trip_counter, &scale_con, &offset)) {
1549 continue;
1550 }
1552 Node *offset_c = get_ctrl(offset);
1553 if( loop->is_member( get_loop(offset_c) ) )
1554 continue; // Offset is not really loop invariant
1555 // Here we know 'offset' is loop invariant.
1557 // As above for the 'limit', the 'offset' may be pinned below the
1558 // zero trip test.
1559 if( offset_c == ctrl ) {
1560 continue; // Don't rce this check but continue looking for other candidates.
1561 }
1563 // At this point we have the expression as:
1564 // scale_con * trip_counter + offset :: limit
1565 // where scale_con, offset and limit are loop invariant. trip_counter
1566 // monotonically increases by stride_con, a constant. Either (or both)
1567 // stride_con and scale_con can be negative, which flips the
1568 // sense of the test.
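// E.g. the range check for a[2*i+3] tests 2*trip_counter + 3 <u a.length,
// so here scale_con == 2, offset == 3 and limit == a.length.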
1570 // Adjust pre and main loop limits to guard the correct iteration set
1571 if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests
1572 if( b_test._test == BoolTest::lt ) { // Range checks always use lt
1573 // The overflow limit: scale*I+offset < limit
1574 add_constraint( stride_con, scale_con, offset, limit, pre_ctrl, &pre_limit, &main_limit );
1575 // The underflow limit: 0 <= scale*I+offset.
1576 // Some math yields: -scale*I-(offset+1) < 0
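// Derivation: 0 <= scale*I+offset  <==>  -scale*I-offset <= 0
//             <==>  -scale*I-offset-1 < 0   (integer arithmetic)
//             <==>  -scale*I + (-(offset+1)) < 0
// which matches the "scale*I+offset < limit" form with scale == -scale_con,
// offset == -(offset+1) and limit == 0, as passed to add_constraint below.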
1577 Node *plus_one = new (C, 3) AddINode( offset, one );
1578 register_new_node( plus_one, pre_ctrl );
1579 Node *neg_offset = new (C, 3) SubINode( zero, plus_one );
1580 register_new_node( neg_offset, pre_ctrl );
1581 add_constraint( stride_con, -scale_con, neg_offset, zero, pre_ctrl, &pre_limit, &main_limit );
1582 if (!conditional_rc) {
1583 conditional_rc = !loop->dominates_backedge(iff);
1584 }
1585 } else {
1586 #ifndef PRODUCT
1587 if( PrintOpto )
1588 tty->print_cr("missed RCE opportunity");
1589 #endif
1590 continue; // In release mode, ignore it
1591 }
1592 } else { // Otherwise work on normal compares
1593 switch( b_test._test ) {
1594 case BoolTest::ge: // Convert X >= Y to -X <= -Y
1595 scale_con = -scale_con;
1596 offset = new (C, 3) SubINode( zero, offset );
1597 register_new_node( offset, pre_ctrl );
1598 limit = new (C, 3) SubINode( zero, limit );
1599 register_new_node( limit, pre_ctrl );
1600 // Fall into LE case
1601 case BoolTest::le: // Convert X <= Y to X < Y+1
1602 limit = new (C, 3) AddINode( limit, one );
1603 register_new_node( limit, pre_ctrl );
1604 // Fall into LT case
1605 case BoolTest::lt:
1606 add_constraint( stride_con, scale_con, offset, limit, pre_ctrl, &pre_limit, &main_limit );
1607 if (!conditional_rc) {
1608 conditional_rc = !loop->dominates_backedge(iff);
1609 }
1610 break;
1611 default:
1612 #ifndef PRODUCT
1613 if( PrintOpto )
1614 tty->print_cr("missed RCE opportunity");
1615 #endif
1616 continue; // Unhandled case
1617 }
1618 }
1620 // Kill the eliminated test
1621 C->set_major_progress();
1622 Node *kill_con = _igvn.intcon( 1-flip );
1623 set_ctrl(kill_con, C->root());
1624 _igvn.hash_delete(iff);
1625 iff->set_req(1, kill_con);
1626 _igvn._worklist.push(iff);
1627 // Find surviving projection
1628 assert(iff->is_If(), "");
1629 ProjNode* dp = ((IfNode*)iff)->proj_out(1-flip);
1630 // Find loads off the surviving projection; remove their control edge
1631 for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
1632 Node* cd = dp->fast_out(i); // Control-dependent node
1633 if( cd->is_Load() ) { // Loads can now float around in the loop
1634 _igvn.hash_delete(cd);
1635 // Allow the load to float around in the loop, or before it
1636 // but NOT before the pre-loop.
1637 cd->set_req(0, ctrl); // ctrl, not NULL
1638 _igvn._worklist.push(cd);
1639 --i;
1640 --imax;
1641 }
1642 }
1644 } // End of is IF
1646 }
1648 // Update loop limits
1649 if (conditional_rc) {
1650 pre_limit = (stride_con > 0) ? (Node*)new (C,3) MinINode(pre_limit, orig_limit)
1651 : (Node*)new (C,3) MaxINode(pre_limit, orig_limit);
1652 register_new_node(pre_limit, pre_ctrl);
1653 }
1654 _igvn.hash_delete(pre_opaq);
1655 pre_opaq->set_req(1, pre_limit);
1657 // Note: we are making the main loop limit no longer precise;
1658 // need to round up based on stride.
1659 if( stride_con != 1 && stride_con != -1 ) { // Cutout for common case
1660 // "Standard" round-up logic: ([main_limit-init+(y-1)]/y)*y+init
1661 // Hopefully, compiler will optimize for powers of 2.
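// E.g. init == 0, main_limit == 10, stride_con == 3:
//   span   = 10 - 0       = 10
//   rndup  = 3 - 1        = 2
//   div    = (10 + 2) / 3 = 4
//   newlim = 4*3 + 0      = 12, so the main loop trips at 0, 3, 6, 9.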
1662 Node *ctrl = get_ctrl(main_limit);
1663 Node *stride = cl->stride();
1664 Node *init = cl->init_trip();
1665 Node *span = new (C, 3) SubINode(main_limit,init);
1666 register_new_node(span,ctrl);
1667 Node *rndup = _igvn.intcon(stride_con + ((stride_con>0)?-1:1));
1668 Node *add = new (C, 3) AddINode(span,rndup);
1669 register_new_node(add,ctrl);
1670 Node *div = new (C, 3) DivINode(0,add,stride);
1671 register_new_node(div,ctrl);
1672 Node *mul = new (C, 3) MulINode(div,stride);
1673 register_new_node(mul,ctrl);
1674 Node *newlim = new (C, 3) AddINode(mul,init);
1675 register_new_node(newlim,ctrl);
1676 main_limit = newlim;
1677 }
1679 Node *main_cle = cl->loopexit();
1680 Node *main_bol = main_cle->in(1);
1681 // Hacking loop bounds; need private copies of exit test
1682 if( main_bol->outcnt() > 1 ) {// BoolNode shared?
1683 _igvn.hash_delete(main_cle);
1684 main_bol = main_bol->clone();// Clone a private BoolNode
1685 register_new_node( main_bol, main_cle->in(0) );
1686 main_cle->set_req(1,main_bol);
1687 }
1688 Node *main_cmp = main_bol->in(1);
1689 if( main_cmp->outcnt() > 1 ) { // CmpNode shared?
1690 _igvn.hash_delete(main_bol);
1691 main_cmp = main_cmp->clone();// Clone a private CmpNode
1692 register_new_node( main_cmp, main_cle->in(0) );
1693 main_bol->set_req(1,main_cmp);
1694 }
1695 // Hack the now-private loop bounds
1696 _igvn.hash_delete(main_cmp);
1697 main_cmp->set_req(2, main_limit);
1698 _igvn._worklist.push(main_cmp);
1699 // The OpaqueNode is unshared by design
1700 _igvn.hash_delete(opqzm);
1701 assert( opqzm->outcnt() == 1, "cannot hack shared node" );
1702 opqzm->set_req(1,main_limit);
1703 _igvn._worklist.push(opqzm);
1704 }
1706 //------------------------------DCE_loop_body----------------------------------
1707 // Remove simplistic dead code from loop body
1708 void IdealLoopTree::DCE_loop_body() {
1709 for( uint i = 0; i < _body.size(); i++ )
1710 if( _body.at(i)->outcnt() == 0 )
1711 _body.map( i--, _body.pop() );
1712 }
1715 //------------------------------adjust_loop_exit_prob--------------------------
1716 // Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage.
1717 // Replace with a 1-in-10 exit guess.
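// E.g. an exit test parsed with a neutral p ~ 0.5 is re-biased so the
// backedge appears roughly 10x more likely than the exit, which models
// typical loop behavior far better than the parser's guess.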
1718 void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) {
1719 Node *test = tail();
1720 while( test != _head ) {
1721 uint top = test->Opcode();
1722 if( top == Op_IfTrue || top == Op_IfFalse ) {
1723 int test_con = ((ProjNode*)test)->_con;
1724 assert(top == (uint)(test_con? Op_IfTrue: Op_IfFalse), "sanity");
1725 IfNode *iff = test->in(0)->as_If();
1726 if( iff->outcnt() == 2 ) { // Ignore dead tests
1727 Node *bol = iff->in(1);
1728 if( bol && bol->req() > 1 && bol->in(1) &&
1729 ((bol->in(1)->Opcode() == Op_StorePConditional ) ||
1730 (bol->in(1)->Opcode() == Op_StoreIConditional ) ||
1731 (bol->in(1)->Opcode() == Op_StoreLConditional ) ||
1732 (bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
1733 (bol->in(1)->Opcode() == Op_CompareAndSwapL ) ||
1734 (bol->in(1)->Opcode() == Op_CompareAndSwapP ) ||
1735 (bol->in(1)->Opcode() == Op_CompareAndSwapN )))
1736 return; // Allocation loops RARELY take backedge
1737 // Find the OTHER exit path from the IF
1738 Node* ex = iff->proj_out(1-test_con);
1739 float p = iff->_prob;
1740 if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) {
1741 if( top == Op_IfTrue ) {
1742 if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) {
1743 iff->_prob = PROB_STATIC_FREQUENT;
1744 }
1745 } else {
1746 if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) {
1747 iff->_prob = PROB_STATIC_INFREQUENT;
1748 }
1749 }
1750 }
1751 }
1752 }
1753 test = phase->idom(test);
1754 }
1755 }
1758 //------------------------------policy_do_remove_empty_loop--------------------
1759 // Micro-benchmark spamming. Policy is to always remove empty loops.
1760 // The 'DO' part is to replace the trip counter with the value it will
1761 // have on the last iteration. This will break the loop.
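// E.g. for (int i = init; i < limit; i += stride) { /* empty */ }
// replaces the phi for i with limit - stride, its value on the final
// iteration, so the exit test folds and the backedge is never taken.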
1762 bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
1763 // Body must be no larger than an empty loop
1764 if (_body.size() > 7/*number of nodes in an empty loop*/)
1765 return false;
1767 if (!_head->is_CountedLoop())
1768 return false; // Dead loop
1769 CountedLoopNode *cl = _head->as_CountedLoop();
1770 if (!cl->loopexit())
1771 return false; // Malformed loop
1772 if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
1773 return false; // Infinite loop
1775 #ifdef ASSERT
1776 // Ensure only one phi which is the iv.
1777 Node* iv = NULL;
1778 for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) {
1779 Node* n = cl->fast_out(i);
1780 if (n->Opcode() == Op_Phi) {
1781 assert(iv == NULL, "Too many phis" );
1782 iv = n;
1783 }
1784 }
1785 assert(iv == cl->phi(), "Wrong phi" );
1786 #endif
1788 // Main and post loops have an explicitly created zero-trip guard
1789 bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop();
1790 if (needs_guard) {
1791 // Check for an obvious zero trip guard.
1792 Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl));
1793 if (inctrl->Opcode() == Op_IfTrue) {
1794 // The test should look like just the backedge of a CountedLoop
1795 Node* iff = inctrl->in(0);
1796 if (iff->is_If()) {
1797 Node* bol = iff->in(1);
1798 if (bol->is_Bool() && bol->as_Bool()->_test._test == cl->loopexit()->test_trip()) {
1799 Node* cmp = bol->in(1);
1800 if (cmp->is_Cmp() && cmp->in(1) == cl->init_trip() && cmp->in(2) == cl->limit()) {
1801 needs_guard = false;
1802 }
1803 }
1804 }
1805 }
1806 }
1808 #ifndef PRODUCT
1809 if (PrintOpto) {
1810 tty->print("Removing empty loop with%s zero trip guard", needs_guard ? "out" : "");
1811 this->dump_head();
1812 } else if (TraceLoopOpts) {
1813 tty->print("Empty with%s zero trip guard ", needs_guard ? "out" : "");
1814 this->dump_head();
1815 }
1816 #endif
1818 if (needs_guard) {
1819 // Peel the loop to ensure there's a zero trip guard
1820 Node_List old_new;
1821 phase->do_peeling(this, old_new);
1822 }
1824 // Replace the phi at loop head with the final value of the last
1825 // iteration. Then the CountedLoopEnd will collapse (backedge never
1826 // taken) and all loop-invariant uses of the exit values will be correct.
1827 Node *phi = cl->phi();
1828 Node *final = new (phase->C, 3) SubINode( cl->limit(), cl->stride() );
1829 phase->register_new_node(final,cl->in(LoopNode::EntryControl));
1830 phase->_igvn.replace_node(phi,final);
1831 phase->C->set_major_progress();
1832 return true;
1833 }
1836 //=============================================================================
1837 //------------------------------iteration_split_impl---------------------------
1838 bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) {
1839 // Check and remove empty loops (spam micro-benchmarks)
1840 if( policy_do_remove_empty_loop(phase) )
1841 return true; // Here we removed an empty loop
1843 bool should_peel = policy_peeling(phase); // Should we peel?
1845 bool should_unswitch = policy_unswitching(phase);
1847 // Non-counted loops may be peeled; exactly 1 iteration is peeled.
1848 // This removes loop-invariant tests (usually null checks).
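// E.g. in  while (cond) { if (p == NULL) uncommon_trap(); ... use p ... }
// the peeled first iteration performs the NULL check once; the copy left
// inside the loop body is then dominated by it and folds away.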
1849 if( !_head->is_CountedLoop() ) { // Non-counted loop
1850 if (PartialPeelLoop && phase->partial_peel(this, old_new)) {
1851 // Partial peel succeeded so terminate this round of loop opts
1852 return false;
1853 }
1854 if( should_peel ) { // Should we peel?
1855 #ifndef PRODUCT
1856 if (PrintOpto) tty->print_cr("should_peel");
1857 #endif
1858 phase->do_peeling(this,old_new);
1859 } else if( should_unswitch ) {
1860 phase->do_unswitching(this, old_new);
1861 }
1862 return true;
1863 }
1864 CountedLoopNode *cl = _head->as_CountedLoop();
1866 if( !cl->loopexit() ) return true; // Ignore various kinds of broken loops
1868 // Do nothing special to pre- and post- loops
1869 if( cl->is_pre_loop() || cl->is_post_loop() ) return true;
1871 // Compute loop trip count from profile data
1872 compute_profile_trip_cnt(phase);
1874 // Before attempting fancy unrolling, RCE or alignment, see if we want
1875 // to completely unroll this loop or do loop unswitching.
1876 if( cl->is_normal_loop() ) {
1877 if (should_unswitch) {
1878 phase->do_unswitching(this, old_new);
1879 return true;
1880 }
1881 bool should_maximally_unroll = policy_maximally_unroll(phase);
1882 if( should_maximally_unroll ) {
1883 // Here we did some unrolling and peeling. Eventually we will
1884 // completely unroll this loop and it will no longer be a loop.
1885 phase->do_maximally_unroll(this,old_new);
1886 return true;
1887 }
1888 }
1891 // Counted loops may be peeled, may need some iterations run up
1892 // front for RCE, and may want to align loop refs to a cache
1893 // line. Thus we clone a full loop up front whose trip count is
1894 // at least 1 (if peeling), but may be several more.
1896 // The main loop will start cache-line aligned with at least 1
1897 // iteration of the unrolled body (zero-trip test required) and
1898 // will have some range checks removed.
1900 // A post-loop will finish any odd iterations (leftover after
1901 // unrolling), plus any needed for RCE purposes.
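// Schematically, "for (i = 0; i < N; i++) body" becomes:
//   pre:  i = 0 .. pre_limit           (all checks; covers the low RCE range)
//   main: i = pre_limit .. main_limit  (unrolled, range checks removed)
//   post: i = main_limit .. N          (all checks; leftover iterations)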
1903 bool should_unroll = policy_unroll(phase);
1905 bool should_rce = policy_range_check(phase);
1907 bool should_align = policy_align(phase);
1909 // If not RCE'ing (iteration splitting) or Aligning, then we do not
1910 // need a pre-loop. We may still need to peel an initial iteration but
1911 // we will not be needing an unknown number of pre-iterations.
1912 //
1913 // Basically, if may_rce_align reports FALSE first time through,
1914 // we will not be able to later do RCE or Aligning on this loop.
1915 bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align;
1917 // If we have any of these conditions (RCE, alignment, unrolling) met, then
1918 // we switch to the pre-/main-/post-loop model. This model also covers
1919 // peeling.
1920 if( should_rce || should_align || should_unroll ) {
1921 if( cl->is_normal_loop() ) // Convert to 'pre/main/post' loops
1922 phase->insert_pre_post_loops(this,old_new, !may_rce_align);
1924 // Adjust the pre- and main-loop limits to let the pre and post loops run
1925 // with full checks, but the main-loop with no checks. Remove said
1926 // checks from the main body.
1927 if( should_rce )
1928 phase->do_range_check(this,old_new);
1930 // Double loop body for unrolling. Adjust the minimum-trip test (will do
1931 // twice as many iterations as before) and the main body limit (only do
1932 // an even number of trips). If we are peeling, we might enable some RCE
1933 // and we'd rather unroll the post-RCE'd loop SO... do not unroll if
1934 // peeling.
1935 if( should_unroll && !should_peel )
1936 phase->do_unroll(this,old_new, true);
1938 // Adjust the pre-loop limits to align the main body
1939 // iterations.
1940 if( should_align )
1941 Unimplemented();
1943 } else { // Else we have an unchanged counted loop
1944 if( should_peel ) // Might want to peel but do nothing else
1945 phase->do_peeling(this,old_new);
1946 }
1947 return true;
1948 }
1951 //=============================================================================
1952 //------------------------------iteration_split--------------------------------
1953 bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) {
1954 // Recursively iteration split nested loops
1955 if (_child && !_child->iteration_split(phase, old_new))
1956 return false;
1958 // Clean out prior deadwood
1959 DCE_loop_body();
1962 // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
1963 // Replace with a 1-in-10 exit guess.
1964 if (_parent /*not the root loop*/ &&
1965 !_irreducible &&
1966 // Also ignore the occasional dead backedge
1967 !tail()->is_top()) {
1968 adjust_loop_exit_prob(phase);
1969 }
1971 // Gate unrolling, RCE and peeling efforts.
1972 if (!_child && // If not an inner loop, do not split
1973 !_irreducible &&
1974 _allow_optimizations &&
1975 !tail()->is_top()) { // Also ignore the occasional dead backedge
1976 if (!_has_call) {
1977 if (!iteration_split_impl(phase, old_new)) {
1978 return false;
1979 }
1980 } else if (policy_unswitching(phase)) {
1981 phase->do_unswitching(this, old_new);
1982 }
1983 }
1985 // Minor offset re-organization to remove loop-fallout uses of
1986 // trip counter when there was no major reshaping.
1987 phase->reorg_offsets(this);
1989 if (_next && !_next->iteration_split(phase, old_new))
1990 return false;
1991 return true;
1992 }
1995 //=============================================================================
1996 // Process all the loops in the loop tree and replace any fill
1997 // patterns with an intrinsic version.
1998 bool PhaseIdealLoop::do_intrinsify_fill() {
1999 bool changed = false;
2000 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
2001 IdealLoopTree* lpt = iter.current();
2002 changed |= intrinsify_fill(lpt);
2003 }
2004 return changed;
2005 }
2008 // Examine an inner loop looking for a single store of an invariant
2009 // value in a unit-stride loop.
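// The shape being matched is essentially
//   for (int i = init; i < limit; i++) { a[i] = v; }   // v loop invariant
// which can then be replaced by a call to an array-fill stub routine.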
2010 bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
2011 Node*& shift, Node*& con) {
2012 const char* msg = NULL;
2013 Node* msg_node = NULL;
2015 store_value = NULL;
2016 con = NULL;
2017 shift = NULL;
2019 // Process the loop looking for stores. If there are multiple
2020 // stores or extra control flow, give up at this point.
2021 CountedLoopNode* head = lpt->_head->as_CountedLoop();
2022 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2023 Node* n = lpt->_body.at(i);
2024 if (n->outcnt() == 0) continue; // Ignore dead
2025 if (n->is_Store()) {
2026 if (store != NULL) {
2027 msg = "multiple stores";
2028 break;
2029 }
2030 int opc = n->Opcode();
2031 if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreCM) {
2032 msg = "oop fills not handled";
2033 break;
2034 }
2035 Node* value = n->in(MemNode::ValueIn);
2036 if (!lpt->is_invariant(value)) {
2037 msg = "variant store value";
2038 } else if (!_igvn.type(n->in(MemNode::Address))->isa_aryptr()) {
2039 msg = "not array address";
2040 }
2041 store = n;
2042 store_value = value;
2043 } else if (n->is_If() && n != head->loopexit()) {
2044 msg = "extra control flow";
2045 msg_node = n;
2046 }
2047 }
2049 if (store == NULL) {
2050 // No store in loop
2051 return false;
2052 }
2054 if (msg == NULL && head->stride_con() != 1) {
2055 // could handle negative strides too
2056 if (head->stride_con() < 0) {
2057 msg = "negative stride";
2058 } else {
2059 msg = "non-unit stride";
2060 }
2061 }
2063 if (msg == NULL && !store->in(MemNode::Address)->is_AddP()) {
2064 msg = "can't handle store address";
2065 msg_node = store->in(MemNode::Address);
2066 }
2068 if (msg == NULL &&
2069 (!store->in(MemNode::Memory)->is_Phi() ||
2070 store->in(MemNode::Memory)->in(LoopNode::LoopBackControl) != store)) {
2071 msg = "store memory isn't proper phi";
2072 msg_node = store->in(MemNode::Memory);
2073 }
2075 // Make sure there is an appropriate fill routine
2076 BasicType t = store->as_Mem()->memory_type();
2077 const char* fill_name;
2078 if (msg == NULL &&
2079 StubRoutines::select_fill_function(t, false, fill_name) == NULL) {
2080 msg = "unsupported store";
2081 msg_node = store;
2082 }
2084 if (msg != NULL) {
2085 #ifndef PRODUCT
2086 if (TraceOptimizeFill) {
2087 tty->print_cr("not fill intrinsic candidate: %s", msg);
2088 if (msg_node != NULL) msg_node->dump();
2089 }
2090 #endif
2091 return false;
2092 }
2094 // Make sure the address expression can be handled. It should be
2095 // head->phi * elsize + con. head->phi might have a ConvI2L.
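// E.g. for an int-array store a[i] = v on a 64-bit VM the unpacked
// elements are typically { LShiftX(ConvI2L(head->phi), 2), constant },
// where 1 << 2 matches the 4-byte element size (asserted below).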
2096 Node* elements[4];
2097 Node* conv = NULL;
2098 bool found_index = false;
2099 int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
2100 for (int e = 0; e < count; e++) {
2101 Node* n = elements[e];
2102 if (n->is_Con() && con == NULL) {
2103 con = n;
2104 } else if (n->Opcode() == Op_LShiftX && shift == NULL) {
2105 Node* value = n->in(1);
2106 #ifdef _LP64
2107 if (value->Opcode() == Op_ConvI2L) {
2108 conv = value;
2109 value = value->in(1);
2110 }
2111 #endif
2112 if (value != head->phi()) {
2113 msg = "unhandled shift in address";
2114 } else {
2115 found_index = true;
2116 shift = n;
2117 assert(type2aelembytes(store->as_Mem()->memory_type(), true) == 1 << shift->in(2)->get_int(), "scale should match");
2118 }
2119 } else if (n->Opcode() == Op_ConvI2L && conv == NULL) {
2120 if (n->in(1) == head->phi()) {
2121 found_index = true;
2122 conv = n;
2123 } else {
2124 msg = "unhandled input to ConvI2L";
2125 }
2126 } else if (n == head->phi()) {
2127 // no shift, check below for allowed cases
2128 found_index = true;
2129 } else {
2130 msg = "unhandled node in address";
2131 msg_node = n;
2132 }
2133 }
2135 if (count == -1) {
2136 msg = "malformed address expression";
2137 msg_node = store;
2138 }
2140 if (!found_index) {
2141 msg = "missing use of index";
2142 }
2144 // byte-sized items won't have a shift
2145 if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) {
2146 msg = "can't find shift";
2147 msg_node = store;
2148 }
2150 if (msg != NULL) {
2151 #ifndef PRODUCT
2152 if (TraceOptimizeFill) {
2153 tty->print_cr("not fill intrinsic: %s", msg);
2154 if (msg_node != NULL) msg_node->dump();
2155 }
2156 #endif
2157 return false;
2158 }
2160 // Now make sure all the other nodes in the loop can be handled
2161 VectorSet ok(Thread::current()->resource_area());
2163 // store related values are ok
2164 ok.set(store->_idx);
2165 ok.set(store->in(MemNode::Memory)->_idx);
2167 // Loop structure is ok
2168 ok.set(head->_idx);
2169 ok.set(head->loopexit()->_idx);
2170 ok.set(head->phi()->_idx);
2171 ok.set(head->incr()->_idx);
2172 ok.set(head->loopexit()->cmp_node()->_idx);
2173 ok.set(head->loopexit()->in(1)->_idx);
2175 // Address elements are ok
2176 if (con) ok.set(con->_idx);
2177 if (shift) ok.set(shift->_idx);
2178 if (conv) ok.set(conv->_idx);
2180 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2181 Node* n = lpt->_body.at(i);
2182 if (n->outcnt() == 0) continue; // Ignore dead
2183 if (ok.test(n->_idx)) continue;
2184 // Backedge projection is ok
2185 if (n->is_IfTrue() && n->in(0) == head->loopexit()) continue;
2186 if (!n->is_AddP()) {
2187 msg = "unhandled node";
2188 msg_node = n;
2189 break;
2190 }
2191 }
2193 // Make sure no unexpected values are used outside the loop
2194 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2195 Node* n = lpt->_body.at(i);
2196 // These values can be replaced with other nodes if they are used
2197 // outside the loop.
2198 if (n == store || n == head->loopexit() || n == head->incr() || n == store->in(MemNode::Memory)) continue;
2199 for (SimpleDUIterator iter(n); iter.has_next(); iter.next()) {
2200 Node* use = iter.get();
2201 if (!lpt->_body.contains(use)) {
2202 msg = "node is used outside loop";
2203 // lpt->_body.dump();
2204 msg_node = n;
2205 break;
2206 }
2207 }
2208 }
2210 #ifdef ASSERT
2211 if (TraceOptimizeFill) {
2212 if (msg != NULL) {
2213 tty->print_cr("no fill intrinsic: %s", msg);
2214 if (msg_node != NULL) msg_node->dump();
2215 } else {
2216 tty->print_cr("fill intrinsic for:");
2217 }
2218 store->dump();
2219 if (Verbose) {
2220 lpt->_body.dump();
2221 }
2222 }
2223 #endif
2225 return msg == NULL;
2226 }
2230 bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
2231 // Only for counted inner loops
2232 if (!lpt->is_counted() || !lpt->is_inner()) {
2233 return false;
2234 }
2236 // Must have constant stride
2237 CountedLoopNode* head = lpt->_head->as_CountedLoop();
2238 if (!head->stride_is_con() || !head->is_normal_loop()) {
2239 return false;
2240 }
2242 // Check that the body only contains a store of a loop invariant
2243 // value that is indexed by the loop phi.
2244 Node* store = NULL;
2245 Node* store_value = NULL;
2246 Node* shift = NULL;
2247 Node* offset = NULL;
2248 if (!match_fill_loop(lpt, store, store_value, shift, offset)) {
2249 return false;
2250 }
2252 #ifndef PRODUCT
2253 if (TraceLoopOpts) {
2254 tty->print("ArrayFill ");
2255 lpt->dump_head();
2256 }
2257 #endif
2259 // Now replace the whole loop body by a call to a fill routine that
2260 // covers the same region as the loop.
2261 Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base);
2263 // Build an expression for the beginning of the copy region
2264 Node* index = head->init_trip();
2265 #ifdef _LP64
2266 index = new (C, 2) ConvI2LNode(index);
2267 _igvn.register_new_node_with_optimizer(index);
2268 #endif
2269 if (shift != NULL) {
2270 // byte arrays don't require a shift but others do.
2271 index = new (C, 3) LShiftXNode(index, shift->in(2));
2272 _igvn.register_new_node_with_optimizer(index);
2273 }
2274 index = new (C, 4) AddPNode(base, base, index);
2275 _igvn.register_new_node_with_optimizer(index);
2276 Node* from = new (C, 4) AddPNode(base, index, offset);
2277 _igvn.register_new_node_with_optimizer(from);
2278 // Compute the number of elements to copy
2279 Node* len = new (C, 3) SubINode(head->limit(), head->init_trip());
2280 _igvn.register_new_node_with_optimizer(len);
2282 BasicType t = store->as_Mem()->memory_type();
2283 bool aligned = false;
2284 if (offset != NULL && head->init_trip()->is_Con()) {
2285 int element_size = type2aelembytes(t);
2286 aligned = (offset->find_intptr_t_type()->get_con() + head->init_trip()->get_int() * element_size) % HeapWordSize == 0;
2287 }
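// E.g. with offset == 16 (a typical array header size, illustrative),
// init_trip == 0 and 4-byte elements: (16 + 0*4) % HeapWordSize == 0,
// so an aligned fill routine can be selected.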
2289 // Build a call to the fill routine
2290 const char* fill_name;
2291 address fill = StubRoutines::select_fill_function(t, aligned, fill_name);
2292 assert(fill != NULL, "what?");
2294 // Convert float/double to int/long for fill routines
2295 if (t == T_FLOAT) {
2296 store_value = new (C, 2) MoveF2INode(store_value);
2297 _igvn.register_new_node_with_optimizer(store_value);
2298 } else if (t == T_DOUBLE) {
2299 store_value = new (C, 2) MoveD2LNode(store_value);
2300 _igvn.register_new_node_with_optimizer(store_value);
2301 }
2303 Node* mem_phi = store->in(MemNode::Memory);
2304 Node* result_ctrl;
2305 Node* result_mem;
2306 const TypeFunc* call_type = OptoRuntime::array_fill_Type();
2307 int size = call_type->domain()->cnt();
2308 CallLeafNode *call = new (C, size) CallLeafNoFPNode(call_type, fill,
2309 fill_name, TypeAryPtr::get_array_body_type(t));
2310 call->init_req(TypeFunc::Parms+0, from);
2311 call->init_req(TypeFunc::Parms+1, store_value);
2312 #ifdef _LP64
2313 len = new (C, 2) ConvI2LNode(len);
2314 _igvn.register_new_node_with_optimizer(len);
2315 #endif
2316 call->init_req(TypeFunc::Parms+2, len);
2317 #ifdef _LP64
2318 call->init_req(TypeFunc::Parms+3, C->top());
2319 #endif
2320 call->init_req( TypeFunc::Control, head->init_control());
2321 call->init_req( TypeFunc::I_O , C->top() ) ; // does no i/o
2322 call->init_req( TypeFunc::Memory , mem_phi->in(LoopNode::EntryControl) );
2323 call->init_req( TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr) );
2324 call->init_req( TypeFunc::FramePtr, C->start()->proj_out(TypeFunc::FramePtr) );
2325 _igvn.register_new_node_with_optimizer(call);
2326 result_ctrl = new (C, 1) ProjNode(call,TypeFunc::Control);
2327 _igvn.register_new_node_with_optimizer(result_ctrl);
2328 result_mem = new (C, 1) ProjNode(call,TypeFunc::Memory);
2329 _igvn.register_new_node_with_optimizer(result_mem);
2331 // If this fill is tightly coupled to an allocation and overwrites
2332 // the whole body, allow it to take over the zeroing.
2333 AllocateNode* alloc = AllocateNode::Ideal_allocation(base, this);
2334 if (alloc != NULL && alloc->is_AllocateArray()) {
2335 Node* length = alloc->as_AllocateArray()->Ideal_length();
2336 if (head->limit() == length &&
2337 head->init_trip() == _igvn.intcon(0)) {
2338 if (TraceOptimizeFill) {
2339 tty->print_cr("Eliminated zeroing in allocation");
2340 }
2341 alloc->maybe_set_complete(&_igvn);
2342 } else {
2343 #ifdef ASSERT
2344 if (TraceOptimizeFill) {
2345 tty->print_cr("filling array but bounds don't match");
2346 alloc->dump();
2347 head->init_trip()->dump();
2348 head->limit()->dump();
2349 length->dump();
2350 }
2351 #endif
2352 }
2353 }
2355 // Redirect the old control and memory edges that are outside the loop.
2356 Node* exit = head->loopexit()->proj_out(0);
2357 // Sometimes the memory phi of the head is used as the outgoing
2358 // state of the loop. It's safe in this case to replace it with the
2359 // result_mem.
2360 _igvn.replace_node(store->in(MemNode::Memory), result_mem);
2361 _igvn.replace_node(exit, result_ctrl);
2362 _igvn.replace_node(store, result_mem);
2363 // Any uses of the increment outside of the loop become the loop limit.
2364 _igvn.replace_node(head->incr(), head->limit());
2366 // Disconnect the head from the loop.
2367 for (uint i = 0; i < lpt->_body.size(); i++) {
2368 Node* n = lpt->_body.at(i);
2369 _igvn.replace_node(n, C->top());
2370 }
2372 return true;
2373 }