1 /*
2 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "opto/addnode.hpp"
29 #include "opto/callnode.hpp"
30 #include "opto/connode.hpp"
31 #include "opto/divnode.hpp"
32 #include "opto/loopnode.hpp"
33 #include "opto/mulnode.hpp"
34 #include "opto/rootnode.hpp"
35 #include "opto/runtime.hpp"
36 #include "opto/subnode.hpp"
38 //------------------------------is_loop_exit-----------------------------------
39 // Given an IfNode, return the loop-exiting projection or NULL if both
40 // arms remain in the loop.
41 Node *IdealLoopTree::is_loop_exit(Node *iff) const {
42 if( iff->outcnt() != 2 ) return NULL; // Ignore partially dead tests
43 PhaseIdealLoop *phase = _phase;
44 // Test is an IfNode, has 2 projections. If BOTH are in the loop
45 // we need loop unswitching instead of peeling.
46 if( !is_member(phase->get_loop( iff->raw_out(0) )) )
47 return iff->raw_out(0);
48 if( !is_member(phase->get_loop( iff->raw_out(1) )) )
49 return iff->raw_out(1);
50 return NULL;
51 }
54 //=============================================================================
57 //------------------------------record_for_igvn----------------------------
58 // Put loop body on igvn work list
59 void IdealLoopTree::record_for_igvn() {
60 for( uint i = 0; i < _body.size(); i++ ) {
61 Node *n = _body.at(i);
62 _phase->_igvn._worklist.push(n);
63 }
64 }
66 //------------------------------compute_exact_trip_count-----------------------
67 // Compute the loop's exact trip count if possible. Do not recalculate the trip
68 // count for split loops (pre-main-post) which have their limits and inits behind an Opaque node.
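// For illustration (example values, not taken from the code below): with
// init = 0, limit = 10 and stride = 2 the computation in this method gives
//   stride_m   = 2 - 1 = 1
//   trip_count = (10 - 0 + 1) / 2 = 5
// i.e. a loop of the form "for (int i = 0; i < 10; i += 2)" runs exactly 5 times.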
69 void IdealLoopTree::compute_exact_trip_count( PhaseIdealLoop *phase ) {
70 if (!_head->as_Loop()->is_valid_counted_loop()) {
71 return;
72 }
73 CountedLoopNode* cl = _head->as_CountedLoop();
74 // Trip count may become nonexact for iteration split loops since
75 // RCE modifies limits. Note, _trip_count value is not reset since
76 // it is used to limit unrolling of main loop.
77 cl->set_nonexact_trip_count();
79 // Loop's test should be part of loop.
80 if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
81 return; // Infinite loop
83 #ifdef ASSERT
84 BoolTest::mask bt = cl->loopexit()->test_trip();
85 assert(bt == BoolTest::lt || bt == BoolTest::gt ||
86 bt == BoolTest::ne, "canonical test is expected");
87 #endif
89 Node* init_n = cl->init_trip();
90 Node* limit_n = cl->limit();
91 if (init_n != NULL && init_n->is_Con() &&
92 limit_n != NULL && limit_n->is_Con()) {
93 // Use longs to avoid integer overflow.
94 int stride_con = cl->stride_con();
95 long init_con = cl->init_trip()->get_int();
96 long limit_con = cl->limit()->get_int();
97 int stride_m = stride_con - (stride_con > 0 ? 1 : -1);
98 long trip_count = (limit_con - init_con + stride_m)/stride_con;
99 if (trip_count > 0 && (julong)trip_count < (julong)max_juint) {
100 // Set exact trip count.
101 cl->set_exact_trip_count((uint)trip_count);
102 }
103 }
104 }
106 //------------------------------compute_profile_trip_cnt----------------------------
107 // Compute loop trip count from profile data as
108 // (backedge_count + loop_exit_count) / loop_exit_count
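// For example (illustrative profile numbers): a loop whose backedge was taken
// 900 times and which exited 100 times averaged (900 + 100) / 100 = 10 trips
// per entry.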
109 void IdealLoopTree::compute_profile_trip_cnt( PhaseIdealLoop *phase ) {
110 if (!_head->is_CountedLoop()) {
111 return;
112 }
113 CountedLoopNode* head = _head->as_CountedLoop();
114 if (head->profile_trip_cnt() != COUNT_UNKNOWN) {
115 return; // Already computed
116 }
117 float trip_cnt = (float)max_jint; // default is big
119 Node* back = head->in(LoopNode::LoopBackControl);
120 while (back != head) {
121 if ((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
122 back->in(0) &&
123 back->in(0)->is_If() &&
124 back->in(0)->as_If()->_fcnt != COUNT_UNKNOWN &&
125 back->in(0)->as_If()->_prob != PROB_UNKNOWN) {
126 break;
127 }
128 back = phase->idom(back);
129 }
130 if (back != head) {
131 assert((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
132 back->in(0), "if-projection exists");
133 IfNode* back_if = back->in(0)->as_If();
134 float loop_back_cnt = back_if->_fcnt * back_if->_prob;
136 // Now compute a loop exit count
137 float loop_exit_cnt = 0.0f;
138 for( uint i = 0; i < _body.size(); i++ ) {
139 Node *n = _body[i];
140 if( n->is_If() ) {
141 IfNode *iff = n->as_If();
142 if( iff->_fcnt != COUNT_UNKNOWN && iff->_prob != PROB_UNKNOWN ) {
143 Node *exit = is_loop_exit(iff);
144 if( exit ) {
145 float exit_prob = iff->_prob;
146 if (exit->Opcode() == Op_IfFalse) exit_prob = 1.0 - exit_prob;
147 if (exit_prob > PROB_MIN) {
148 float exit_cnt = iff->_fcnt * exit_prob;
149 loop_exit_cnt += exit_cnt;
150 }
151 }
152 }
153 }
154 }
155 if (loop_exit_cnt > 0.0f) {
156 trip_cnt = (loop_back_cnt + loop_exit_cnt) / loop_exit_cnt;
157 } else {
158 // No exit count, so fall back to the backedge count.
159 trip_cnt = loop_back_cnt;
160 }
161 }
162 #ifndef PRODUCT
163 if (TraceProfileTripCount) {
164 tty->print_cr("compute_profile_trip_cnt lp: %d cnt: %f\n", head->_idx, trip_cnt);
165 }
166 #endif
167 head->set_profile_trip_cnt(trip_cnt);
168 }
170 //---------------------is_invariant_addition-----------------------------
171 // Return nonzero index of invariant operand for an Add or Sub
172 // of (nonconstant) invariant and variant values. Helper for reassociate_invariants.
173 int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) {
174 int op = n->Opcode();
175 if (op == Op_AddI || op == Op_SubI) {
176 bool in1_invar = this->is_invariant(n->in(1));
177 bool in2_invar = this->is_invariant(n->in(2));
178 if (in1_invar && !in2_invar) return 1;
179 if (!in1_invar && in2_invar) return 2;
180 }
181 return 0;
182 }
184 //---------------------reassociate_add_sub-----------------------------
185 // Reassociate invariant add and subtract expressions:
186 //
187 // inv1 + (x + inv2) => ( inv1 + inv2) + x
188 // (x + inv2) + inv1 => ( inv1 + inv2) + x
189 // inv1 + (x - inv2) => ( inv1 - inv2) + x
190 // inv1 - (inv2 - x) => ( inv1 - inv2) + x
191 // (x + inv2) - inv1 => (-inv1 + inv2) + x
192 // (x - inv2) + inv1 => ( inv1 - inv2) + x
193 // (x - inv2) - inv1 => (-inv1 - inv2) + x
194 // inv1 + (inv2 - x) => ( inv1 + inv2) - x
195 // inv1 - (x - inv2) => ( inv1 + inv2) - x
196 // (inv2 - x) + inv1 => ( inv1 + inv2) - x
197 // (inv2 - x) - inv1 => (-inv1 + inv2) - x
198 // inv1 - (x + inv2) => ( inv1 - inv2) - x
199 //
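// For example, with loop-invariant values a and b and a loop-varying x,
//   a + (x + b)  ==>  (a + b) + x
// lets (a + b) be computed once before the loop, leaving only the single add
// of x inside the loop body. (Illustrative names, not taken from the code below.)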
200 Node* IdealLoopTree::reassociate_add_sub(Node* n1, PhaseIdealLoop *phase) {
201 if ((!n1->is_Add() && !n1->is_Sub()) || n1->outcnt() == 0) return NULL;
202 if (is_invariant(n1)) return NULL;
203 int inv1_idx = is_invariant_addition(n1, phase);
204 if (!inv1_idx) return NULL;
205 // Don't mess with add of constant (igvn moves them to expression tree root.)
206 if (n1->is_Add() && n1->in(2)->is_Con()) return NULL;
207 Node* inv1 = n1->in(inv1_idx);
208 Node* n2 = n1->in(3 - inv1_idx);
209 int inv2_idx = is_invariant_addition(n2, phase);
210 if (!inv2_idx) return NULL;
211 Node* x = n2->in(3 - inv2_idx);
212 Node* inv2 = n2->in(inv2_idx);
214 bool neg_x = n2->is_Sub() && inv2_idx == 1;
215 bool neg_inv2 = n2->is_Sub() && inv2_idx == 2;
216 bool neg_inv1 = n1->is_Sub() && inv1_idx == 2;
217 if (n1->is_Sub() && inv1_idx == 1) {
218 neg_x = !neg_x;
219 neg_inv2 = !neg_inv2;
220 }
221 Node* inv1_c = phase->get_ctrl(inv1);
222 Node* inv2_c = phase->get_ctrl(inv2);
223 Node* n_inv1;
224 if (neg_inv1) {
225 Node *zero = phase->_igvn.intcon(0);
226 phase->set_ctrl(zero, phase->C->root());
227 n_inv1 = new (phase->C, 3) SubINode(zero, inv1);
228 phase->register_new_node(n_inv1, inv1_c);
229 } else {
230 n_inv1 = inv1;
231 }
232 Node* inv;
233 if (neg_inv2) {
234 inv = new (phase->C, 3) SubINode(n_inv1, inv2);
235 } else {
236 inv = new (phase->C, 3) AddINode(n_inv1, inv2);
237 }
238 phase->register_new_node(inv, phase->get_early_ctrl(inv));
240 Node* addx;
241 if (neg_x) {
242 addx = new (phase->C, 3) SubINode(inv, x);
243 } else {
244 addx = new (phase->C, 3) AddINode(x, inv);
245 }
246 phase->register_new_node(addx, phase->get_ctrl(x));
247 phase->_igvn.replace_node(n1, addx);
248 assert(phase->get_loop(phase->get_ctrl(n1)) == this, "");
249 _body.yank(n1);
250 return addx;
251 }
253 //---------------------reassociate_invariants-----------------------------
254 // Reassociate invariant expressions:
255 void IdealLoopTree::reassociate_invariants(PhaseIdealLoop *phase) {
256 for (int i = _body.size() - 1; i >= 0; i--) {
257 Node *n = _body.at(i);
258 for (int j = 0; j < 5; j++) {
259 Node* nn = reassociate_add_sub(n, phase);
260 if (nn == NULL) break;
261 n = nn; // again
262 }
263 }
264 }
266 //------------------------------policy_peeling---------------------------------
267 // Return TRUE or FALSE if the loop should be peeled or not. Peel if we can
268 // make some loop-invariant test (usually a null-check) happen before the loop.
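// A sketch of the effect at the source level (hypothetical example): in
//   while (cond) { if (p == NULL) throw ...; use(p); }
// peeling one iteration executes the NULL check on every path into the
// remaining loop, so the dominated copy inside the loop body can be removed
// afterwards (see peeled_dom_test_elim below).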
269 bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
270 Node *test = ((IdealLoopTree*)this)->tail();
271 int body_size = ((IdealLoopTree*)this)->_body.size();
272 int uniq = phase->C->unique();
273 // Peeling does loop cloning which can result in O(N^2) node construction
274 if( body_size > 255 /* Prevent overflow for large body_size */
275 || (body_size * body_size + uniq > MaxNodeLimit) ) {
276 return false; // too large to safely clone
277 }
278 while( test != _head ) { // Scan till run off top of loop
279 if( test->is_If() ) { // Test?
280 Node *ctrl = phase->get_ctrl(test->in(1));
281 if (ctrl->is_top())
282 return false; // Found dead test on live IF? No peeling!
283 // Standard IF only has one input value to check for loop invariance
284 assert( test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd, "Check this code when new subtype is added");
285 // Condition is not a member of this loop?
286 if( !is_member(phase->get_loop(ctrl)) &&
287 is_loop_exit(test) )
288 return true; // Found reason to peel!
289 }
290 // Walk up dominators to loop _head looking for a test which is
291 // executed on every path through the loop.
292 test = phase->idom(test);
293 }
294 return false;
295 }
297 //------------------------------peeled_dom_test_elim---------------------------
298 // If we got the effect of peeling, either by actually peeling or by making
299 // a pre-loop which must execute at least once, we can remove all
300 // loop-invariant dominated tests in the main body.
301 void PhaseIdealLoop::peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ) {
302 bool progress = true;
303 while( progress ) {
304 progress = false; // Reset for next iteration
305 Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail();
306 Node *test = prev->in(0);
307 while( test != loop->_head ) { // Scan till run off top of loop
309 int p_op = prev->Opcode();
310 if( (p_op == Op_IfFalse || p_op == Op_IfTrue) &&
311 test->is_If() && // Test?
312 !test->in(1)->is_Con() && // And not already obvious?
313 // Condition is not a member of this loop?
314 !loop->is_member(get_loop(get_ctrl(test->in(1))))){
315 // Walk loop body looking for instances of this test
316 for( uint i = 0; i < loop->_body.size(); i++ ) {
317 Node *n = loop->_body.at(i);
318 if( n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/ ) {
319 // IfNode was dominated by version in peeled loop body
320 progress = true;
321 dominated_by( old_new[prev->_idx], n );
322 }
323 }
324 }
325 prev = test;
326 test = idom(test);
327 } // End of scan tests in loop
329 } // End of while( progress )
330 }
332 //------------------------------do_peeling-------------------------------------
333 // Peel the first iteration of the given loop.
334 // Step 1: Clone the loop body. The clone becomes the peeled iteration.
335 // The pre-loop illegally has 2 control users (old & new loops).
336 // Step 2: Make the old-loop fall-in edges point to the peeled iteration.
337 // Do this by making the old-loop fall-in edges act as if they came
338 // around the loopback from the prior iteration (follow the old-loop
339 // backedges) and then map to the new peeled iteration. This leaves
340 // the pre-loop with only 1 user (the new peeled iteration), but the
341 // peeled-loop backedge has 2 users.
342 // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
343 // extra backedge user.
344 //
345 // orig
346 //
347 // stmt1
348 // |
349 // v
350 // loop predicate
351 // |
352 // v
353 // loop<----+
354 // | |
355 // stmt2 |
356 // | |
357 // v |
358 // if ^
359 // / \ |
360 // / \ |
361 // v v |
362 // false true |
363 // / \ |
364 // / ----+
365 // |
366 // v
367 // exit
368 //
369 //
370 // after clone loop
371 //
372 // stmt1
373 // |
374 // v
375 // loop predicate
376 // / \
377 // clone / \ orig
378 // / \
379 // / \
380 // v v
381 // +---->loop clone loop<----+
382 // | | | |
383 // | stmt2 clone stmt2 |
384 // | | | |
385 // | v v |
386 // ^ if clone If ^
387 // | / \ / \ |
388 // | / \ / \ |
389 // | v v v v |
390 // | true false false true |
391 // | / \ / \ |
392 // +---- \ / ----+
393 // \ /
394 // 1v v2
395 // region
396 // |
397 // v
398 // exit
399 //
400 //
401 // after peel and predicate move
402 //
403 // stmt1
404 // /
405 // /
406 // clone / orig
407 // /
408 // / +----------+
409 // / | |
410 // / loop predicate |
411 // / | |
412 // v v |
413 // TOP-->loop clone loop<----+ |
414 // | | | |
415 // stmt2 clone stmt2 | |
416 // | | | ^
417 // v v | |
418 // if clone If ^ |
419 // / \ / \ | |
420 // / \ / \ | |
421 // v v v v | |
422 // true false false true | |
423 // | \ / \ | |
424 // | \ / ----+ ^
425 // | \ / |
426 // | 1v v2 |
427 // v region |
428 // | | |
429 // | v |
430 // | exit |
431 // | |
432 // +--------------->-----------------+
433 //
434 //
435 // final graph
436 //
437 // stmt1
438 // |
439 // v
440 // stmt2 clone
441 // |
442 // v
443 // if clone
444 // / |
445 // / |
446 // v v
447 // false true
448 // | |
449 // | v
450 // | loop predicate
451 // | |
452 // | v
453 // | loop<----+
454 // | | |
455 // | stmt2 |
456 // | | |
457 // | v |
458 // v if ^
459 // | / \ |
460 // | / \ |
461 // | v v |
462 // | false true |
463 // | | \ |
464 // v v --+
465 // region
466 // |
467 // v
468 // exit
469 //
470 void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {
472 C->set_major_progress();
473 // Peeling a 'main' loop in a pre/main/post situation obfuscates the
474 // 'pre' loop from the main and the 'pre' can no longer have its
475 // iterations adjusted. Therefore, we need to declare this loop as
476 // no longer a 'main' loop; it will need new pre and post loops before
477 // we can do further RCE.
478 #ifndef PRODUCT
479 if (TraceLoopOpts) {
480 tty->print("Peel ");
481 loop->dump_head();
482 }
483 #endif
484 Node* head = loop->_head;
485 bool counted_loop = head->is_CountedLoop();
486 if (counted_loop) {
487 CountedLoopNode *cl = head->as_CountedLoop();
488 assert(cl->trip_count() > 0, "peeling a fully unrolled loop");
489 cl->set_trip_count(cl->trip_count() - 1);
490 if (cl->is_main_loop()) {
491 cl->set_normal_loop();
492 #ifndef PRODUCT
493 if (PrintOpto && VerifyLoopOptimizations) {
494 tty->print("Peeling a 'main' loop; resetting to 'normal' ");
495 loop->dump_head();
496 }
497 #endif
498 }
499 }
500 Node* entry = head->in(LoopNode::EntryControl);
502 // Step 1: Clone the loop body. The clone becomes the peeled iteration.
503 // The pre-loop illegally has 2 control users (old & new loops).
504 clone_loop( loop, old_new, dom_depth(head) );
506 // Step 2: Make the old-loop fall-in edges point to the peeled iteration.
507 // Do this by making the old-loop fall-in edges act as if they came
508 // around the loopback from the prior iteration (follow the old-loop
509 // backedges) and then map to the new peeled iteration. This leaves
510 // the pre-loop with only 1 user (the new peeled iteration), but the
511 // peeled-loop backedge has 2 users.
512 Node* new_exit_value = old_new[head->in(LoopNode::LoopBackControl)->_idx];
513 new_exit_value = move_loop_predicates(entry, new_exit_value);
514 _igvn.hash_delete(head);
515 head->set_req(LoopNode::EntryControl, new_exit_value);
516 for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
517 Node* old = head->fast_out(j);
518 if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) {
519 new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx];
520 if (!new_exit_value ) // Backedge value is ALSO loop invariant?
521 // Then loop body backedge value remains the same.
522 new_exit_value = old->in(LoopNode::LoopBackControl);
523 _igvn.hash_delete(old);
524 old->set_req(LoopNode::EntryControl, new_exit_value);
525 }
526 }
529 // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
530 // extra backedge user.
531 Node* new_head = old_new[head->_idx];
532 _igvn.hash_delete(new_head);
533 new_head->set_req(LoopNode::LoopBackControl, C->top());
534 for (DUIterator_Fast j2max, j2 = new_head->fast_outs(j2max); j2 < j2max; j2++) {
535 Node* use = new_head->fast_out(j2);
536 if (use->in(0) == new_head && use->req() == 3 && use->is_Phi()) {
537 _igvn.hash_delete(use);
538 use->set_req(LoopNode::LoopBackControl, C->top());
539 }
540 }
543 // Step 4: Correct dom-depth info. Set to loop-head depth.
544 int dd = dom_depth(head);
545 set_idom(head, head->in(1), dd);
546 for (uint j3 = 0; j3 < loop->_body.size(); j3++) {
547 Node *old = loop->_body.at(j3);
548 Node *nnn = old_new[old->_idx];
549 if (!has_ctrl(nnn))
550 set_idom(nnn, idom(nnn), dd-1);
551 // While we're at it, remove any SafePoints from the peeled code
552 if (old->Opcode() == Op_SafePoint) {
553 Node *nnn = old_new[old->_idx];
554 lazy_replace(nnn,nnn->in(TypeFunc::Control));
555 }
556 }
558 // Now force out all loop-invariant dominating tests. The optimizer
559 // finds some, but we _know_ they are all useless.
560 peeled_dom_test_elim(loop,old_new);
562 loop->record_for_igvn();
563 }
565 #define EMPTY_LOOP_SIZE 7 // number of nodes in an empty loop
567 //------------------------------policy_maximally_unroll------------------------
568 // Calculate exact loop trip count and return true if loop can be maximally
569 // unrolled.
570 bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
571 CountedLoopNode *cl = _head->as_CountedLoop();
572 assert(cl->is_normal_loop(), "");
573 if (!cl->is_valid_counted_loop())
574 return false; // Malformed counted loop
576 if (!cl->has_exact_trip_count()) {
577 // Trip count is not exact.
578 return false;
579 }
581 uint trip_count = cl->trip_count();
582 // Note, max_juint is used to indicate unknown trip count.
583 assert(trip_count > 1, "one iteration loop should be optimized out already");
584 assert(trip_count < max_juint, "exact trip_count should be less than max_uint.");
586 // Real policy: if we maximally unroll, does it get too big?
587 // Allow the unrolled mess to get larger than standard loop
588 // size. After all, it will no longer be a loop.
589 uint body_size = _body.size();
590 uint unroll_limit = (uint)LoopUnrollLimit * 4;
591 assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
592 if (trip_count > unroll_limit || body_size > unroll_limit) {
593 return false;
594 }
596 // Take into account that after unrolling the conjoined heads and tails will fold;
597 // otherwise policy_unroll() may allow more unrolling than maximal unrolling.
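// For example (illustrative sizes): body_size = 20 and trip_count = 4 give
//   new_body_size = 7 + (20 - 7) * 4 = 59
// with EMPTY_LOOP_SIZE = 7 accounting for the loop control that folds away
// once the loop is fully unrolled.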
598 uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count;
599 uint tst_body_size = (new_body_size - EMPTY_LOOP_SIZE) / trip_count + EMPTY_LOOP_SIZE;
600 if (body_size != tst_body_size) // Check for int overflow
601 return false;
602 if (new_body_size > unroll_limit ||
603 // Unrolling can result in a large amount of node construction
604 new_body_size >= MaxNodeLimit - phase->C->unique()) {
605 return false;
606 }
608 // Currently we don't have a policy to optimize one-iteration loops.
609 // The maximally-unrolling transformation is used for that:
610 // it is peeled and the original loop becomes unreachable (dead).
611 // Also fully unroll a loop with few iterations, regardless of the
612 // conditions below, since subsequent loop optimizations will split
613 // such a loop anyway (pre-main-post).
614 if (trip_count <= 3)
615 return true;
617 // Do not unroll a loop with String intrinsics code.
618 // String intrinsics are large and have loops.
619 for (uint k = 0; k < _body.size(); k++) {
620 Node* n = _body.at(k);
621 switch (n->Opcode()) {
622 case Op_StrComp:
623 case Op_StrEquals:
624 case Op_StrIndexOf:
625 case Op_AryEq: {
626 return false;
627 }
628 } // switch
629 }
631 return true; // Do maximally unroll
632 }
635 //------------------------------policy_unroll----------------------------------
636 // Return TRUE or FALSE if the loop should be unrolled or not. Unroll if
637 // the loop is a CountedLoop and the body is small enough.
638 bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {
640 CountedLoopNode *cl = _head->as_CountedLoop();
641 assert(cl->is_normal_loop() || cl->is_main_loop(), "");
643 if (!cl->is_valid_counted_loop())
644 return false; // Malformed counted loop
646 // protect against over-unrolling
647 if (cl->trip_count() <= 1) return false;
649 // Check for stride being a small enough constant
650 if (abs(cl->stride_con()) > (1<<3)) return false;
652 int future_unroll_ct = cl->unrolled_count() * 2;
654 // Don't unroll if the next round of unrolling would push us
655 // over the expected trip count of the loop. One is subtracted
656 // from the expected trip count because the pre-loop normally
657 // executes 1 iteration.
658 if (UnrollLimitForProfileCheck > 0 &&
659 cl->profile_trip_cnt() != COUNT_UNKNOWN &&
660 future_unroll_ct > UnrollLimitForProfileCheck &&
661 (float)future_unroll_ct > cl->profile_trip_cnt() - 1.0) {
662 return false;
663 }
665 // When unroll count is greater than LoopUnrollMin, don't unroll if:
666 // the residual iterations are more than 10% of the trip count
667 // and rounds of "unroll,optimize" are not making significant progress.
668 // Progress is defined as the current size being less than 20% larger than the previous size.
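// For example (illustrative numbers): with profile_trip_cnt = 100 and
// future_unroll_ct = 16 the residual test below becomes 15 * 10.0 > 100,
// i.e. up to 15 leftover pre/post iterations exceed 10% of the expected
// trips, which (together with the other conditions) blocks further unrolling.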
669 if (UseSuperWord && cl->node_count_before_unroll() > 0 &&
670 future_unroll_ct > LoopUnrollMin &&
671 (future_unroll_ct - 1) * 10.0 > cl->profile_trip_cnt() &&
672 1.2 * cl->node_count_before_unroll() < (double)_body.size()) {
673 return false;
674 }
676 Node *init_n = cl->init_trip();
677 Node *limit_n = cl->limit();
678 // Non-constant bounds.
679 // Protect against over-unrolling when init and/or limit are not constant
680 // (so that trip_count's init value is maxint) but the iv range is known.
681 if (init_n == NULL || !init_n->is_Con() ||
682 limit_n == NULL || !limit_n->is_Con()) {
683 Node* phi = cl->phi();
684 if (phi != NULL) {
685 assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi.");
686 const TypeInt* iv_type = phase->_igvn.type(phi)->is_int();
687 int next_stride = cl->stride_con() * 2; // stride after this unroll
688 if (next_stride > 0) {
689 if (iv_type->_lo + next_stride <= iv_type->_lo || // overflow
690 iv_type->_lo + next_stride > iv_type->_hi) {
691 return false; // over-unrolling
692 }
693 } else if (next_stride < 0) {
694 if (iv_type->_hi + next_stride >= iv_type->_hi || // overflow
695 iv_type->_hi + next_stride < iv_type->_lo) {
696 return false; // over-unrolling
697 }
698 }
699 }
700 }
702 // Adjust body_size to determine if we unroll or not
703 uint body_size = _body.size();
704 // Key test to unroll CaffeineMark's Logic test
705 int xors_in_loop = 0;
706 // Also count ModL, DivL and MulL which expand mightily
707 for (uint k = 0; k < _body.size(); k++) {
708 Node* n = _body.at(k);
709 switch (n->Opcode()) {
710 case Op_XorI: xors_in_loop++; break; // CaffeineMark's Logic test
711 case Op_ModL: body_size += 30; break;
712 case Op_DivL: body_size += 30; break;
713 case Op_MulL: body_size += 10; break;
714 case Op_StrComp:
715 case Op_StrEquals:
716 case Op_StrIndexOf:
717 case Op_AryEq: {
718 // Do not unroll a loop with String intrinsics code.
719 // String intrinsics are large and have loops.
720 return false;
721 }
722 } // switch
723 }
725 // Check for being too big
726 if (body_size > (uint)LoopUnrollLimit) {
727 if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true;
728 // Normal case: loop too big
729 return false;
730 }
732 // Unroll once! (Each trip will soon do double iterations)
733 return true;
734 }
736 //------------------------------policy_align-----------------------------------
737 // Return TRUE or FALSE if the loop should be cache-line aligned. Gather the
738 // expression that does the alignment. Note that only one array base can be
739 // aligned in a loop (unless the VM guarantees mutual alignment). Note that
740 // if we vectorize short memory ops into longer memory ops, we may want to
741 // increase alignment.
742 bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {
743 return false;
744 }
746 //------------------------------policy_range_check-----------------------------
747 // Return TRUE or FALSE if the loop should be range-check-eliminated.
748 // Actually we do iteration-splitting, a more powerful form of RCE.
749 bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
750 if( !RangeCheckElimination ) return false;
752 CountedLoopNode *cl = _head->as_CountedLoop();
753 // If we unrolled with no intention of doing RCE and we later
754 // changed our minds, we have no pre-loop. Either we need to
755 // make a new pre-loop, or we have to disallow RCE.
756 if( cl->is_main_no_pre_loop() ) return false; // Disallowed for now.
757 Node *trip_counter = cl->phi();
759 // Check loop body for tests of trip-counter plus loop-invariant vs
760 // loop-invariant.
761 for( uint i = 0; i < _body.size(); i++ ) {
762 Node *iff = _body[i];
763 if( iff->Opcode() == Op_If ) { // Test?
765 // Comparing trip+off vs limit
766 Node *bol = iff->in(1);
767 if( bol->req() != 2 ) continue; // dead constant test
768 if (!bol->is_Bool()) {
769 assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only");
770 continue;
771 }
772 Node *cmp = bol->in(1);
774 Node *rc_exp = cmp->in(1);
775 Node *limit = cmp->in(2);
777 Node *limit_c = phase->get_ctrl(limit);
778 if( limit_c == phase->C->top() )
779 return false; // Found dead test on live IF? No RCE!
780 if( is_member(phase->get_loop(limit_c) ) ) {
781 // Compare might have operands swapped; commute them
782 rc_exp = cmp->in(2);
783 limit = cmp->in(1);
784 limit_c = phase->get_ctrl(limit);
785 if( is_member(phase->get_loop(limit_c) ) )
786 continue; // Both inputs are loop varying; cannot RCE
787 }
789 if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, NULL, NULL)) {
790 continue;
791 }
792 // Yeah! Found a test like 'trip+off vs limit'
793 // Test is an IfNode, has 2 projections. If BOTH are in the loop
794 // we need loop unswitching instead of iteration splitting.
795 if( is_loop_exit(iff) )
796 return true; // Found reason to split iterations
797 } // End of is IF
798 }
800 return false;
801 }
803 //------------------------------policy_peel_only-------------------------------
804 // Return TRUE or FALSE if the loop should NEVER be RCE'd or aligned. Useful
805 // for unrolling loops with NO array accesses.
806 bool IdealLoopTree::policy_peel_only( PhaseIdealLoop *phase ) const {
808 for( uint i = 0; i < _body.size(); i++ )
809 if( _body[i]->is_Mem() )
810 return false;
812 // No memory accesses at all!
813 return true;
814 }
816 //------------------------------clone_up_backedge_goo--------------------------
817 // If Node n lives in the back_ctrl block and cannot float, we clone a private
818 // version of n in preheader_ctrl block and return that, otherwise return n.
819 Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n ) {
820 if( get_ctrl(n) != back_ctrl ) return n;
822 Node *x = NULL; // If required, a clone of 'n'
823 // Check for 'n' being pinned in the backedge.
824 if( n->in(0) && n->in(0) == back_ctrl ) {
825 x = n->clone(); // Clone a copy of 'n' to preheader
826 x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader
827 }
829 // Recursively fix up any other input edges into x.
830 // If there are no changes we can just return 'n', otherwise
831 // we need to clone a private copy and change it.
832 for( uint i = 1; i < n->req(); i++ ) {
833 Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i) );
834 if( g != n->in(i) ) {
835 if( !x )
836 x = n->clone();
837 x->set_req(i, g);
838 }
839 }
840 if( x ) { // x can legally float to pre-header location
841 register_new_node( x, preheader_ctrl );
842 return x;
843 } else { // raise n to cover LCA of uses
844 set_ctrl( n, find_non_split_ctrl(back_ctrl->in(0)) );
845 }
846 return n;
847 }
849 //------------------------------insert_pre_post_loops--------------------------
850 // Insert pre and post loops. If peel_only is set, the pre-loop can not have
851 // more iterations added. It acts as a 'peel' only, no lower-bound RCE, no
852 // alignment. Useful to unroll loops that do no array accesses.
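// At the source level the transformation is roughly (illustrative sketch):
//   for (int i = init; i < limit; i++) body(i);
// becomes
//   int i = init;
//   for (; i < pre_limit;  i++) body(i);   // pre-loop (runs a few iterations)
//   for (; i < main_limit; i++) body(i);   // main loop (target of RCE and unrolling)
//   for (; i < limit;      i++) body(i);   // post-loop (remaining iterations)
// where pre_limit and main_limit are adjusted later by RCE and unrolling.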
853 void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) {
855 #ifndef PRODUCT
856 if (TraceLoopOpts) {
857 if (peel_only)
858 tty->print("PeelMainPost ");
859 else
860 tty->print("PreMainPost ");
861 loop->dump_head();
862 }
863 #endif
864 C->set_major_progress();
866 // Find common pieces of the loop being guarded with pre & post loops
867 CountedLoopNode *main_head = loop->_head->as_CountedLoop();
868 assert( main_head->is_normal_loop(), "" );
869 CountedLoopEndNode *main_end = main_head->loopexit();
870 assert( main_end->outcnt() == 2, "1 true, 1 false path only" );
871 uint dd_main_head = dom_depth(main_head);
872 uint max = main_head->outcnt();
874 Node *pre_header= main_head->in(LoopNode::EntryControl);
875 Node *init = main_head->init_trip();
876 Node *incr = main_end ->incr();
877 Node *limit = main_end ->limit();
878 Node *stride = main_end ->stride();
879 Node *cmp = main_end ->cmp_node();
880 BoolTest::mask b_test = main_end->test_trip();
882 // Need only 1 user of 'bol' because I will be hacking the loop bounds.
883 Node *bol = main_end->in(CountedLoopEndNode::TestValue);
884 if( bol->outcnt() != 1 ) {
885 bol = bol->clone();
886 register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl));
887 _igvn.hash_delete(main_end);
888 main_end->set_req(CountedLoopEndNode::TestValue, bol);
889 }
890 // Need only 1 user of 'cmp' because I will be hacking the loop bounds.
891 if( cmp->outcnt() != 1 ) {
892 cmp = cmp->clone();
893 register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl));
894 _igvn.hash_delete(bol);
895 bol->set_req(1, cmp);
896 }
898 //------------------------------
899 // Step A: Create Post-Loop.
900 Node* main_exit = main_end->proj_out(false);
901 assert( main_exit->Opcode() == Op_IfFalse, "" );
902 int dd_main_exit = dom_depth(main_exit);
904 // Step A1: Clone the loop body. The clone becomes the post-loop. The main
905 // loop pre-header illegally has 2 control users (old & new loops).
906 clone_loop( loop, old_new, dd_main_exit );
907 assert( old_new[main_end ->_idx]->Opcode() == Op_CountedLoopEnd, "" );
908 CountedLoopNode *post_head = old_new[main_head->_idx]->as_CountedLoop();
909 post_head->set_post_loop(main_head);
911 // Reduce the post-loop trip count.
912 CountedLoopEndNode* post_end = old_new[main_end ->_idx]->as_CountedLoopEnd();
913 post_end->_prob = PROB_FAIR;
915 // Build the main-loop normal exit.
916 IfFalseNode *new_main_exit = new (C, 1) IfFalseNode(main_end);
917 _igvn.register_new_node_with_optimizer( new_main_exit );
918 set_idom(new_main_exit, main_end, dd_main_exit );
919 set_loop(new_main_exit, loop->_parent);
921 // Step A2: Build a zero-trip guard for the post-loop. After leaving the
922 // main-loop, the post-loop may not execute at all. We 'opaque' the incr
923 // (the main-loop trip-counter exit value) because we will be changing
924 // the exit value (via unrolling) so we cannot constant-fold away the zero
925 // trip guard until all unrolling is done.
926 Node *zer_opaq = new (C, 2) Opaque1Node(C, incr);
927 Node *zer_cmp = new (C, 3) CmpINode( zer_opaq, limit );
928 Node *zer_bol = new (C, 2) BoolNode( zer_cmp, b_test );
929 register_new_node( zer_opaq, new_main_exit );
930 register_new_node( zer_cmp , new_main_exit );
931 register_new_node( zer_bol , new_main_exit );
933 // Build the IfNode
934 IfNode *zer_iff = new (C, 2) IfNode( new_main_exit, zer_bol, PROB_FAIR, COUNT_UNKNOWN );
935 _igvn.register_new_node_with_optimizer( zer_iff );
936 set_idom(zer_iff, new_main_exit, dd_main_exit);
937 set_loop(zer_iff, loop->_parent);
939 // Plug in the false-path, taken if we need to skip post-loop
940 _igvn.hash_delete( main_exit );
941 main_exit->set_req(0, zer_iff);
942 _igvn._worklist.push(main_exit);
943 set_idom(main_exit, zer_iff, dd_main_exit);
944 set_idom(main_exit->unique_out(), zer_iff, dd_main_exit);
945 // Make the true-path, must enter the post loop
946 Node *zer_taken = new (C, 1) IfTrueNode( zer_iff );
947 _igvn.register_new_node_with_optimizer( zer_taken );
948 set_idom(zer_taken, zer_iff, dd_main_exit);
949 set_loop(zer_taken, loop->_parent);
950 // Plug in the true path
951 _igvn.hash_delete( post_head );
952 post_head->set_req(LoopNode::EntryControl, zer_taken);
953 set_idom(post_head, zer_taken, dd_main_exit);
955 // Step A3: Make the fall-in values to the post-loop come from the
956 // fall-out values of the main-loop.
957 for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) {
958 Node* main_phi = main_head->fast_out(i);
959 if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() >0 ) {
960 Node *post_phi = old_new[main_phi->_idx];
961 Node *fallmain = clone_up_backedge_goo(main_head->back_control(),
962 post_head->init_control(),
963 main_phi->in(LoopNode::LoopBackControl));
964 _igvn.hash_delete(post_phi);
965 post_phi->set_req( LoopNode::EntryControl, fallmain );
966 }
967 }
969 // Update local caches for next stanza
970 main_exit = new_main_exit;
973 //------------------------------
974 // Step B: Create Pre-Loop.
976 // Step B1: Clone the loop body. The clone becomes the pre-loop. The main
977 // loop pre-header illegally has 2 control users (old & new loops).
978 clone_loop( loop, old_new, dd_main_head );
979 CountedLoopNode* pre_head = old_new[main_head->_idx]->as_CountedLoop();
980 CountedLoopEndNode* pre_end = old_new[main_end ->_idx]->as_CountedLoopEnd();
981 pre_head->set_pre_loop(main_head);
982 Node *pre_incr = old_new[incr->_idx];
984 // Reduce the pre-loop trip count.
985 pre_end->_prob = PROB_FAIR;
987 // Find the pre-loop normal exit.
988 Node* pre_exit = pre_end->proj_out(false);
989 assert( pre_exit->Opcode() == Op_IfFalse, "" );
990 IfFalseNode *new_pre_exit = new (C, 1) IfFalseNode(pre_end);
991 _igvn.register_new_node_with_optimizer( new_pre_exit );
992 set_idom(new_pre_exit, pre_end, dd_main_head);
993 set_loop(new_pre_exit, loop->_parent);
995 // Step B2: Build a zero-trip guard for the main-loop. After leaving the
996 // pre-loop, the main-loop may not execute at all. Later in life this
997 // zero-trip guard will become the minimum-trip guard when we unroll
998 // the main-loop.
999 Node *min_opaq = new (C, 2) Opaque1Node(C, limit);
1000 Node *min_cmp = new (C, 3) CmpINode( pre_incr, min_opaq );
1001 Node *min_bol = new (C, 2) BoolNode( min_cmp, b_test );
1002 register_new_node( min_opaq, new_pre_exit );
1003 register_new_node( min_cmp , new_pre_exit );
1004 register_new_node( min_bol , new_pre_exit );
1006 // Build the IfNode (assume the main-loop is executed always).
1007 IfNode *min_iff = new (C, 2) IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN );
1008 _igvn.register_new_node_with_optimizer( min_iff );
1009 set_idom(min_iff, new_pre_exit, dd_main_head);
1010 set_loop(min_iff, loop->_parent);
1012 // Plug in the false-path, taken if we need to skip main-loop
1013 _igvn.hash_delete( pre_exit );
1014 pre_exit->set_req(0, min_iff);
1015 set_idom(pre_exit, min_iff, dd_main_head);
1016 set_idom(pre_exit->unique_out(), min_iff, dd_main_head);
1017 // Make the true-path, must enter the main loop
1018 Node *min_taken = new (C, 1) IfTrueNode( min_iff );
1019 _igvn.register_new_node_with_optimizer( min_taken );
1020 set_idom(min_taken, min_iff, dd_main_head);
1021 set_loop(min_taken, loop->_parent);
1022 // Plug in the true path
1023 _igvn.hash_delete( main_head );
1024 main_head->set_req(LoopNode::EntryControl, min_taken);
1025 set_idom(main_head, min_taken, dd_main_head);
1027 // Step B3: Make the fall-in values to the main-loop come from the
1028 // fall-out values of the pre-loop.
1029 for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) {
1030 Node* main_phi = main_head->fast_out(i2);
1031 if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
1032 Node *pre_phi = old_new[main_phi->_idx];
1033 Node *fallpre = clone_up_backedge_goo(pre_head->back_control(),
1034 main_head->init_control(),
1035 pre_phi->in(LoopNode::LoopBackControl));
1036 _igvn.hash_delete(main_phi);
1037 main_phi->set_req( LoopNode::EntryControl, fallpre );
1038 }
1039 }
1041 // Step B4: Shorten the pre-loop to run only 1 iteration (for now).
1042 // RCE and alignment may change this later.
1043 Node *cmp_end = pre_end->cmp_node();
1044 assert( cmp_end->in(2) == limit, "" );
1045 Node *pre_limit = new (C, 3) AddINode( init, stride );
1047 // Save the original loop limit in this Opaque1 node for
1048 // use by range check elimination.
1049 Node *pre_opaq = new (C, 3) Opaque1Node(C, pre_limit, limit);
1051 register_new_node( pre_limit, pre_head->in(0) );
1052 register_new_node( pre_opaq , pre_head->in(0) );
1054 // Since no other users of pre-loop compare, I can hack limit directly
1055 assert( cmp_end->outcnt() == 1, "no other users" );
1056 _igvn.hash_delete(cmp_end);
1057 cmp_end->set_req(2, peel_only ? pre_limit : pre_opaq);
1059 // Special case for not-equal loop bounds:
1060 // Change pre loop test, main loop test, and the
1061 // main loop guard test to use lt or gt depending on stride
1062 // direction:
1063 // positive stride use <
1064 // negative stride use >
1066 if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) {
1068 BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt;
1069 // Modify pre loop end condition
1070 Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool();
1071 BoolNode* new_bol0 = new (C, 2) BoolNode(pre_bol->in(1), new_test);
1072 register_new_node( new_bol0, pre_head->in(0) );
1073 _igvn.hash_delete(pre_end);
1074 pre_end->set_req(CountedLoopEndNode::TestValue, new_bol0);
1075 // Modify main loop guard condition
1076 assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay");
1077 BoolNode* new_bol1 = new (C, 2) BoolNode(min_bol->in(1), new_test);
1078 register_new_node( new_bol1, new_pre_exit );
1079 _igvn.hash_delete(min_iff);
1080 min_iff->set_req(CountedLoopEndNode::TestValue, new_bol1);
1081 // Modify main loop end condition
1082 BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool();
1083 BoolNode* new_bol2 = new (C, 2) BoolNode(main_bol->in(1), new_test);
1084 register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) );
1085 _igvn.hash_delete(main_end);
1086 main_end->set_req(CountedLoopEndNode::TestValue, new_bol2);
1087 }
1089 // Flag main loop
1090 main_head->set_main_loop();
1091 if( peel_only ) main_head->set_main_no_pre_loop();
1093 // It's difficult to be precise about the trip-counts
1094 // for the pre/post loops. They are usually very short,
1095 // so guess that 4 trips is a reasonable value.
1096 post_head->set_profile_trip_cnt(4.0);
1097 pre_head->set_profile_trip_cnt(4.0);
1099 // Now force out all loop-invariant dominating tests. The optimizer
1100 // finds some, but we _know_ they are all useless.
1101 peeled_dom_test_elim(loop,old_new);
1102 }
1104 //------------------------------is_invariant-----------------------------
1105 // Return true if n is invariant
1106 bool IdealLoopTree::is_invariant(Node* n) const {
1107 Node *n_c = _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n;
1108 if (n_c->is_top()) return false;
1109 return !is_member(_phase->get_loop(n_c));
1110 }
1113 //------------------------------do_unroll--------------------------------------
1114 // Unroll the loop body one step - make each trip do 2 iterations.
1115 void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) {
1116 assert(LoopUnrollLimit, "");
1117 CountedLoopNode *loop_head = loop->_head->as_CountedLoop();
1118 CountedLoopEndNode *loop_end = loop_head->loopexit();
1119 assert(loop_end, "");
1120 #ifndef PRODUCT
1121 if (PrintOpto && VerifyLoopOptimizations) {
1122 tty->print("Unrolling ");
1123 loop->dump_head();
1124 } else if (TraceLoopOpts) {
1125 if (loop_head->trip_count() < (uint)LoopUnrollLimit) {
1126 tty->print("Unroll %d(%2d) ", loop_head->unrolled_count()*2, loop_head->trip_count());
1127 } else {
1128 tty->print("Unroll %d ", loop_head->unrolled_count()*2);
1129 }
1130 loop->dump_head();
1131 }
1132 #endif
1134 // Remember loop node count before unrolling to detect
1135 // if rounds of unroll,optimize are making progress
1136 loop_head->set_node_count_before_unroll(loop->_body.size());
1138 Node *ctrl = loop_head->in(LoopNode::EntryControl);
1139 Node *limit = loop_head->limit();
1140 Node *init = loop_head->init_trip();
1141 Node *stride = loop_head->stride();
1143 Node *opaq = NULL;
1144 if( adjust_min_trip ) { // If not maximally unrolling, need adjustment
1145 assert( loop_head->is_main_loop(), "" );
1146 assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" );
1147 Node *iff = ctrl->in(0);
1148 assert( iff->Opcode() == Op_If, "" );
1149 Node *bol = iff->in(1);
1150 assert( bol->Opcode() == Op_Bool, "" );
1151 Node *cmp = bol->in(1);
1152 assert( cmp->Opcode() == Op_CmpI, "" );
1153 opaq = cmp->in(2);
1154 // Occasionally it's possible for a pre-loop Opaque1 node to be
1155 // optimized away and then another round of loop opts attempted.
1156 // We can not optimize this particular loop in that case.
1157 if( opaq->Opcode() != Op_Opaque1 )
1158 return; // Cannot find pre-loop! Bail out!
1159 }
1161 C->set_major_progress();
1163 // Adjust max trip count. The trip count is intentionally rounded
1164 // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
1165 // the main, unrolled, part of the loop will never execute as it is protected
1166 // by the min-trip test. See bug 4834191 for a case where we over-unrolled
1167 // and later determined that part of the unrolled loop was dead.
1168 loop_head->set_trip_count(loop_head->trip_count() / 2);
1170 // Double the count of original iterations in the unrolled loop body.
1171 loop_head->double_unrolled_count();
1173 // -----------
1174 // Step 2: Cut back the trip counter for an unroll amount of 2.
1175 // Loop will normally trip (limit - init)/stride_con. Since it's a
1176 // CountedLoop this is exact (stride divides limit-init exactly).
1177 // We are going to double the loop body, so we want to knock off any
1178 // odd iteration: (trip_cnt & ~1). Then back compute a new limit.
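// For example (illustrative values): init = 0, limit = 7, stride = 1 gives
//   span = 7, trip = 7, rond = 7 & -2 = 6, spn2 = 6, lim2 = 6,
// so the doubled body trips 3 times (6 original iterations) and the odd
// leftover iteration is picked up outside the unrolled loop (e.g. by the post-loop).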
1179 Node *span = new (C, 3) SubINode( limit, init );
1180 register_new_node( span, ctrl );
1181 Node *trip = new (C, 3) DivINode( 0, span, stride );
1182 register_new_node( trip, ctrl );
1183 Node *mtwo = _igvn.intcon(-2);
1184 set_ctrl(mtwo, C->root());
1185 Node *rond = new (C, 3) AndINode( trip, mtwo );
1186 register_new_node( rond, ctrl );
1187 Node *spn2 = new (C, 3) MulINode( rond, stride );
1188 register_new_node( spn2, ctrl );
1189 Node *lim2 = new (C, 3) AddINode( spn2, init );
1190 register_new_node( lim2, ctrl );
1192 // Hammer in the new limit
1193 Node *ctrl2 = loop_end->in(0);
1194 Node *cmp2 = new (C, 3) CmpINode( loop_head->incr(), lim2 );
1195 register_new_node( cmp2, ctrl2 );
1196 Node *bol2 = new (C, 2) BoolNode( cmp2, loop_end->test_trip() );
1197 register_new_node( bol2, ctrl2 );
1198 _igvn.hash_delete(loop_end);
1199 loop_end->set_req(CountedLoopEndNode::TestValue, bol2);
1201 // Step 3: Find the min-trip test guaranteed before a 'main' loop.
1202 // Make it a 1-trip test (means at least 2 trips).
1203 if( adjust_min_trip ) {
1204 // Guard test uses an 'opaque' node which is not shared. Hence I
1205 // can edit its inputs directly. Hammer in the new limit for the
1206 // minimum-trip guard.
1207 assert( opaq->outcnt() == 1, "" );
1208 _igvn.hash_delete(opaq);
1209 opaq->set_req(1, lim2);
1210 }
1212 // ---------
1213 // Step 4: Clone the loop body. Move it inside the loop. This loop body
1214 // represents the odd iterations; since the loop trips an even number of
1215 // times its backedge is never taken. Kill the backedge.
1216 uint dd = dom_depth(loop_head);
1217 clone_loop( loop, old_new, dd );
1219 // Make backedges of the clone equal to backedges of the original.
1220 // Make the fall-in from the original come from the fall-out of the clone.
1221 for (DUIterator_Fast jmax, j = loop_head->fast_outs(jmax); j < jmax; j++) {
1222 Node* phi = loop_head->fast_out(j);
1223 if( phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0 ) {
1224 Node *newphi = old_new[phi->_idx];
1225 _igvn.hash_delete( phi );
1226 _igvn.hash_delete( newphi );
1228 phi ->set_req(LoopNode:: EntryControl, newphi->in(LoopNode::LoopBackControl));
1229 newphi->set_req(LoopNode::LoopBackControl, phi ->in(LoopNode::LoopBackControl));
1230 phi ->set_req(LoopNode::LoopBackControl, C->top());
1231 }
1232 }
1233 Node *clone_head = old_new[loop_head->_idx];
1234 _igvn.hash_delete( clone_head );
1235 loop_head ->set_req(LoopNode:: EntryControl, clone_head->in(LoopNode::LoopBackControl));
1236 clone_head->set_req(LoopNode::LoopBackControl, loop_head ->in(LoopNode::LoopBackControl));
1237 loop_head ->set_req(LoopNode::LoopBackControl, C->top());
1238 loop->_head = clone_head; // New loop header
1240 set_idom(loop_head, loop_head ->in(LoopNode::EntryControl), dd);
1241 set_idom(clone_head, clone_head->in(LoopNode::EntryControl), dd);
1243 // Kill the clone's backedge
1244 Node *newcle = old_new[loop_end->_idx];
1245 _igvn.hash_delete( newcle );
1246 Node *one = _igvn.intcon(1);
1247 set_ctrl(one, C->root());
1248 newcle->set_req(1, one);
1249 // Force clone into same loop body
1250 uint max = loop->_body.size();
1251 for( uint k = 0; k < max; k++ ) {
1252 Node *old = loop->_body.at(k);
1253 Node *nnn = old_new[old->_idx];
1254 loop->_body.push(nnn);
1255 if (!has_ctrl(old))
1256 set_loop(nnn, loop);
1257 }
1259 loop->record_for_igvn();
1260 }
1262 //------------------------------do_maximally_unroll----------------------------
1264 void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) {
1265 CountedLoopNode *cl = loop->_head->as_CountedLoop();
1266 assert(cl->trip_count() > 0, "");
1267 #ifndef PRODUCT
1268 if (TraceLoopOpts) {
1269 tty->print("MaxUnroll %d ", cl->trip_count());
1270 loop->dump_head();
1271 }
1272 #endif
1274 // If loop is tripping an odd number of times, peel odd iteration
1275 if ((cl->trip_count() & 1) == 1) {
1276 do_peeling(loop, old_new);
1277 }
1279 // Now it's tripping an even number of times remaining. Double loop body.
1280 // Do not adjust pre-guards; they are not needed and do not exist.
1281 if (cl->trip_count() > 0) {
1282 do_unroll(loop, old_new, false);
1283 }
1284 }
1286 //------------------------------dominates_backedge---------------------------------
1287 // Returns true if ctrl is executed on every complete iteration
1288 bool IdealLoopTree::dominates_backedge(Node* ctrl) {
1289 assert(ctrl->is_CFG(), "must be control");
1290 Node* backedge = _head->as_Loop()->in(LoopNode::LoopBackControl);
1291 return _phase->dom_lca_internal(ctrl, backedge) == ctrl;
1292 }
1294 //------------------------------add_constraint---------------------------------
1295 // Constrain the main loop iterations so the condition:
1296 // scale_con * I + offset < limit
1297 // always holds true. That is, either increase the number of iterations in
1298 // the pre-loop or the post-loop until the condition holds true in the main
1299 // loop. Stride, scale, offset and limit are all loop invariant. Further,
1300 // stride and scale are constants (offset and limit often are).
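// For example (illustrative values): with stride_con = 1, scale_con = 1,
// offset = o and limit = L the code below computes X = L - o, and the
// main-loop limit becomes MIN(main_limit, L - o), i.e. the main loop stops
// before I + o can reach L.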
1301 void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) {
1303 // Compute "I :: (limit-offset)/scale_con"
1304 Node *con = new (C, 3) SubINode( limit, offset );
1305 register_new_node( con, pre_ctrl );
1306 Node *scale = _igvn.intcon(scale_con);
1307 set_ctrl(scale, C->root());
1308 Node *X = new (C, 3) DivINode( 0, con, scale );
1309 register_new_node( X, pre_ctrl );
1311 // For positive stride, the pre-loop limit always uses a MAX function
1312 // and the main loop a MIN function. For negative stride these are
1313 // reversed.
1315 // Also for positive stride*scale the affine function is increasing, so the
1316 // pre-loop must check for underflow and the post-loop for overflow.
1317 // Negative stride*scale reverses this; pre-loop checks for overflow and
1318 // post-loop for underflow.
1319 if( stride_con*scale_con > 0 ) {
1320 // Compute I < (limit-offset)/scale_con
1321 // Adjust main-loop last iteration to be MIN/MAX(main_loop,X)
1322 *main_limit = (stride_con > 0)
1323 ? (Node*)(new (C, 3) MinINode( *main_limit, X ))
1324 : (Node*)(new (C, 3) MaxINode( *main_limit, X ));
1325 register_new_node( *main_limit, pre_ctrl );
1327 } else {
1328 // Compute (limit-offset)/scale_con + SGN(-scale_con) <= I
1329 // Add the negation of the main-loop constraint to the pre-loop.
1330 // See footnote [++] below for a derivation of the limit expression.
1331 Node *incr = _igvn.intcon(scale_con > 0 ? -1 : 1);
1332 set_ctrl(incr, C->root());
1333 Node *adj = new (C, 3) AddINode( X, incr );
1334 register_new_node( adj, pre_ctrl );
1335 *pre_limit = (scale_con > 0)
1336 ? (Node*)new (C, 3) MinINode( *pre_limit, adj )
1337 : (Node*)new (C, 3) MaxINode( *pre_limit, adj );
1338 register_new_node( *pre_limit, pre_ctrl );
1340 // [++] Here's the algebra that justifies the pre-loop limit expression:
1341 //
1342 // NOT( scale_con * I + offset < limit )
1343 // ==
1344 // scale_con * I + offset >= limit
1345 // ==
1346 // SGN(scale_con) * I >= (limit-offset)/|scale_con|
1347 // ==
1348 // (limit-offset)/|scale_con| <= I * SGN(scale_con)
1349 // ==
1350 // (limit-offset)/|scale_con|-1 < I * SGN(scale_con)
1351 // ==
1352 // ( if (scale_con > 0) /*common case*/
1353 // (limit-offset)/scale_con - 1 < I
1354 // else
1355 // (limit-offset)/scale_con + 1 > I
1356 // )
1357 // ( if (scale_con > 0) /*common case*/
1358 // (limit-offset)/scale_con + SGN(-scale_con) < I
1359 // else
1360 // (limit-offset)/scale_con + SGN(-scale_con) > I
1361 }
1362 }
1365 //------------------------------is_scaled_iv---------------------------------
1366 // Return true if exp is a constant times an induction var
1367 bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, int* p_scale) {
1368 if (exp == iv) {
1369 if (p_scale != NULL) {
1370 *p_scale = 1;
1371 }
1372 return true;
1373 }
1374 int opc = exp->Opcode();
1375 if (opc == Op_MulI) {
1376 if (exp->in(1) == iv && exp->in(2)->is_Con()) {
1377 if (p_scale != NULL) {
1378 *p_scale = exp->in(2)->get_int();
1379 }
1380 return true;
1381 }
1382 if (exp->in(2) == iv && exp->in(1)->is_Con()) {
1383 if (p_scale != NULL) {
1384 *p_scale = exp->in(1)->get_int();
1385 }
1386 return true;
1387 }
1388 } else if (opc == Op_LShiftI) {
1389 if (exp->in(1) == iv && exp->in(2)->is_Con()) {
1390 if (p_scale != NULL) {
1391 *p_scale = 1 << exp->in(2)->get_int();
1392 }
1393 return true;
1394 }
1395 }
1396 return false;
1397 }
1399 //-----------------------------is_scaled_iv_plus_offset------------------------------
1400 // Return true if exp is a simple induction variable expression: k1*iv + (invar + k2)
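// For example, exp = (i << 2) + (invar + 3) is recognized with *p_scale = 4
// and *p_offset = (invar + 3). (Illustrative shape; the actual matching is on
// the MulI/LShiftI/AddI/SubI node patterns below.)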
1401 bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth) {
1402 if (is_scaled_iv(exp, iv, p_scale)) {
1403 if (p_offset != NULL) {
1404 Node *zero = _igvn.intcon(0);
1405 set_ctrl(zero, C->root());
1406 *p_offset = zero;
1407 }
1408 return true;
1409 }
1410 int opc = exp->Opcode();
1411 if (opc == Op_AddI) {
1412 if (is_scaled_iv(exp->in(1), iv, p_scale)) {
1413 if (p_offset != NULL) {
1414 *p_offset = exp->in(2);
1415 }
1416 return true;
1417 }
1418 if (exp->in(2)->is_Con()) {
1419 Node* offset2 = NULL;
1420 if (depth < 2 &&
1421 is_scaled_iv_plus_offset(exp->in(1), iv, p_scale,
1422 p_offset != NULL ? &offset2 : NULL, depth+1)) {
1423 if (p_offset != NULL) {
1424 Node *ctrl_off2 = get_ctrl(offset2);
1425 Node* offset = new (C, 3) AddINode(offset2, exp->in(2));
1426 register_new_node(offset, ctrl_off2);
1427 *p_offset = offset;
1428 }
1429 return true;
1430 }
1431 }
1432 } else if (opc == Op_SubI) {
1433 if (is_scaled_iv(exp->in(1), iv, p_scale)) {
1434 if (p_offset != NULL) {
1435 Node *zero = _igvn.intcon(0);
1436 set_ctrl(zero, C->root());
1437 Node *ctrl_off = get_ctrl(exp->in(2));
1438 Node* offset = new (C, 3) SubINode(zero, exp->in(2));
1439 register_new_node(offset, ctrl_off);
1440 *p_offset = offset;
1441 }
1442 return true;
1443 }
1444 if (is_scaled_iv(exp->in(2), iv, p_scale)) {
1445 if (p_offset != NULL) {
1446 *p_scale *= -1;
1447 *p_offset = exp->in(1);
1448 }
1449 return true;
1450 }
1451 }
1452 return false;
1453 }
1455 //------------------------------do_range_check---------------------------------
1456 // Eliminate range-checks and other trip-counter vs loop-invariant tests.
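// At the source level the effect is roughly (illustrative sketch): in
//   for (int i = init; i < limit; i++) { check(0 <= i + off && i + off < a.length); a[i + off] = v; }
// the pre- and main-loop limits are narrowed so that i + off stays inside
// [0, a.length) for every main-loop iteration, allowing the in-loop bounds
// check to be removed; the pre- and post-loops keep the original checks.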
1457 void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
1458 #ifndef PRODUCT
1459 if (PrintOpto && VerifyLoopOptimizations) {
1460 tty->print("Range Check Elimination ");
1461 loop->dump_head();
1462 } else if (TraceLoopOpts) {
1463 tty->print("RangeCheck ");
1464 loop->dump_head();
1465 }
1466 #endif
1467 assert(RangeCheckElimination, "");
1468 CountedLoopNode *cl = loop->_head->as_CountedLoop();
1469 assert(cl->is_main_loop(), "");
1471 // protect against stride not being a constant
1472 if (!cl->stride_is_con())
1473 return;
1475 // Find the trip counter; we are iteration splitting based on it
1476 Node *trip_counter = cl->phi();
1477 // Find the main loop limit; we will trim its iterations
1478 // so they never trip the end tests
1479 Node *main_limit = cl->limit();
1481 // Need to find the main-loop zero-trip guard
1482 Node *ctrl = cl->in(LoopNode::EntryControl);
1483 assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "");
1484 Node *iffm = ctrl->in(0);
1485 assert(iffm->Opcode() == Op_If, "");
1486 Node *bolzm = iffm->in(1);
1487 assert(bolzm->Opcode() == Op_Bool, "");
1488 Node *cmpzm = bolzm->in(1);
1489 assert(cmpzm->is_Cmp(), "");
1490 Node *opqzm = cmpzm->in(2);
1491 // Can not optimize a loop if pre-loop Opaque1 node is optimized
1492 // away and then another round of loop opts attempted.
1493 if (opqzm->Opcode() != Op_Opaque1)
1494 return;
1495 assert(opqzm->in(1) == main_limit, "do not understand situation");
1497 // Find the pre-loop limit; we will expand its iterations so
1498 // they never trip the low-range tests.
1499 Node *p_f = iffm->in(0);
1500 assert(p_f->Opcode() == Op_IfFalse, "");
1501 CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
1502 assert(pre_end->loopnode()->is_pre_loop(), "");
1503 Node *pre_opaq1 = pre_end->limit();
1504 // Occasionally it's possible for a pre-loop Opaque1 node to be
1505 // optimized away and then another round of loop opts attempted.
1506   // We cannot optimize this particular loop in that case.
1507 if (pre_opaq1->Opcode() != Op_Opaque1)
1508 return;
1509 Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1;
1510 Node *pre_limit = pre_opaq->in(1);
1512 // Where do we put new limit calculations
1513 Node *pre_ctrl = pre_end->loopnode()->in(LoopNode::EntryControl);
1515 // Ensure the original loop limit is available from the
1516 // pre-loop Opaque1 node.
1517 Node *orig_limit = pre_opaq->original_loop_limit();
1518 if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP)
1519 return;
1521   // Must know if it's a count-up or count-down loop
1523 int stride_con = cl->stride_con();
1524 Node *zero = _igvn.intcon(0);
1525 Node *one = _igvn.intcon(1);
1526 set_ctrl(zero, C->root());
1527 set_ctrl(one, C->root());
1529   // Range checks that do not dominate the loop backedge (i.e. are
1530   // conditionally executed) can lengthen the pre-loop limit beyond
1531   // the original loop limit. To prevent this, the pre-loop limit is
1532   // MINed with the original loop limit (for stride > 0), or MAXed
1533   // with it (for stride < 0), when some range check (rc) is
1534   // conditionally executed.
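  // Illustrative sketch of such a conditionally executed check (hypothetical
  // Java-level source):
  //
  //   for (int i = 0; i < n; i++) {
  //     if (cond) a[i] = 0;     // this range check does not dominate the backedge
  //   }
  //
  // Without the MIN/MAX clamp applied below when conditional_rc is set, the
  // constraints taken from such a check could push pre_limit past the
  // original loop limit.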
1535 bool conditional_rc = false;
1537 // Check loop body for tests of trip-counter plus loop-invariant vs
1538 // loop-invariant.
1539 for( uint i = 0; i < loop->_body.size(); i++ ) {
1540 Node *iff = loop->_body[i];
1541 if( iff->Opcode() == Op_If ) { // Test?
1543 // Test is an IfNode, has 2 projections. If BOTH are in the loop
1544 // we need loop unswitching instead of iteration splitting.
1545 Node *exit = loop->is_loop_exit(iff);
1546 if( !exit ) continue;
1547 int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0;
1549 // Get boolean condition to test
1550 Node *i1 = iff->in(1);
1551 if( !i1->is_Bool() ) continue;
1552 BoolNode *bol = i1->as_Bool();
1553 BoolTest b_test = bol->_test;
1554 // Flip sense of test if exit condition is flipped
1555 if( flip )
1556 b_test = b_test.negate();
1558 // Get compare
1559 Node *cmp = bol->in(1);
1561 // Look for trip_counter + offset vs limit
1562 Node *rc_exp = cmp->in(1);
1563 Node *limit = cmp->in(2);
1564 jint scale_con= 1; // Assume trip counter not scaled
1566 Node *limit_c = get_ctrl(limit);
1567 if( loop->is_member(get_loop(limit_c) ) ) {
1568 // Compare might have operands swapped; commute them
1569 b_test = b_test.commute();
1570 rc_exp = cmp->in(2);
1571 limit = cmp->in(1);
1572 limit_c = get_ctrl(limit);
1573 if( loop->is_member(get_loop(limit_c) ) )
1574 continue; // Both inputs are loop varying; cannot RCE
1575 }
1576 // Here we know 'limit' is loop invariant
1578       // 'limit' may be pinned below the zero-trip test (probably from a
1579       // previous round of RCE), in which case it can't be used in the
1580       // zero-trip test expression, which must occur before the zero test's if.
1581 if( limit_c == ctrl ) {
1582 continue; // Don't rce this check but continue looking for other candidates.
1583 }
1585 // Check for scaled induction variable plus an offset
1586 Node *offset = NULL;
1588 if (!is_scaled_iv_plus_offset(rc_exp, trip_counter, &scale_con, &offset)) {
1589 continue;
1590 }
1592 Node *offset_c = get_ctrl(offset);
1593 if( loop->is_member( get_loop(offset_c) ) )
1594 continue; // Offset is not really loop invariant
1595 // Here we know 'offset' is loop invariant.
1597       // As above for the 'limit', the 'offset' may be pinned below the
1598       // zero-trip test.
1599 if( offset_c == ctrl ) {
1600 continue; // Don't rce this check but continue looking for other candidates.
1601 }
1603 // At this point we have the expression as:
1604 // scale_con * trip_counter + offset :: limit
1605       // where scale_con, offset and limit are loop invariant. Trip_counter
1606       // monotonically increases by stride_con, a constant. Either or both of
1607       // stride_con and scale_con can be negative, which flips the
1608       // sense of the test.
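      // Illustrative instance (hypothetical values): a check for a[2*i + 3] would
      // give scale_con == 2, 'offset' the constant 3 and 'limit' the array length,
      // i.e. the test  2*trip_counter + 3  u<  a.length  (an unsigned compare,
      // handled in the CmpU case just below).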
1610 // Adjust pre and main loop limits to guard the correct iteration set
1611 if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests
1612 if( b_test._test == BoolTest::lt ) { // Range checks always use lt
1613 // The overflow limit: scale*I+offset < limit
1614 add_constraint( stride_con, scale_con, offset, limit, pre_ctrl, &pre_limit, &main_limit );
1615 // The underflow limit: 0 <= scale*I+offset.
1616 // Some math yields: -scale*I-(offset+1) < 0
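          // Worked expansion of the math referenced above (integer arithmetic):
          //   0 <= scale*I + offset
          //   <=> -(scale*I + offset) <= 0
          //   <=> -scale*I - offset <= 0
          //   <=> -scale*I - offset - 1 < 0      (x <= 0  iff  x - 1 < 0 for ints)
          //   <=> -scale*I - (offset + 1) < 0
          // hence the plus_one/neg_offset nodes built below and the negated scale
          // with a zero limit passed to add_constraint().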
1617 Node *plus_one = new (C, 3) AddINode( offset, one );
1618 register_new_node( plus_one, pre_ctrl );
1619 Node *neg_offset = new (C, 3) SubINode( zero, plus_one );
1620 register_new_node( neg_offset, pre_ctrl );
1621 add_constraint( stride_con, -scale_con, neg_offset, zero, pre_ctrl, &pre_limit, &main_limit );
1622 if (!conditional_rc) {
1623 conditional_rc = !loop->dominates_backedge(iff);
1624 }
1625 } else {
1626 #ifndef PRODUCT
1627 if( PrintOpto )
1628 tty->print_cr("missed RCE opportunity");
1629 #endif
1630 continue; // In release mode, ignore it
1631 }
1632 } else { // Otherwise work on normal compares
1633 switch( b_test._test ) {
1634 case BoolTest::ge: // Convert X >= Y to -X <= -Y
1635 scale_con = -scale_con;
1636 offset = new (C, 3) SubINode( zero, offset );
1637 register_new_node( offset, pre_ctrl );
1638 limit = new (C, 3) SubINode( zero, limit );
1639 register_new_node( limit, pre_ctrl );
1640 // Fall into LE case
1641 case BoolTest::le: // Convert X <= Y to X < Y+1
1642 limit = new (C, 3) AddINode( limit, one );
1643 register_new_node( limit, pre_ctrl );
1644 // Fall into LT case
1645 case BoolTest::lt:
1646 add_constraint( stride_con, scale_con, offset, limit, pre_ctrl, &pre_limit, &main_limit );
1647 if (!conditional_rc) {
1648 conditional_rc = !loop->dominates_backedge(iff);
1649 }
1650 break;
1651 default:
1652 #ifndef PRODUCT
1653 if( PrintOpto )
1654 tty->print_cr("missed RCE opportunity");
1655 #endif
1656 continue; // Unhandled case
1657 }
1658 }
1660 // Kill the eliminated test
1661 C->set_major_progress();
1662 Node *kill_con = _igvn.intcon( 1-flip );
1663 set_ctrl(kill_con, C->root());
1664 _igvn.hash_delete(iff);
1665 iff->set_req(1, kill_con);
1666 _igvn._worklist.push(iff);
1667 // Find surviving projection
1668 assert(iff->is_If(), "");
1669 ProjNode* dp = ((IfNode*)iff)->proj_out(1-flip);
1670 // Find loads off the surviving projection; remove their control edge
1671 for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
1672 Node* cd = dp->fast_out(i); // Control-dependent node
1673 if( cd->is_Load() ) { // Loads can now float around in the loop
1674 _igvn.hash_delete(cd);
1675 // Allow the load to float around in the loop, or before it
1676 // but NOT before the pre-loop.
1677 cd->set_req(0, ctrl); // ctrl, not NULL
1678 _igvn._worklist.push(cd);
1679 --i;
1680 --imax;
1681 }
1682 }
1684 } // End of is IF
1686 }
1688 // Update loop limits
1689 if (conditional_rc) {
1690 pre_limit = (stride_con > 0) ? (Node*)new (C,3) MinINode(pre_limit, orig_limit)
1691 : (Node*)new (C,3) MaxINode(pre_limit, orig_limit);
1692 register_new_node(pre_limit, pre_ctrl);
1693 }
1694 _igvn.hash_delete(pre_opaq);
1695 pre_opaq->set_req(1, pre_limit);
1697   // Note: we are making the main loop limit no longer precise;
1698 // need to round up based on stride.
1699 if( stride_con != 1 && stride_con != -1 ) { // Cutout for common case
1700 // "Standard" round-up logic: ([main_limit-init+(y-1)]/y)*y+init
1701     // Hopefully, the compiler will optimize this for powers of 2.
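    // Worked example (illustrative numbers only): init == 0, main_limit == 10,
    // stride_con == 3 gives rndup == 2, span == 10, add == 12, div == 4,
    // mul == 12 and newlim == 12, i.e. the limit rounded up to init plus a
    // whole number of strides.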
1702 Node *ctrl = get_ctrl(main_limit);
1703 Node *stride = cl->stride();
1704 Node *init = cl->init_trip();
1705 Node *span = new (C, 3) SubINode(main_limit,init);
1706 register_new_node(span,ctrl);
1707 Node *rndup = _igvn.intcon(stride_con + ((stride_con>0)?-1:1));
1708 Node *add = new (C, 3) AddINode(span,rndup);
1709 register_new_node(add,ctrl);
1710 Node *div = new (C, 3) DivINode(0,add,stride);
1711 register_new_node(div,ctrl);
1712 Node *mul = new (C, 3) MulINode(div,stride);
1713 register_new_node(mul,ctrl);
1714 Node *newlim = new (C, 3) AddINode(mul,init);
1715 register_new_node(newlim,ctrl);
1716 main_limit = newlim;
1717 }
1719 Node *main_cle = cl->loopexit();
1720 Node *main_bol = main_cle->in(1);
1721 // Hacking loop bounds; need private copies of exit test
1722 if( main_bol->outcnt() > 1 ) {// BoolNode shared?
1723 _igvn.hash_delete(main_cle);
1724 main_bol = main_bol->clone();// Clone a private BoolNode
1725 register_new_node( main_bol, main_cle->in(0) );
1726 main_cle->set_req(1,main_bol);
1727 }
1728 Node *main_cmp = main_bol->in(1);
1729 if( main_cmp->outcnt() > 1 ) { // CmpNode shared?
1730 _igvn.hash_delete(main_bol);
1731 main_cmp = main_cmp->clone();// Clone a private CmpNode
1732 register_new_node( main_cmp, main_cle->in(0) );
1733 main_bol->set_req(1,main_cmp);
1734 }
1735 // Hack the now-private loop bounds
1736 _igvn.hash_delete(main_cmp);
1737 main_cmp->set_req(2, main_limit);
1738 _igvn._worklist.push(main_cmp);
1739 // The OpaqueNode is unshared by design
1740 _igvn.hash_delete(opqzm);
1741 assert( opqzm->outcnt() == 1, "cannot hack shared node" );
1742 opqzm->set_req(1,main_limit);
1743 _igvn._worklist.push(opqzm);
1744 }
1746 //------------------------------DCE_loop_body----------------------------------
1747 // Remove simplistic dead code from loop body
1748 void IdealLoopTree::DCE_loop_body() {
1749 for( uint i = 0; i < _body.size(); i++ )
1750 if( _body.at(i)->outcnt() == 0 )
1751 _body.map( i--, _body.pop() );
1752 }
1755 //------------------------------adjust_loop_exit_prob--------------------------
1756 // Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage.
1757 // Replace with a 1-in-10 exit guess.
1758 void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) {
1759 Node *test = tail();
1760 while( test != _head ) {
1761 uint top = test->Opcode();
1762 if( top == Op_IfTrue || top == Op_IfFalse ) {
1763 int test_con = ((ProjNode*)test)->_con;
1764 assert(top == (uint)(test_con? Op_IfTrue: Op_IfFalse), "sanity");
1765 IfNode *iff = test->in(0)->as_If();
1766 if( iff->outcnt() == 2 ) { // Ignore dead tests
1767 Node *bol = iff->in(1);
1768 if( bol && bol->req() > 1 && bol->in(1) &&
1769 ((bol->in(1)->Opcode() == Op_StorePConditional ) ||
1770 (bol->in(1)->Opcode() == Op_StoreIConditional ) ||
1771 (bol->in(1)->Opcode() == Op_StoreLConditional ) ||
1772 (bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
1773 (bol->in(1)->Opcode() == Op_CompareAndSwapL ) ||
1774 (bol->in(1)->Opcode() == Op_CompareAndSwapP ) ||
1775 (bol->in(1)->Opcode() == Op_CompareAndSwapN )))
1776 return; // Allocation loops RARELY take backedge
1777 // Find the OTHER exit path from the IF
1778 Node* ex = iff->proj_out(1-test_con);
1779 float p = iff->_prob;
1780 if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) {
1781 if( top == Op_IfTrue ) {
1782 if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) {
1783 iff->_prob = PROB_STATIC_FREQUENT;
1784 }
1785 } else {
1786 if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) {
1787 iff->_prob = PROB_STATIC_INFREQUENT;
1788 }
1789 }
1790 }
1791 }
1792 }
1793 test = phase->idom(test);
1794 }
1795 }
1798 //------------------------------policy_do_remove_empty_loop--------------------
1799 // Micro-benchmark spamming. Policy is to always remove empty loops.
1800 // The 'DO' part is to replace the trip counter with the value it will
1801 // have on the last iteration. This will break the loop.
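//
// Illustrative sketch (hypothetical Java-level source):
//
//   for (int i = init; i < limit; i += stride) { }   // empty body
//
// After the transformation the loop collapses; uses of the trip counter after
// the loop see its value from the last iteration (computed below as
// limit - stride).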
1802 bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
1803 // Minimum size must be empty loop
1804 if (_body.size() > EMPTY_LOOP_SIZE)
1805 return false;
1807 if (!_head->is_CountedLoop())
1808 return false; // Dead loop
1809 CountedLoopNode *cl = _head->as_CountedLoop();
1810 if (!cl->loopexit())
1811 return false; // Malformed loop
1812 if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
1813 return false; // Infinite loop
1815 #ifdef ASSERT
1816 // Ensure only one phi which is the iv.
1817 Node* iv = NULL;
1818 for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) {
1819 Node* n = cl->fast_out(i);
1820 if (n->Opcode() == Op_Phi) {
1821 assert(iv == NULL, "Too many phis" );
1822 iv = n;
1823 }
1824 }
1825 assert(iv == cl->phi(), "Wrong phi" );
1826 #endif
1828   // Main and post loops have an explicitly created zero-trip guard
1829 bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop();
1830 if (needs_guard) {
1831     // Skip the guard if the init and limit value ranges do not overlap.
1832 const TypeInt* init_t = phase->_igvn.type(cl->init_trip())->is_int();
1833 const TypeInt* limit_t = phase->_igvn.type(cl->limit())->is_int();
1834 int stride_con = cl->stride_con();
1835 if (stride_con > 0) {
1836 needs_guard = (init_t->_hi >= limit_t->_lo);
1837 } else {
1838 needs_guard = (init_t->_lo <= limit_t->_hi);
1839 }
1840 }
1841 if (needs_guard) {
1842 // Check for an obvious zero trip guard.
1843 Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl));
1844 if (inctrl->Opcode() == Op_IfTrue) {
1845 // The test should look like just the backedge of a CountedLoop
1846 Node* iff = inctrl->in(0);
1847 if (iff->is_If()) {
1848 Node* bol = iff->in(1);
1849 if (bol->is_Bool() && bol->as_Bool()->_test._test == cl->loopexit()->test_trip()) {
1850 Node* cmp = bol->in(1);
1851 if (cmp->is_Cmp() && cmp->in(1) == cl->init_trip() && cmp->in(2) == cl->limit()) {
1852 needs_guard = false;
1853 }
1854 }
1855 }
1856 }
1857 }
1859 #ifndef PRODUCT
1860 if (PrintOpto) {
1861 tty->print("Removing empty loop with%s zero trip guard", needs_guard ? "out" : "");
1862 this->dump_head();
1863 } else if (TraceLoopOpts) {
1864 tty->print("Empty with%s zero trip guard ", needs_guard ? "out" : "");
1865 this->dump_head();
1866 }
1867 #endif
1869 if (needs_guard) {
1870 // Peel the loop to ensure there's a zero trip guard
1871 Node_List old_new;
1872 phase->do_peeling(this, old_new);
1873 }
1875 // Replace the phi at loop head with the final value of the last
1876 // iteration. Then the CountedLoopEnd will collapse (backedge never
1877 // taken) and all loop-invariant uses of the exit values will be correct.
1878 Node *phi = cl->phi();
1879 Node *final = new (phase->C, 3) SubINode( cl->limit(), cl->stride() );
1880 phase->register_new_node(final,cl->in(LoopNode::EntryControl));
1881 phase->_igvn.replace_node(phi,final);
1882 phase->C->set_major_progress();
1883 return true;
1884 }
1886 //------------------------------policy_do_one_iteration_loop-------------------
1887 // Convert one iteration loop into normal code.
1888 bool IdealLoopTree::policy_do_one_iteration_loop( PhaseIdealLoop *phase ) {
1889 if (!_head->as_Loop()->is_valid_counted_loop())
1890 return false; // Only for counted loop
1892 CountedLoopNode *cl = _head->as_CountedLoop();
1893 if (!cl->has_exact_trip_count() || cl->trip_count() != 1) {
1894 return false;
1895 }
1897 #ifndef PRODUCT
1898 if(TraceLoopOpts) {
1899 tty->print("OneIteration ");
1900 this->dump_head();
1901 }
1902 #endif
1904 Node *init_n = cl->init_trip();
1905 #ifdef ASSERT
1906 // Loop boundaries should be constant since trip count is exact.
1907 assert(init_n->get_int() + cl->stride_con() >= cl->limit()->get_int(), "should be one iteration");
1908 #endif
1909 // Replace the phi at loop head with the value of the init_trip.
1910 // Then the CountedLoopEnd will collapse (backedge will not be taken)
1911 // and all loop-invariant uses of the exit values will be correct.
1912 phase->_igvn.replace_node(cl->phi(), cl->init_trip());
1913 phase->C->set_major_progress();
1914 return true;
1915 }
1917 //=============================================================================
1918 //------------------------------iteration_split_impl---------------------------
1919 bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) {
1920 // Compute exact loop trip count if possible.
1921 compute_exact_trip_count(phase);
1923 // Convert one iteration loop into normal code.
1924 if (policy_do_one_iteration_loop(phase))
1925 return true;
1927 // Check and remove empty loops (spam micro-benchmarks)
1928 if (policy_do_remove_empty_loop(phase))
1929 return true; // Here we removed an empty loop
1931 bool should_peel = policy_peeling(phase); // Should we peel?
1933 bool should_unswitch = policy_unswitching(phase);
1935 // Non-counted loops may be peeled; exactly 1 iteration is peeled.
1936 // This removes loop-invariant tests (usually null checks).
1937 if (!_head->is_CountedLoop()) { // Non-counted loop
1938 if (PartialPeelLoop && phase->partial_peel(this, old_new)) {
1939 // Partial peel succeeded so terminate this round of loop opts
1940 return false;
1941 }
1942 if (should_peel) { // Should we peel?
1943 #ifndef PRODUCT
1944 if (PrintOpto) tty->print_cr("should_peel");
1945 #endif
1946 phase->do_peeling(this,old_new);
1947 } else if (should_unswitch) {
1948 phase->do_unswitching(this, old_new);
1949 }
1950 return true;
1951 }
1952 CountedLoopNode *cl = _head->as_CountedLoop();
1954 if (!cl->loopexit()) return true; // Ignore various kinds of broken loops
1956 // Do nothing special to pre- and post- loops
1957 if (cl->is_pre_loop() || cl->is_post_loop()) return true;
1959 // Compute loop trip count from profile data
1960 compute_profile_trip_cnt(phase);
1962 // Before attempting fancy unrolling, RCE or alignment, see if we want
1963 // to completely unroll this loop or do loop unswitching.
1964 if (cl->is_normal_loop()) {
1965 if (should_unswitch) {
1966 phase->do_unswitching(this, old_new);
1967 return true;
1968 }
1969 bool should_maximally_unroll = policy_maximally_unroll(phase);
1970 if (should_maximally_unroll) {
1971 // Here we did some unrolling and peeling. Eventually we will
1972 // completely unroll this loop and it will no longer be a loop.
1973 phase->do_maximally_unroll(this,old_new);
1974 return true;
1975 }
1976 }
1978 // Skip next optimizations if running low on nodes. Note that
1979 // policy_unswitching and policy_maximally_unroll have this check.
1980 uint nodes_left = MaxNodeLimit - phase->C->unique();
1981 if ((2 * _body.size()) > nodes_left) {
1982 return true;
1983 }
1985 // Counted loops may be peeled, may need some iterations run up
1986 // front for RCE, and may want to align loop refs to a cache
1987 // line. Thus we clone a full loop up front whose trip count is
1988 // at least 1 (if peeling), but may be several more.
1990 // The main loop will start cache-line aligned with at least 1
1991 // iteration of the unrolled body (zero-trip test required) and
1992 // will have some range checks removed.
1994 // A post-loop will finish any odd iterations (leftover after
1995 // unrolling), plus any needed for RCE purposes.
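  // A sketch of the resulting shape (Java-level view only; U is an assumed
  // unroll factor, shown purely for illustration):
  //
  //   for (i = init; i < pre_limit;  i += stride)   { body }             // pre:  full checks
  //   for (        ; i < main_limit; i += stride*U) { U copies of body } // main: RCE'd, unrolled
  //   for (        ; i < limit;      i += stride)   { body }             // post: full checks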
1997 bool should_unroll = policy_unroll(phase);
1999 bool should_rce = policy_range_check(phase);
2001 bool should_align = policy_align(phase);
2003 // If not RCE'ing (iteration splitting) or Aligning, then we do not
2004 // need a pre-loop. We may still need to peel an initial iteration but
2005 // we will not be needing an unknown number of pre-iterations.
2006 //
2007 // Basically, if may_rce_align reports FALSE first time through,
2008 // we will not be able to later do RCE or Aligning on this loop.
2009 bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align;
2011 // If we have any of these conditions (RCE, alignment, unrolling) met, then
2012 // we switch to the pre-/main-/post-loop model. This model also covers
2013 // peeling.
2014 if (should_rce || should_align || should_unroll) {
2015 if (cl->is_normal_loop()) // Convert to 'pre/main/post' loops
2016 phase->insert_pre_post_loops(this,old_new, !may_rce_align);
2018 // Adjust the pre- and main-loop limits to let the pre and post loops run
2019 // with full checks, but the main-loop with no checks. Remove said
2020 // checks from the main body.
2021 if (should_rce)
2022 phase->do_range_check(this,old_new);
2024 // Double loop body for unrolling. Adjust the minimum-trip test (will do
2025 // twice as many iterations as before) and the main body limit (only do
2026 // an even number of trips). If we are peeling, we might enable some RCE
2027 // and we'd rather unroll the post-RCE'd loop SO... do not unroll if
2028 // peeling.
2029 if (should_unroll && !should_peel)
2030 phase->do_unroll(this,old_new, true);
2032 // Adjust the pre-loop limits to align the main body
2033 // iterations.
2034 if (should_align)
2035 Unimplemented();
2037 } else { // Else we have an unchanged counted loop
2038 if (should_peel) // Might want to peel but do nothing else
2039 phase->do_peeling(this,old_new);
2040 }
2041 return true;
2042 }
2045 //=============================================================================
2046 //------------------------------iteration_split--------------------------------
2047 bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) {
2048 // Recursively iteration split nested loops
2049 if (_child && !_child->iteration_split(phase, old_new))
2050 return false;
2052 // Clean out prior deadwood
2053 DCE_loop_body();
2056 // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
2057 // Replace with a 1-in-10 exit guess.
2058 if (_parent /*not the root loop*/ &&
2059 !_irreducible &&
2060 // Also ignore the occasional dead backedge
2061 !tail()->is_top()) {
2062 adjust_loop_exit_prob(phase);
2063 }
2065 // Gate unrolling, RCE and peeling efforts.
2066 if (!_child && // If not an inner loop, do not split
2067 !_irreducible &&
2068 _allow_optimizations &&
2069 !tail()->is_top()) { // Also ignore the occasional dead backedge
2070 if (!_has_call) {
2071 if (!iteration_split_impl(phase, old_new)) {
2072 return false;
2073 }
2074 } else if (policy_unswitching(phase)) {
2075 phase->do_unswitching(this, old_new);
2076 }
2077 }
2079 // Minor offset re-organization to remove loop-fallout uses of
2080 // trip counter when there was no major reshaping.
2081 phase->reorg_offsets(this);
2083 if (_next && !_next->iteration_split(phase, old_new))
2084 return false;
2085 return true;
2086 }
2089 //=============================================================================
2090 // Process all the loops in the loop tree and replace any fill
2091 // patterns with an intrinsic version.
2092 bool PhaseIdealLoop::do_intrinsify_fill() {
2093 bool changed = false;
2094 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
2095 IdealLoopTree* lpt = iter.current();
2096 changed |= intrinsify_fill(lpt);
2097 }
2098 return changed;
2099 }
2102 // Examine an inner loop looking for a single store of an invariant
2103 // value in a unit-stride loop.
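// That is, a loop of the (hypothetical) shape
//
//   for (int i = init; i < limit; i++) {
//     a[i] = v;    // v loop invariant; unit stride; no other control flow
//   }
//
// which intrinsify_fill() can replace with a call to a fill stub.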
2104 bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
2105 Node*& shift, Node*& con) {
2106 const char* msg = NULL;
2107 Node* msg_node = NULL;
2109 store_value = NULL;
2110 con = NULL;
2111 shift = NULL;
2113   // Process the loop looking for stores. If there are multiple
2114   // stores or extra control flow, give up at this point.
2115 CountedLoopNode* head = lpt->_head->as_CountedLoop();
2116 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2117 Node* n = lpt->_body.at(i);
2118 if (n->outcnt() == 0) continue; // Ignore dead
2119 if (n->is_Store()) {
2120 if (store != NULL) {
2121 msg = "multiple stores";
2122 break;
2123 }
2124 int opc = n->Opcode();
2125 if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreCM) {
2126 msg = "oop fills not handled";
2127 break;
2128 }
2129 Node* value = n->in(MemNode::ValueIn);
2130 if (!lpt->is_invariant(value)) {
2131 msg = "variant store value";
2132 } else if (!_igvn.type(n->in(MemNode::Address))->isa_aryptr()) {
2133 msg = "not array address";
2134 }
2135 store = n;
2136 store_value = value;
2137 } else if (n->is_If() && n != head->loopexit()) {
2138 msg = "extra control flow";
2139 msg_node = n;
2140 }
2141 }
2143 if (store == NULL) {
2144 // No store in loop
2145 return false;
2146 }
2148 if (msg == NULL && head->stride_con() != 1) {
2149 // could handle negative strides too
2150 if (head->stride_con() < 0) {
2151 msg = "negative stride";
2152 } else {
2153 msg = "non-unit stride";
2154 }
2155 }
2157 if (msg == NULL && !store->in(MemNode::Address)->is_AddP()) {
2158 msg = "can't handle store address";
2159 msg_node = store->in(MemNode::Address);
2160 }
2162 if (msg == NULL &&
2163 (!store->in(MemNode::Memory)->is_Phi() ||
2164 store->in(MemNode::Memory)->in(LoopNode::LoopBackControl) != store)) {
2165 msg = "store memory isn't proper phi";
2166 msg_node = store->in(MemNode::Memory);
2167 }
2169 // Make sure there is an appropriate fill routine
2170 BasicType t = store->as_Mem()->memory_type();
2171 const char* fill_name;
2172 if (msg == NULL &&
2173 StubRoutines::select_fill_function(t, false, fill_name) == NULL) {
2174 msg = "unsupported store";
2175 msg_node = store;
2176 }
2178 if (msg != NULL) {
2179 #ifndef PRODUCT
2180 if (TraceOptimizeFill) {
2181 tty->print_cr("not fill intrinsic candidate: %s", msg);
2182 if (msg_node != NULL) msg_node->dump();
2183 }
2184 #endif
2185 return false;
2186 }
2188 // Make sure the address expression can be handled. It should be
2189 // head->phi * elsize + con. head->phi might have a ConvI2L.
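  // Illustrative example (hypothetical int[] store on a 64-bit VM): the address
  // of a[phi] is roughly  base + (ConvI2L(phi) << 2) + con,  where 'con' is the
  // array header offset, so unpack_offsets() should yield the shifted (and
  // possibly converted) index together with that constant.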
2190 Node* elements[4];
2191 Node* conv = NULL;
2192 bool found_index = false;
2193 int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
2194 for (int e = 0; e < count; e++) {
2195 Node* n = elements[e];
2196 if (n->is_Con() && con == NULL) {
2197 con = n;
2198 } else if (n->Opcode() == Op_LShiftX && shift == NULL) {
2199 Node* value = n->in(1);
2200 #ifdef _LP64
2201 if (value->Opcode() == Op_ConvI2L) {
2202 conv = value;
2203 value = value->in(1);
2204 }
2205 #endif
2206 if (value != head->phi()) {
2207 msg = "unhandled shift in address";
2208 } else {
2209 if (type2aelembytes(store->as_Mem()->memory_type(), true) != (1 << n->in(2)->get_int())) {
2210 msg = "scale doesn't match";
2211 } else {
2212 found_index = true;
2213 shift = n;
2214 }
2215 }
2216 } else if (n->Opcode() == Op_ConvI2L && conv == NULL) {
2217 if (n->in(1) == head->phi()) {
2218 found_index = true;
2219 conv = n;
2220 } else {
2221 msg = "unhandled input to ConvI2L";
2222 }
2223 } else if (n == head->phi()) {
2224 // no shift, check below for allowed cases
2225 found_index = true;
2226 } else {
2227 msg = "unhandled node in address";
2228 msg_node = n;
2229 }
2230 }
2232 if (count == -1) {
2233 msg = "malformed address expression";
2234 msg_node = store;
2235 }
2237 if (!found_index) {
2238 msg = "missing use of index";
2239 }
2241   // Byte-sized items won't have a shift
2242 if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) {
2243 msg = "can't find shift";
2244 msg_node = store;
2245 }
2247 if (msg != NULL) {
2248 #ifndef PRODUCT
2249 if (TraceOptimizeFill) {
2250 tty->print_cr("not fill intrinsic: %s", msg);
2251 if (msg_node != NULL) msg_node->dump();
2252 }
2253 #endif
2254 return false;
2255 }
2257   // Now make sure all the other nodes in the loop can be handled
2258 VectorSet ok(Thread::current()->resource_area());
2260 // store related values are ok
2261 ok.set(store->_idx);
2262 ok.set(store->in(MemNode::Memory)->_idx);
2264 // Loop structure is ok
2265 ok.set(head->_idx);
2266 ok.set(head->loopexit()->_idx);
2267 ok.set(head->phi()->_idx);
2268 ok.set(head->incr()->_idx);
2269 ok.set(head->loopexit()->cmp_node()->_idx);
2270 ok.set(head->loopexit()->in(1)->_idx);
2272 // Address elements are ok
2273 if (con) ok.set(con->_idx);
2274 if (shift) ok.set(shift->_idx);
2275 if (conv) ok.set(conv->_idx);
2277 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2278 Node* n = lpt->_body.at(i);
2279 if (n->outcnt() == 0) continue; // Ignore dead
2280 if (ok.test(n->_idx)) continue;
2281 // Backedge projection is ok
2282 if (n->is_IfTrue() && n->in(0) == head->loopexit()) continue;
2283 if (!n->is_AddP()) {
2284 msg = "unhandled node";
2285 msg_node = n;
2286 break;
2287 }
2288 }
2290 // Make sure no unexpected values are used outside the loop
2291 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2292 Node* n = lpt->_body.at(i);
2293 // These values can be replaced with other nodes if they are used
2294 // outside the loop.
2295 if (n == store || n == head->loopexit() || n == head->incr() || n == store->in(MemNode::Memory)) continue;
2296 for (SimpleDUIterator iter(n); iter.has_next(); iter.next()) {
2297 Node* use = iter.get();
2298 if (!lpt->_body.contains(use)) {
2299 msg = "node is used outside loop";
2300 // lpt->_body.dump();
2301 msg_node = n;
2302 break;
2303 }
2304 }
2305 }
2307 #ifdef ASSERT
2308 if (TraceOptimizeFill) {
2309 if (msg != NULL) {
2310 tty->print_cr("no fill intrinsic: %s", msg);
2311 if (msg_node != NULL) msg_node->dump();
2312 } else {
2313 tty->print_cr("fill intrinsic for:");
2314 }
2315 store->dump();
2316 if (Verbose) {
2317 lpt->_body.dump();
2318 }
2319 }
2320 #endif
2322 return msg == NULL;
2323 }
2327 bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
2328 // Only for counted inner loops
2329 if (!lpt->is_counted() || !lpt->is_inner()) {
2330 return false;
2331 }
2333 // Must have constant stride
2334 CountedLoopNode* head = lpt->_head->as_CountedLoop();
2335 if (!head->stride_is_con() || !head->is_normal_loop()) {
2336 return false;
2337 }
2339 // Check that the body only contains a store of a loop invariant
2340 // value that is indexed by the loop phi.
2341 Node* store = NULL;
2342 Node* store_value = NULL;
2343 Node* shift = NULL;
2344 Node* offset = NULL;
2345 if (!match_fill_loop(lpt, store, store_value, shift, offset)) {
2346 return false;
2347 }
2349 #ifndef PRODUCT
2350 if (TraceLoopOpts) {
2351 tty->print("ArrayFill ");
2352 lpt->dump_head();
2353 }
2354 #endif
2356 // Now replace the whole loop body by a call to a fill routine that
2357 // covers the same region as the loop.
2358 Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base);
2360 // Build an expression for the beginning of the copy region
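  // Conceptually (illustrative): from == base + (init << shift) + offset, the
  // address of the first element the loop would have written; len (built below)
  // is limit - init, the number of elements to fill.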
2361 Node* index = head->init_trip();
2362 #ifdef _LP64
2363 index = new (C, 2) ConvI2LNode(index);
2364 _igvn.register_new_node_with_optimizer(index);
2365 #endif
2366 if (shift != NULL) {
2367 // byte arrays don't require a shift but others do.
2368 index = new (C, 3) LShiftXNode(index, shift->in(2));
2369 _igvn.register_new_node_with_optimizer(index);
2370 }
2371 index = new (C, 4) AddPNode(base, base, index);
2372 _igvn.register_new_node_with_optimizer(index);
2373 Node* from = new (C, 4) AddPNode(base, index, offset);
2374 _igvn.register_new_node_with_optimizer(from);
2375 // Compute the number of elements to copy
2376 Node* len = new (C, 3) SubINode(head->limit(), head->init_trip());
2377 _igvn.register_new_node_with_optimizer(len);
2379 BasicType t = store->as_Mem()->memory_type();
2380 bool aligned = false;
2381 if (offset != NULL && head->init_trip()->is_Con()) {
2382 int element_size = type2aelembytes(t);
2383 aligned = (offset->find_intptr_t_type()->get_con() + head->init_trip()->get_int() * element_size) % HeapWordSize == 0;
2384 }
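  // For example (illustrative, assuming HeapWordSize == 8): an int fill whose
  // constant offset is 16 with init_trip == 0 gives (16 + 0*4) % 8 == 0, so an
  // aligned fill stub may be selected.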
2386 // Build a call to the fill routine
2387 const char* fill_name;
2388 address fill = StubRoutines::select_fill_function(t, aligned, fill_name);
2389 assert(fill != NULL, "what?");
2391 // Convert float/double to int/long for fill routines
2392 if (t == T_FLOAT) {
2393 store_value = new (C, 2) MoveF2INode(store_value);
2394 _igvn.register_new_node_with_optimizer(store_value);
2395 } else if (t == T_DOUBLE) {
2396 store_value = new (C, 2) MoveD2LNode(store_value);
2397 _igvn.register_new_node_with_optimizer(store_value);
2398 }
2400 Node* mem_phi = store->in(MemNode::Memory);
2401 Node* result_ctrl;
2402 Node* result_mem;
2403 const TypeFunc* call_type = OptoRuntime::array_fill_Type();
2404 int size = call_type->domain()->cnt();
2405 CallLeafNode *call = new (C, size) CallLeafNoFPNode(call_type, fill,
2406 fill_name, TypeAryPtr::get_array_body_type(t));
2407 call->init_req(TypeFunc::Parms+0, from);
2408 call->init_req(TypeFunc::Parms+1, store_value);
2409 #ifdef _LP64
2410 len = new (C, 2) ConvI2LNode(len);
2411 _igvn.register_new_node_with_optimizer(len);
2412 #endif
2413 call->init_req(TypeFunc::Parms+2, len);
2414 #ifdef _LP64
2415 call->init_req(TypeFunc::Parms+3, C->top());
2416 #endif
2417 call->init_req( TypeFunc::Control, head->init_control());
2418 call->init_req( TypeFunc::I_O , C->top() ) ; // does no i/o
2419 call->init_req( TypeFunc::Memory , mem_phi->in(LoopNode::EntryControl) );
2420 call->init_req( TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr) );
2421 call->init_req( TypeFunc::FramePtr, C->start()->proj_out(TypeFunc::FramePtr) );
2422 _igvn.register_new_node_with_optimizer(call);
2423 result_ctrl = new (C, 1) ProjNode(call,TypeFunc::Control);
2424 _igvn.register_new_node_with_optimizer(result_ctrl);
2425 result_mem = new (C, 1) ProjNode(call,TypeFunc::Memory);
2426 _igvn.register_new_node_with_optimizer(result_mem);
2428 // If this fill is tightly coupled to an allocation and overwrites
2429 // the whole body, allow it to take over the zeroing.
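  // Illustrative case (hypothetical Java-level source):
  //
  //   int[] a = new int[n];
  //   for (int i = 0; i < n; i++) a[i] = v;
  //
  // Here the fill covers exactly the allocated range (init_trip is 0 and the
  // limit is the allocation length), so the allocation's zeroing can be
  // skipped via maybe_set_complete() below.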
2430 AllocateNode* alloc = AllocateNode::Ideal_allocation(base, this);
2431 if (alloc != NULL && alloc->is_AllocateArray()) {
2432 Node* length = alloc->as_AllocateArray()->Ideal_length();
2433 if (head->limit() == length &&
2434 head->init_trip() == _igvn.intcon(0)) {
2435 if (TraceOptimizeFill) {
2436 tty->print_cr("Eliminated zeroing in allocation");
2437 }
2438 alloc->maybe_set_complete(&_igvn);
2439 } else {
2440 #ifdef ASSERT
2441 if (TraceOptimizeFill) {
2442 tty->print_cr("filling array but bounds don't match");
2443 alloc->dump();
2444 head->init_trip()->dump();
2445 head->limit()->dump();
2446 length->dump();
2447 }
2448 #endif
2449 }
2450 }
2452 // Redirect the old control and memory edges that are outside the loop.
2453 Node* exit = head->loopexit()->proj_out(0);
2454 // Sometimes the memory phi of the head is used as the outgoing
2455 // state of the loop. It's safe in this case to replace it with the
2456 // result_mem.
2457 _igvn.replace_node(store->in(MemNode::Memory), result_mem);
2458 _igvn.replace_node(exit, result_ctrl);
2459 _igvn.replace_node(store, result_mem);
2460   // Any uses of the increment outside of the loop become the loop limit.
2461 _igvn.replace_node(head->incr(), head->limit());
2463 // Disconnect the head from the loop.
2464 for (uint i = 0; i < lpt->_body.size(); i++) {
2465 Node* n = lpt->_body.at(i);
2466 _igvn.replace_node(n, C->top());
2467 }
2469 return true;
2470 }