http://hg.openjdk.java.net/jdk8u/jdk8u/hotspot/
changeset: 6782:28b50d07f6f8
tag: jdk8u25-b17
/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
//------------------------------is_loop_exit-----------------------------------
// Given an IfNode, return the loop-exiting projection or NULL if both
// arms remain in the loop.
Node *IdealLoopTree::is_loop_exit(Node *iff) const {
  if( iff->outcnt() != 2 ) return NULL;  // Ignore partially dead tests
  PhaseIdealLoop *phase = _phase;
  // Test is an IfNode, has 2 projections.  If BOTH are in the loop
  // we need loop unswitching instead of peeling.
  if( !is_member(phase->get_loop( iff->raw_out(0) )) )
    return iff->raw_out(0);
  if( !is_member(phase->get_loop( iff->raw_out(1) )) )
    return iff->raw_out(1);
  return NULL;
}


//=============================================================================


//------------------------------record_for_igvn----------------------------
// Put loop body on igvn work list
void IdealLoopTree::record_for_igvn() {
  for( uint i = 0; i < _body.size(); i++ ) {
    Node *n = _body.at(i);
    _phase->_igvn._worklist.push(n);
  }
}
//------------------------------compute_exact_trip_count-----------------------
// Compute loop exact trip count if possible. Do not recalculate trip count for
// split loops (pre-main-post) which have their limits and inits behind an
// Opaque node.
void IdealLoopTree::compute_exact_trip_count( PhaseIdealLoop *phase ) {
  if (!_head->as_Loop()->is_valid_counted_loop()) {
    return;
  }
  CountedLoopNode* cl = _head->as_CountedLoop();
  // Trip count may become nonexact for iteration split loops since
  // RCE modifies limits. Note, _trip_count value is not reset since
  // it is used to limit unrolling of main loop.
  cl->set_nonexact_trip_count();

  // Loop's test should be part of loop.
  if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
    return; // Infinite loop

#ifdef ASSERT
  BoolTest::mask bt = cl->loopexit()->test_trip();
  assert(bt == BoolTest::lt || bt == BoolTest::gt ||
         bt == BoolTest::ne, "canonical test is expected");
#endif

  Node* init_n = cl->init_trip();
  Node* limit_n = cl->limit();
  if (init_n != NULL && init_n->is_Con() &&
      limit_n != NULL && limit_n->is_Con()) {
    // Use longs to avoid integer overflow.
    int stride_con = cl->stride_con();
    jlong init_con = cl->init_trip()->get_int();
    jlong limit_con = cl->limit()->get_int();
    int stride_m = stride_con - (stride_con > 0 ? 1 : -1);
    jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
    if (trip_count > 0 && (julong)trip_count < (julong)max_juint) {
      // Set exact trip count.
      cl->set_exact_trip_count((uint)trip_count);
    }
  }
}
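
// Worked example of the formula above (illustrative): for a counted loop
// equivalent to
//   for (int i = 0; i < 10; i += 3) { ... }
// init_con = 0, limit_con = 10 and stride_con = 3, so stride_m = 2 and
// trip_count = (10 - 0 + 2) / 3 = 4, matching the iterations i = 0, 3, 6, 9.
// Adding stride_m before dividing rounds the count up to the next whole trip.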
//------------------------------compute_profile_trip_cnt----------------------------
// Compute loop trip count from profile data as
//    (backedge_count + loop_exit_count) / loop_exit_count
void IdealLoopTree::compute_profile_trip_cnt( PhaseIdealLoop *phase ) {
  if (!_head->is_CountedLoop()) {
    return;
  }
  CountedLoopNode* head = _head->as_CountedLoop();
  if (head->profile_trip_cnt() != COUNT_UNKNOWN) {
    return; // Already computed
  }
  float trip_cnt = (float)max_jint; // default is big

  Node* back = head->in(LoopNode::LoopBackControl);
  while (back != head) {
    if ((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
        back->in(0) &&
        back->in(0)->is_If() &&
        back->in(0)->as_If()->_fcnt != COUNT_UNKNOWN &&
        back->in(0)->as_If()->_prob != PROB_UNKNOWN) {
      break;
    }
    back = phase->idom(back);
  }
  if (back != head) {
    assert((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
           back->in(0), "if-projection exists");
    IfNode* back_if = back->in(0)->as_If();
    float loop_back_cnt = back_if->_fcnt * back_if->_prob;

    // Now compute a loop exit count
    float loop_exit_cnt = 0.0f;
    for( uint i = 0; i < _body.size(); i++ ) {
      Node *n = _body[i];
      if( n->is_If() ) {
        IfNode *iff = n->as_If();
        if( iff->_fcnt != COUNT_UNKNOWN && iff->_prob != PROB_UNKNOWN ) {
          Node *exit = is_loop_exit(iff);
          if( exit ) {
            float exit_prob = iff->_prob;
            if (exit->Opcode() == Op_IfFalse) exit_prob = 1.0 - exit_prob;
            if (exit_prob > PROB_MIN) {
              float exit_cnt = iff->_fcnt * exit_prob;
              loop_exit_cnt += exit_cnt;
            }
          }
        }
      }
    }
    if (loop_exit_cnt > 0.0f) {
      trip_cnt = (loop_back_cnt + loop_exit_cnt) / loop_exit_cnt;
    } else {
      // No exit count was found, so fall back on the backedge count.
      trip_cnt = loop_back_cnt;
    }
  }
#ifndef PRODUCT
  if (TraceProfileTripCount) {
    tty->print_cr("compute_profile_trip_cnt  lp: %d cnt: %f\n", head->_idx, trip_cnt);
  }
#endif
  head->set_profile_trip_cnt(trip_cnt);
}
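
// Worked example of the ratio above (illustrative): if profiling recorded
// loop_exit_cnt = 10 exits and loop_back_cnt = 990 backedge traversals, the
// loop was entered 10 times and executed (990 + 10) / 10 = 100 iterations
// per entry on average, which is the value stored as the profile trip count.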
//---------------------is_invariant_addition-----------------------------
// Return nonzero index of invariant operand for an Add or Sub
// of (nonconstant) invariant and variant values. Helper for reassociate_invariants.
int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) {
  int op = n->Opcode();
  if (op == Op_AddI || op == Op_SubI) {
    bool in1_invar = this->is_invariant(n->in(1));
    bool in2_invar = this->is_invariant(n->in(2));
    if (in1_invar && !in2_invar) return 1;
    if (!in1_invar && in2_invar) return 2;
  }
  return 0;
}
//---------------------reassociate_add_sub-----------------------------
// Reassociate invariant add and subtract expressions:
//
// inv1 + (x + inv2)  =>  ( inv1 + inv2) + x
// (x + inv2) + inv1  =>  ( inv1 + inv2) + x
// inv1 + (x - inv2)  =>  ( inv1 - inv2) + x
// inv1 - (inv2 - x)  =>  ( inv1 - inv2) + x
// (x + inv2) - inv1  =>  (-inv1 + inv2) + x
// (x - inv2) + inv1  =>  ( inv1 - inv2) + x
// (x - inv2) - inv1  =>  (-inv1 - inv2) + x
// inv1 + (inv2 - x)  =>  ( inv1 + inv2) - x
// inv1 - (x - inv2)  =>  ( inv1 + inv2) - x
// (inv2 - x) + inv1  =>  ( inv1 + inv2) - x
// (inv2 - x) - inv1  =>  (-inv1 + inv2) - x
// inv1 - (x + inv2)  =>  ( inv1 - inv2) - x
//
Node* IdealLoopTree::reassociate_add_sub(Node* n1, PhaseIdealLoop *phase) {
  if ((!n1->is_Add() && !n1->is_Sub()) || n1->outcnt() == 0) return NULL;
  if (is_invariant(n1)) return NULL;
  int inv1_idx = is_invariant_addition(n1, phase);
  if (!inv1_idx) return NULL;
  // Don't mess with add of constant (igvn moves them to expression tree root.)
  if (n1->is_Add() && n1->in(2)->is_Con()) return NULL;
  Node* inv1 = n1->in(inv1_idx);
  Node* n2 = n1->in(3 - inv1_idx);
  int inv2_idx = is_invariant_addition(n2, phase);
  if (!inv2_idx) return NULL;
  Node* x    = n2->in(3 - inv2_idx);
  Node* inv2 = n2->in(inv2_idx);

  bool neg_x    = n2->is_Sub() && inv2_idx == 1;
  bool neg_inv2 = n2->is_Sub() && inv2_idx == 2;
  bool neg_inv1 = n1->is_Sub() && inv1_idx == 2;
  if (n1->is_Sub() && inv1_idx == 1) {
    neg_x    = !neg_x;
    neg_inv2 = !neg_inv2;
  }
  Node* inv1_c = phase->get_ctrl(inv1);
  Node* inv2_c = phase->get_ctrl(inv2);
  Node* n_inv1;
  if (neg_inv1) {
    Node *zero = phase->_igvn.intcon(0);
    phase->set_ctrl(zero, phase->C->root());
    n_inv1 = new (phase->C) SubINode(zero, inv1);
    phase->register_new_node(n_inv1, inv1_c);
  } else {
    n_inv1 = inv1;
  }
  Node* inv;
  if (neg_inv2) {
    inv = new (phase->C) SubINode(n_inv1, inv2);
  } else {
    inv = new (phase->C) AddINode(n_inv1, inv2);
  }
  phase->register_new_node(inv, phase->get_early_ctrl(inv));

  Node* addx;
  if (neg_x) {
    addx = new (phase->C) SubINode(inv, x);
  } else {
    addx = new (phase->C) AddINode(x, inv);
  }
  phase->register_new_node(addx, phase->get_ctrl(x));
  phase->_igvn.replace_node(n1, addx);
  assert(phase->get_loop(phase->get_ctrl(n1)) == this, "");
  _body.yank(n1);
  return addx;
}
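
// Illustrative effect at the source level (a sketch): inside a loop where
// inv1 and inv2 do not change,
//   int y = inv1 + (x + inv2);    // two adds executed every iteration
// is rewritten to
//   int y = (inv1 + inv2) + x;    // inv1 + inv2 is now loop invariant
// so the invariant sum can be computed once outside the loop, leaving a
// single add per iteration.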
//---------------------reassociate_invariants-----------------------------
// Reassociate invariant expressions:
void IdealLoopTree::reassociate_invariants(PhaseIdealLoop *phase) {
  for (int i = _body.size() - 1; i >= 0; i--) {
    Node *n = _body.at(i);
    for (int j = 0; j < 5; j++) {
      Node* nn = reassociate_add_sub(n, phase);
      if (nn == NULL) break;
      n = nn; // again
    }
  }
}
//------------------------------policy_peeling---------------------------------
// Return TRUE if the loop should be peeled, otherwise FALSE. Peel if we can
// make some loop-invariant test (usually a null-check) happen before the loop.
bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
  Node *test = ((IdealLoopTree*)this)->tail();
  int  body_size = ((IdealLoopTree*)this)->_body.size();
  int  live_node_count = phase->C->live_nodes();
  // Peeling does loop cloning which can result in O(N^2) node construction
  if( body_size > 255 /* Prevent overflow for large body_size */
      || (body_size * body_size + live_node_count > MaxNodeLimit) ) {
    return false;           // too large to safely clone
  }
  while( test != _head ) {      // Scan till run off top of loop
    if( test->is_If() ) {       // Test?
      Node *ctrl = phase->get_ctrl(test->in(1));
      if (ctrl->is_top())
        return false;           // Found dead test on live IF?  No peeling!
      // Standard IF only has one input value to check for loop invariance
      assert( test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd, "Check this code when new subtype is added");
      // Condition is not a member of this loop?
      if( !is_member(phase->get_loop(ctrl)) &&
          is_loop_exit(test) )
        return true;            // Found reason to peel!
    }
    // Walk up dominators to loop _head looking for test which is
    // executed on every path thru loop.
    test = phase->idom(test);
  }
  return false;
}
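
// Illustrative case (a sketch): for Java code such as
//   while (i < n) { if (p == null) throw ...; sum += p.val[i++]; }
// the null test on 'p' is loop invariant and one of its arms exits the loop.
// Peeling the first iteration executes that test once before the loop, and
// peeled_dom_test_elim() below can then remove the dominated copies inside
// the loop body.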
//------------------------------peeled_dom_test_elim---------------------------
// If we got the effect of peeling, either by actually peeling or by making
// a pre-loop which must execute at least once, we can remove all
// loop-invariant dominated tests in the main body.
void PhaseIdealLoop::peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ) {
  bool progress = true;
  while( progress ) {
    progress = false;           // Reset for next iteration
    Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail();
    Node *test = prev->in(0);
    while( test != loop->_head ) { // Scan till run off top of loop

      int p_op = prev->Opcode();
      if( (p_op == Op_IfFalse || p_op == Op_IfTrue) &&
          test->is_If() &&      // Test?
          !test->in(1)->is_Con() && // And not already obvious?
          // Condition is not a member of this loop?
          !loop->is_member(get_loop(get_ctrl(test->in(1))))){
        // Walk loop body looking for instances of this test
        for( uint i = 0; i < loop->_body.size(); i++ ) {
          Node *n = loop->_body.at(i);
          if( n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/ ) {
            // IfNode was dominated by version in peeled loop body
            progress = true;
            dominated_by( old_new[prev->_idx], n );
          }
        }
      }
      prev = test;
      test = idom(test);
    } // End of scan tests in loop

  } // End of while( progress )
}
//------------------------------do_peeling-------------------------------------
// Peel the first iteration of the given loop.
// Step 1: Clone the loop body.  The clone becomes the peeled iteration.
//         The pre-loop illegally has 2 control users (old & new loops).
// Step 2: Make the old-loop fall-in edges point to the peeled iteration.
//         Do this by making the old-loop fall-in edges act as if they came
//         around the loopback from the prior iteration (follow the old-loop
//         backedges) and then map to the new peeled iteration.  This leaves
//         the pre-loop with only 1 user (the new peeled iteration), but the
//         peeled-loop backedge has 2 users.
// Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
//         extra backedge user.
//
//                   orig
//
//                  stmt1
//                    |
//                    v
//              loop predicate
//                    |
//                    v
//                   loop<----+
//                    |       |
//                  stmt2     |
//                    |       |
//                    v       |
//                    if      ^
//                   / \      |
//                  /   \     |
//                 v     v    |
//               false  true  |
//               /        \   |
//              /        ----+
//              |
//              v
//             exit
//
//
//            after clone loop
//
//                  stmt1
//                    |
//                    v
//              loop predicate
//                   / \
//           clone  /   \  orig
//                 /     \
//                /       \
//               v         v
//   +---->loop clone     loop<----+
//   |      |               |      |
//   |    stmt2 clone     stmt2    |
//   |      |               |      |
//   |      v               v      |
//   ^      if clone        If     ^
//   |     / \             / \     |
//   |    /   \           /   \    |
//   |   v     v         v     v   |
//   | true   false    false  true |
//   |  /        \      /       \  |
//   +----        \    /      ----+
//                 \  /
//                 1v v2
//                 region
//                    |
//                    v
//                   exit
//
//
//         after peel and predicate move
//
//                  stmt1
//                   /
//                  /
//         clone   /            orig
//                /
//               /          +----------+
//              /           |          |
//             /      loop predicate   |
//            /             |          |
//           v              v          |
//   TOP-->loop clone      loop<----+  |
//          |                |      |  |
//        stmt2 clone      stmt2    |  |
//          |                |      |  ^
//          v                v      |  |
//          if clone         If     ^  |
//         / \              / \     |  |
//        /   \            /   \    |  |
//       v     v          v     v   |  |
//     true   false     false  true |  |
//      |       \        /      \   |  |
//      |        \      /      ----+    ^
//      |         \    /                |
//      |         1v  v2                |
//      v          region               |
//      |             |                 |
//      |             v                 |
//      |            exit               |
//      |                               |
//      +--------------->-----------------+
//
//
//              final graph
//
//                 stmt1
//                   |
//                   v
//              stmt2 clone
//                   |
//                   v
//               if clone
//              /        |
//             /         |
//            v          v
//          false       true
//            |          |
//            |          v
//            |    loop predicate
//            |          |
//            |          v
//            |         loop<----+
//            |          |       |
//            |        stmt2     |
//            |          |       |
//            |          v       |
//            v          if      ^
//            |         / \      |
//            |        /   \     |
//            |       v     v    |
//            |     false  true  |
//            |       |      \   |
//            v       v      --+
//             region
//                |
//                v
//               exit
//
void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {

  C->set_major_progress();
  // Peeling a 'main' loop in a pre/main/post situation obfuscates the
  // 'pre' loop from the main and the 'pre' can no longer have its
  // iterations adjusted.  Therefore, we need to declare this loop as
  // no longer a 'main' loop; it will need new pre and post loops before
  // we can do further RCE.
#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("Peel         ");
    loop->dump_head();
  }
#endif
  Node* head = loop->_head;
  bool counted_loop = head->is_CountedLoop();
  if (counted_loop) {
    CountedLoopNode *cl = head->as_CountedLoop();
    assert(cl->trip_count() > 0, "peeling a fully unrolled loop");
    cl->set_trip_count(cl->trip_count() - 1);
    if (cl->is_main_loop()) {
      cl->set_normal_loop();
#ifndef PRODUCT
      if (PrintOpto && VerifyLoopOptimizations) {
        tty->print("Peeling a 'main' loop; resetting to 'normal' ");
        loop->dump_head();
      }
#endif
    }
  }
  Node* entry = head->in(LoopNode::EntryControl);

  // Step 1: Clone the loop body.  The clone becomes the peeled iteration.
  //         The pre-loop illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dom_depth(head) );

  // Step 2: Make the old-loop fall-in edges point to the peeled iteration.
  //         Do this by making the old-loop fall-in edges act as if they came
  //         around the loopback from the prior iteration (follow the old-loop
  //         backedges) and then map to the new peeled iteration.  This leaves
  //         the pre-loop with only 1 user (the new peeled iteration), but the
  //         peeled-loop backedge has 2 users.
  Node* new_entry = old_new[head->in(LoopNode::LoopBackControl)->_idx];
  _igvn.hash_delete(head);
  head->set_req(LoopNode::EntryControl, new_entry);
  for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
    Node* old = head->fast_out(j);
    if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) {
      Node* new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx];
      if (!new_exit_value )     // Backedge value is ALSO loop invariant?
        // Then loop body backedge value remains the same.
        new_exit_value = old->in(LoopNode::LoopBackControl);
      _igvn.hash_delete(old);
      old->set_req(LoopNode::EntryControl, new_exit_value);
    }
  }

  // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
  //         extra backedge user.
  Node* new_head = old_new[head->_idx];
  _igvn.hash_delete(new_head);
  new_head->set_req(LoopNode::LoopBackControl, C->top());
  for (DUIterator_Fast j2max, j2 = new_head->fast_outs(j2max); j2 < j2max; j2++) {
    Node* use = new_head->fast_out(j2);
    if (use->in(0) == new_head && use->req() == 3 && use->is_Phi()) {
      _igvn.hash_delete(use);
      use->set_req(LoopNode::LoopBackControl, C->top());
    }
  }

  // Step 4: Correct dom-depth info.  Set to loop-head depth.
  int dd = dom_depth(head);
  set_idom(head, head->in(1), dd);
  for (uint j3 = 0; j3 < loop->_body.size(); j3++) {
    Node *old = loop->_body.at(j3);
    Node *nnn = old_new[old->_idx];
    if (!has_ctrl(nnn))
      set_idom(nnn, idom(nnn), dd-1);
  }

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);

  loop->record_for_igvn();
}
#define EMPTY_LOOP_SIZE 7 // number of nodes in an empty loop

//------------------------------policy_maximally_unroll------------------------
// Calculate exact loop trip count and return true if loop can be maximally
// unrolled.
bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
  CountedLoopNode *cl = _head->as_CountedLoop();
  assert(cl->is_normal_loop(), "");
  if (!cl->is_valid_counted_loop())
    return false; // Malformed counted loop

  if (!cl->has_exact_trip_count()) {
    // Trip count is not exact.
    return false;
  }

  uint trip_count = cl->trip_count();
  // Note, max_juint is used to indicate unknown trip count.
  assert(trip_count > 1, "one iteration loop should be optimized out already");
  assert(trip_count < max_juint, "exact trip_count should be less than max_uint.");

  // Real policy: if we maximally unroll, does it get too big?
  // Allow the unrolled mess to get larger than standard loop
  // size.  After all, it will no longer be a loop.
  uint body_size    = _body.size();
  uint unroll_limit = (uint)LoopUnrollLimit * 4;
  assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
  if (trip_count > unroll_limit || body_size > unroll_limit) {
    return false;
  }

  // Fully unroll a loop with few iterations, regardless of the following
  // conditions, since subsequent loop optimizations will split
  // such a loop anyway (pre-main-post).
  if (trip_count <= 3)
    return true;

  // Take into account that after unroll conjoined heads and tails will fold,
  // otherwise policy_unroll() may allow more unrolling than max unrolling.
  uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count;
  uint tst_body_size = (new_body_size - EMPTY_LOOP_SIZE) / trip_count + EMPTY_LOOP_SIZE;
  if (body_size != tst_body_size) // Check for int overflow
    return false;
  if (new_body_size > unroll_limit ||
      // Unrolling can result in a large amount of node construction
      new_body_size >= MaxNodeLimit - (uint) phase->C->live_nodes()) {
    return false;
  }

  // Do not unroll a loop with String intrinsics code.
  // String intrinsics are large and have loops.
  for (uint k = 0; k < _body.size(); k++) {
    Node* n = _body.at(k);
    switch (n->Opcode()) {
      case Op_StrComp:
      case Op_StrEquals:
      case Op_StrIndexOf:
      case Op_EncodeISOArray:
      case Op_AryEq: {
        return false;
      }
#if INCLUDE_RTM_OPT
      case Op_FastLock:
      case Op_FastUnlock: {
        // Don't unroll RTM locking code because it is large.
        if (UseRTMLocking) {
          return false;
        }
      }
#endif
    } // switch
  }

  return true; // Do maximally unroll
}
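
// Worked size estimate (illustrative): with EMPTY_LOOP_SIZE = 7, a loop of
// body_size = 17 and trip_count = 4 yields
//   new_body_size = 7 + (17 - 7) * 4 = 47 nodes,
// i.e. only the 10 "real" body nodes are replicated per trip while the loop
// control overhead is counted once. Recomputing tst_body_size from
// new_body_size gives back 17, so the overflow check above passes.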
//------------------------------policy_unroll----------------------------------
// Return TRUE if the loop should be unrolled, otherwise FALSE. Unroll if
// the loop is a CountedLoop and the body is small enough.
bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {

  CountedLoopNode *cl = _head->as_CountedLoop();
  assert(cl->is_normal_loop() || cl->is_main_loop(), "");

  if (!cl->is_valid_counted_loop())
    return false; // Malformed counted loop

  // Protect against over-unrolling.
  // After split at least one iteration will be executed in pre-loop.
  if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false;

  int future_unroll_ct = cl->unrolled_count() * 2;
  if (future_unroll_ct > LoopMaxUnroll) return false;

  // Check for initial stride being a small enough constant
  if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false;

  // Don't unroll if the next round of unrolling would push us
  // over the expected trip count of the loop.  One is subtracted
  // from the expected trip count because the pre-loop normally
  // executes 1 iteration.
  if (UnrollLimitForProfileCheck > 0 &&
      cl->profile_trip_cnt() != COUNT_UNKNOWN &&
      future_unroll_ct        > UnrollLimitForProfileCheck &&
      (float)future_unroll_ct > cl->profile_trip_cnt() - 1.0) {
    return false;
  }

  // When unroll count is greater than LoopUnrollMin, don't unroll if:
  //   the residual iterations are more than 10% of the trip count
  //   and rounds of "unroll,optimize" are not making significant progress.
  //   Progress is defined as current size less than 20% larger than previous size.
  if (UseSuperWord && cl->node_count_before_unroll() > 0 &&
      future_unroll_ct > LoopUnrollMin &&
      (future_unroll_ct - 1) * 10.0 > cl->profile_trip_cnt() &&
      1.2 * cl->node_count_before_unroll() < (double)_body.size()) {
    return false;
  }

  Node *init_n = cl->init_trip();
  Node *limit_n = cl->limit();
  int stride_con = cl->stride_con();
  // Non-constant bounds.
  // Protect against over-unrolling when init or/and limit are not constant
  // (so that trip_count's init value is maxint) but iv range is known.
  if (init_n == NULL || !init_n->is_Con() ||
      limit_n == NULL || !limit_n->is_Con()) {
    Node* phi = cl->phi();
    if (phi != NULL) {
      assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi.");
      const TypeInt* iv_type = phase->_igvn.type(phi)->is_int();
      int next_stride = stride_con * 2; // stride after this unroll
      if (next_stride > 0) {
        if (iv_type->_lo + next_stride <= iv_type->_lo || // overflow
            iv_type->_lo + next_stride >  iv_type->_hi) {
          return false;  // over-unrolling
        }
      } else if (next_stride < 0) {
        if (iv_type->_hi + next_stride >= iv_type->_hi || // overflow
            iv_type->_hi + next_stride <  iv_type->_lo) {
          return false;  // over-unrolling
        }
      }
    }
  }

  // After unroll limit will be adjusted: new_limit = limit-stride.
  // Bailout if adjustment overflow.
  const TypeInt* limit_type = phase->_igvn.type(limit_n)->is_int();
  if ((stride_con > 0 && ((limit_type->_hi - stride_con) >= limit_type->_hi)) ||
      (stride_con < 0 && ((limit_type->_lo - stride_con) <= limit_type->_lo)))
    return false;  // overflow

  // Adjust body_size to determine if we unroll or not
  uint body_size = _body.size();
  // Key test to unroll loop in CRC32 java code
  int xors_in_loop = 0;
  // Also count ModL, DivL and MulL which expand mightily
  for (uint k = 0; k < _body.size(); k++) {
    Node* n = _body.at(k);
    switch (n->Opcode()) {
      case Op_XorI: xors_in_loop++; break; // CRC32 java code
      case Op_ModL: body_size += 30; break;
      case Op_DivL: body_size += 30; break;
      case Op_MulL: body_size += 10; break;
      case Op_StrComp:
      case Op_StrEquals:
      case Op_StrIndexOf:
      case Op_EncodeISOArray:
      case Op_AryEq: {
        // Do not unroll a loop with String intrinsics code.
        // String intrinsics are large and have loops.
        return false;
      }
#if INCLUDE_RTM_OPT
      case Op_FastLock:
      case Op_FastUnlock: {
        // Don't unroll RTM locking code because it is large.
        if (UseRTMLocking) {
          return false;
        }
      }
#endif
    } // switch
  }

  // Check for being too big
  if (body_size > (uint)LoopUnrollLimit) {
    if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true;
    // Normal case: loop too big
    return false;
  }

  // Unroll once!  (Each trip will soon do double iterations)
  return true;
}
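
// Worked example of the stride check above (illustrative): a loop that has
// not been unrolled yet has unrolled_count() == 1, so future_unroll_ct == 2
// and the constant stride must satisfy |stride| <= (1<<2)*2 = 8. A loop with
// stride 16 is therefore rejected on the first round, while a stride-4 loop
// may keep doubling until another limit (trip count, body size) stops it.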
//------------------------------policy_align-----------------------------------
// Return TRUE if the loop should be cache-line aligned, otherwise FALSE.
// Gather the expression that does the alignment.  Note that only one array
// base can be aligned in a loop (unless the VM guarantees mutual alignment).
// Note that if we vectorize short memory ops into longer memory ops, we may
// want to increase alignment.
bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {
  return false;
}
//------------------------------policy_range_check-----------------------------
// Return TRUE if the loop should be range-check-eliminated, otherwise FALSE.
// Actually we do iteration-splitting, a more powerful form of RCE.
bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
  if (!RangeCheckElimination) return false;

  CountedLoopNode *cl = _head->as_CountedLoop();
  // If we unrolled with no intention of doing RCE and we later
  // changed our minds, we got no pre-loop.  Either we need to
  // make a new pre-loop, or we gotta disallow RCE.
  if (cl->is_main_no_pre_loop()) return false; // Disallowed for now.
  Node *trip_counter = cl->phi();

  // Check loop body for tests of trip-counter plus loop-invariant vs
  // loop-invariant.
  for (uint i = 0; i < _body.size(); i++) {
    Node *iff = _body[i];
    if (iff->Opcode() == Op_If) { // Test?

      // Comparing trip+off vs limit
      Node *bol = iff->in(1);
      if (bol->req() != 2) continue; // dead constant test
      if (!bol->is_Bool()) {
        assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only");
        continue;
      }
      if (bol->as_Bool()->_test._test == BoolTest::ne)
        continue; // not RC

      Node *cmp = bol->in(1);
      Node *rc_exp = cmp->in(1);
      Node *limit = cmp->in(2);

      Node *limit_c = phase->get_ctrl(limit);
      if( limit_c == phase->C->top() )
        return false;           // Found dead test on live IF?  No RCE!
      if( is_member(phase->get_loop(limit_c) ) ) {
        // Compare might have operands swapped; commute them
        rc_exp = cmp->in(2);
        limit  = cmp->in(1);
        limit_c = phase->get_ctrl(limit);
        if( is_member(phase->get_loop(limit_c) ) )
          continue;             // Both inputs are loop varying; cannot RCE
      }

      if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, NULL, NULL)) {
        continue;
      }
      // Yeah!  Found a test like 'trip+off vs limit'
      // Test is an IfNode, has 2 projections.  If BOTH are in the loop
      // we need loop unswitching instead of iteration splitting.
      if( is_loop_exit(iff) )
        return true; // Found reason to split iterations
    } // End of is IF
  }

  return false;
}
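
// Illustrative pattern (a sketch): Java array code such as
//   for (int i = init; i < n; i++) { sum += a[i + off]; }
// carries an implicit range check 'i + off <u a.length'. Here rc_exp is the
// scaled-iv-plus-offset form 1*i + off and 'a.length' is the loop-invariant
// limit, so the test above qualifies the loop for iteration splitting.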
//------------------------------policy_peel_only-------------------------------
// Return TRUE if the loop should NEVER be RCE'd or aligned, otherwise FALSE.
// Useful for unrolling loops with NO array accesses.
bool IdealLoopTree::policy_peel_only( PhaseIdealLoop *phase ) const {

  for( uint i = 0; i < _body.size(); i++ )
    if( _body[i]->is_Mem() )
      return false;

  // No memory accesses at all!
  return true;
}
//------------------------------clone_up_backedge_goo--------------------------
// If Node n lives in the back_ctrl block and cannot float, we clone a private
// version of n in preheader_ctrl block and return that, otherwise return n.
Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones ) {
  if( get_ctrl(n) != back_ctrl ) return n;

  // Only visit once
  if (visited.test_set(n->_idx)) {
    Node *x = clones.find(n->_idx);
    if (x != NULL)
      return x;
    return n;
  }

  Node *x = NULL;               // If required, a clone of 'n'
  // Check for 'n' being pinned in the backedge.
  if( n->in(0) && n->in(0) == back_ctrl ) {
    assert(clones.find(n->_idx) == NULL, "dead loop");
    x = n->clone();             // Clone a copy of 'n' to preheader
    clones.push(x, n->_idx);
    x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader
  }

  // Recursively fix up any other input edges into x.
  // If there are no changes we can just return 'n', otherwise
  // we need to clone a private copy and change it.
  for( uint i = 1; i < n->req(); i++ ) {
    Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i), visited, clones );
    if( g != n->in(i) ) {
      if( !x ) {
        assert(clones.find(n->_idx) == NULL, "dead loop");
        x = n->clone();
        clones.push(x, n->_idx);
      }
      x->set_req(i, g);
    }
  }
  if( x ) {                     // x can legally float to pre-header location
    register_new_node( x, preheader_ctrl );
    return x;
  } else {                      // raise n to cover LCA of uses
    set_ctrl( n, find_non_split_ctrl(back_ctrl->in(0)) );
  }
  return n;
}
//------------------------------insert_pre_post_loops--------------------------
// Insert pre and post loops.  If peel_only is set, the pre-loop cannot have
// more iterations added.  It acts as a 'peel' only, no lower-bound RCE, no
// alignment.  Useful to unroll loops that do no array accesses.
void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) {

#ifndef PRODUCT
  if (TraceLoopOpts) {
    if (peel_only)
      tty->print("PeelMainPost ");
    else
      tty->print("PreMainPost  ");
    loop->dump_head();
  }
#endif
  C->set_major_progress();

  // Find common pieces of the loop being guarded with pre & post loops
  CountedLoopNode *main_head = loop->_head->as_CountedLoop();
  assert( main_head->is_normal_loop(), "" );
  CountedLoopEndNode *main_end = main_head->loopexit();
  guarantee(main_end != NULL, "no loop exit node");
  assert( main_end->outcnt() == 2, "1 true, 1 false path only" );
  uint dd_main_head = dom_depth(main_head);
  uint max = main_head->outcnt();

  Node *pre_header= main_head->in(LoopNode::EntryControl);
  Node *init      = main_head->init_trip();
  Node *incr      = main_end ->incr();
  Node *limit     = main_end ->limit();
  Node *stride    = main_end ->stride();
  Node *cmp       = main_end ->cmp_node();
  BoolTest::mask b_test = main_end->test_trip();

  // Need only 1 user of 'bol' because I will be hacking the loop bounds.
  Node *bol = main_end->in(CountedLoopEndNode::TestValue);
  if( bol->outcnt() != 1 ) {
    bol = bol->clone();
    register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.hash_delete(main_end);
    main_end->set_req(CountedLoopEndNode::TestValue, bol);
  }
  // Need only 1 user of 'cmp' because I will be hacking the loop bounds.
  if( cmp->outcnt() != 1 ) {
    cmp = cmp->clone();
    register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.hash_delete(bol);
    bol->set_req(1, cmp);
  }
  //------------------------------
  // Step A: Create Post-Loop.
  Node* main_exit = main_end->proj_out(false);
  assert( main_exit->Opcode() == Op_IfFalse, "" );
  int dd_main_exit = dom_depth(main_exit);

  // Step A1: Clone the loop body.  The clone becomes the post-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_exit );
  assert( old_new[main_end ->_idx]->Opcode() == Op_CountedLoopEnd, "" );
  CountedLoopNode *post_head = old_new[main_head->_idx]->as_CountedLoop();
  post_head->set_post_loop(main_head);

  // Reduce the post-loop trip count.
  CountedLoopEndNode* post_end = old_new[main_end ->_idx]->as_CountedLoopEnd();
  post_end->_prob = PROB_FAIR;

  // Build the main-loop normal exit.
  IfFalseNode *new_main_exit = new (C) IfFalseNode(main_end);
  _igvn.register_new_node_with_optimizer( new_main_exit );
  set_idom(new_main_exit, main_end, dd_main_exit );
  set_loop(new_main_exit, loop->_parent);

  // Step A2: Build a zero-trip guard for the post-loop.  After leaving the
  // main-loop, the post-loop may not execute at all.  We 'opaque' the incr
  // (the main-loop trip-counter exit value) because we will be changing
  // the exit value (via unrolling) so we cannot constant-fold away the zero
  // trip guard until all unrolling is done.
  Node *zer_opaq = new (C) Opaque1Node(C, incr);
  Node *zer_cmp  = new (C) CmpINode( zer_opaq, limit );
  Node *zer_bol  = new (C) BoolNode( zer_cmp, b_test );
  register_new_node( zer_opaq, new_main_exit );
  register_new_node( zer_cmp , new_main_exit );
  register_new_node( zer_bol , new_main_exit );

  // Build the IfNode
  IfNode *zer_iff = new (C) IfNode( new_main_exit, zer_bol, PROB_FAIR, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( zer_iff );
  set_idom(zer_iff, new_main_exit, dd_main_exit);
  set_loop(zer_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip post-loop
  _igvn.replace_input_of(main_exit, 0, zer_iff);
  set_idom(main_exit, zer_iff, dd_main_exit);
  set_idom(main_exit->unique_out(), zer_iff, dd_main_exit);
  // Make the true-path, must enter the post loop
  Node *zer_taken = new (C) IfTrueNode( zer_iff );
  _igvn.register_new_node_with_optimizer( zer_taken );
  set_idom(zer_taken, zer_iff, dd_main_exit);
  set_loop(zer_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( post_head );
  post_head->set_req(LoopNode::EntryControl, zer_taken);
  set_idom(post_head, zer_taken, dd_main_exit);

  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);
  Node_Stack clones(a, main_head->back_control()->outcnt());
  // Step A3: Make the fall-in values to the post-loop come from the
  // fall-out values of the main-loop.
  for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) {
    Node* main_phi = main_head->fast_out(i);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() >0 ) {
      Node *post_phi = old_new[main_phi->_idx];
      Node *fallmain = clone_up_backedge_goo(main_head->back_control(),
                                             post_head->init_control(),
                                             main_phi->in(LoopNode::LoopBackControl),
                                             visited, clones);
      _igvn.hash_delete(post_phi);
      post_phi->set_req( LoopNode::EntryControl, fallmain );
    }
  }

  // Update local caches for next stanza
  main_exit = new_main_exit;
  //------------------------------
  // Step B: Create Pre-Loop.

  // Step B1: Clone the loop body.  The clone becomes the pre-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_head );
  CountedLoopNode*    pre_head = old_new[main_head->_idx]->as_CountedLoop();
  CountedLoopEndNode* pre_end  = old_new[main_end ->_idx]->as_CountedLoopEnd();
  pre_head->set_pre_loop(main_head);
  Node *pre_incr = old_new[incr->_idx];

  // Reduce the pre-loop trip count.
  pre_end->_prob = PROB_FAIR;

  // Find the pre-loop normal exit.
  Node* pre_exit = pre_end->proj_out(false);
  assert( pre_exit->Opcode() == Op_IfFalse, "" );
  IfFalseNode *new_pre_exit = new (C) IfFalseNode(pre_end);
  _igvn.register_new_node_with_optimizer( new_pre_exit );
  set_idom(new_pre_exit, pre_end, dd_main_head);
  set_loop(new_pre_exit, loop->_parent);

  // Step B2: Build a zero-trip guard for the main-loop.  After leaving the
  // pre-loop, the main-loop may not execute at all.  Later in life this
  // zero-trip guard will become the minimum-trip guard when we unroll
  // the main-loop.
  Node *min_opaq = new (C) Opaque1Node(C, limit);
  Node *min_cmp  = new (C) CmpINode( pre_incr, min_opaq );
  Node *min_bol  = new (C) BoolNode( min_cmp, b_test );
  register_new_node( min_opaq, new_pre_exit );
  register_new_node( min_cmp , new_pre_exit );
  register_new_node( min_bol , new_pre_exit );

  // Build the IfNode (assume the main-loop is executed always).
  IfNode *min_iff = new (C) IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( min_iff );
  set_idom(min_iff, new_pre_exit, dd_main_head);
  set_loop(min_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip main-loop
  _igvn.hash_delete( pre_exit );
  pre_exit->set_req(0, min_iff);
  set_idom(pre_exit, min_iff, dd_main_head);
  set_idom(pre_exit->unique_out(), min_iff, dd_main_head);
  // Make the true-path, must enter the main loop
  Node *min_taken = new (C) IfTrueNode( min_iff );
  _igvn.register_new_node_with_optimizer( min_taken );
  set_idom(min_taken, min_iff, dd_main_head);
  set_loop(min_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( main_head );
  main_head->set_req(LoopNode::EntryControl, min_taken);
  set_idom(main_head, min_taken, dd_main_head);

  visited.Clear();
  clones.clear();
  // Step B3: Make the fall-in values to the main-loop come from the
  // fall-out values of the pre-loop.
  for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) {
    Node* main_phi = main_head->fast_out(i2);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
      Node *pre_phi = old_new[main_phi->_idx];
      Node *fallpre = clone_up_backedge_goo(pre_head->back_control(),
                                            main_head->init_control(),
                                            pre_phi->in(LoopNode::LoopBackControl),
                                            visited, clones);
      _igvn.hash_delete(main_phi);
      main_phi->set_req( LoopNode::EntryControl, fallpre );
    }
  }
  // Step B4: Shorten the pre-loop to run only 1 iteration (for now).
  // RCE and alignment may change this later.
  Node *cmp_end = pre_end->cmp_node();
  assert( cmp_end->in(2) == limit, "" );
  Node *pre_limit = new (C) AddINode( init, stride );

  // Save the original loop limit in this Opaque1 node for
  // use by range check elimination.
  Node *pre_opaq  = new (C) Opaque1Node(C, pre_limit, limit);

  register_new_node( pre_limit, pre_head->in(0) );
  register_new_node( pre_opaq , pre_head->in(0) );

  // Since no other users of pre-loop compare, I can hack limit directly
  assert( cmp_end->outcnt() == 1, "no other users" );
  _igvn.hash_delete(cmp_end);
  cmp_end->set_req(2, peel_only ? pre_limit : pre_opaq);

  // Special case for not-equal loop bounds:
  // Change pre loop test, main loop test, and the
  // main loop guard test to use lt or gt depending on stride
  // direction:
  // positive stride use <
  // negative stride use >
  //
  // not-equal test is kept for post loop to handle case
  // when init > limit when stride > 0 (and reverse).

  if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) {

    BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt;
    // Modify pre loop end condition
    Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol0 = new (C) BoolNode(pre_bol->in(1), new_test);
    register_new_node( new_bol0, pre_head->in(0) );
    _igvn.hash_delete(pre_end);
    pre_end->set_req(CountedLoopEndNode::TestValue, new_bol0);
    // Modify main loop guard condition
    assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay");
    BoolNode* new_bol1 = new (C) BoolNode(min_bol->in(1), new_test);
    register_new_node( new_bol1, new_pre_exit );
    _igvn.hash_delete(min_iff);
    min_iff->set_req(CountedLoopEndNode::TestValue, new_bol1);
    // Modify main loop end condition
    BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol2 = new (C) BoolNode(main_bol->in(1), new_test);
    register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) );
    _igvn.hash_delete(main_end);
    main_end->set_req(CountedLoopEndNode::TestValue, new_bol2);
  }
  // Flag main loop
  main_head->set_main_loop();
  if( peel_only ) main_head->set_main_no_pre_loop();

  // Subtract a trip count for the pre-loop.
  main_head->set_trip_count(main_head->trip_count() - 1);

  // It's difficult to be precise about the trip-counts
  // for the pre/post loops.  They are usually very short,
  // so guess that 4 trips is a reasonable value.
  post_head->set_profile_trip_cnt(4.0);
  pre_head->set_profile_trip_cnt(4.0);

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);
  loop->record_for_igvn();
}
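
// Roughly, at the source level (an illustrative Java-like sketch):
//
//   for (i = init; i < limit; i += stride) body(i);
//
// becomes
//
//   i = init;
//   pre:  while (i < init+stride) { body(i); i += stride; } // 1 trip for now;
//                                                           // RCE may add more
//   if (i < opaque(limit))                     // min-trip guard for main loop
//     main: while (i < limit) { body(i); i += stride; }
//   if (i < opaque(limit))                     // zero-trip guard for post loop
//     post: while (i < limit) { body(i); i += stride; }
//
// The Opaque1 nodes hide the limits from igvn so the guards cannot be
// constant-folded away while unrolling and RCE are still changing the
// main-loop bounds.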
//------------------------------is_invariant-----------------------------
// Return true if n is invariant
bool IdealLoopTree::is_invariant(Node* n) const {
  Node *n_c = _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n;
  if (n_c->is_top()) return false;
  return !is_member(_phase->get_loop(n_c));
}
//------------------------------do_unroll--------------------------------------
// Unroll the loop body one step - make each trip do 2 iterations.
void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) {
  assert(LoopUnrollLimit, "");
  CountedLoopNode *loop_head = loop->_head->as_CountedLoop();
  CountedLoopEndNode *loop_end = loop_head->loopexit();
  assert(loop_end, "");
#ifndef PRODUCT
  if (PrintOpto && VerifyLoopOptimizations) {
    tty->print("Unrolling ");
    loop->dump_head();
  } else if (TraceLoopOpts) {
    if (loop_head->trip_count() < (uint)LoopUnrollLimit) {
      tty->print("Unroll %d(%2d) ", loop_head->unrolled_count()*2, loop_head->trip_count());
    } else {
      tty->print("Unroll %d     ", loop_head->unrolled_count()*2);
    }
    loop->dump_head();
  }
#endif

  // Remember loop node count before unrolling to detect
  // if rounds of unroll,optimize are making progress
  loop_head->set_node_count_before_unroll(loop->_body.size());

  Node *ctrl   = loop_head->in(LoopNode::EntryControl);
  Node *limit  = loop_head->limit();
  Node *init   = loop_head->init_trip();
  Node *stride = loop_head->stride();

  Node *opaq = NULL;
  if (adjust_min_trip) {       // If not maximally unrolling, need adjustment
    // Search for zero-trip guard.
    assert( loop_head->is_main_loop(), "" );
    assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" );
    Node *iff = ctrl->in(0);
    assert( iff->Opcode() == Op_If, "" );
    Node *bol = iff->in(1);
    assert( bol->Opcode() == Op_Bool, "" );
    Node *cmp = bol->in(1);
    assert( cmp->Opcode() == Op_CmpI, "" );
    opaq = cmp->in(2);
    // Occasionally it's possible for a zero-trip guard Opaque1 node to be
    // optimized away and then another round of loop opts attempted.
    // We cannot optimize this particular loop in that case.
    if (opaq->Opcode() != Op_Opaque1)
      return; // Cannot find zero-trip guard!  Bail out!
    // Zero-trip test uses an 'opaque' node which is not shared.
    assert(opaq->outcnt() == 1 && opaq->in(1) == limit, "");
  }

  C->set_major_progress();
  Node* new_limit = NULL;
  if (UnrollLimitCheck) {
    int stride_con = stride->get_int();
    int stride_p = (stride_con > 0) ? stride_con : -stride_con;
    uint old_trip_count = loop_head->trip_count();
    // Verify that unroll policy result is still valid.
    assert(old_trip_count > 1 &&
           (!adjust_min_trip || stride_p <= (1<<3)*loop_head->unrolled_count()), "sanity");

    // Adjust loop limit to keep valid iterations number after unroll.
    // Use (limit - stride) instead of (((limit - init)/stride) & (-2))*stride
    // which may overflow.
    if (!adjust_min_trip) {
      assert(old_trip_count > 1 && (old_trip_count & 1) == 0,
             "odd trip count for maximally unroll");
      // Don't need to adjust limit for maximally unroll since trip count is even.
    } else if (loop_head->has_exact_trip_count() && init->is_Con()) {
      // Loop's limit is constant. Loop's init could be constant when the
      // pre-loop becomes a peeled iteration.
      jlong init_con = init->get_int();
      // We can keep old loop limit if iterations count stays the same:
      //   old_trip_count == new_trip_count * 2
      // Note: since old_trip_count >= 2 then new_trip_count >= 1
      // so we also don't need to adjust zero trip test.
      jlong limit_con  = limit->get_int();
      // (stride_con*2) not overflow since stride_con <= 8.
      int new_stride_con = stride_con * 2;
      int stride_m = new_stride_con - (stride_con > 0 ? 1 : -1);
      jlong trip_count = (limit_con - init_con + stride_m)/new_stride_con;
      // New trip count should satisfy next conditions.
      assert(trip_count > 0 && (julong)trip_count < (julong)max_juint/2, "sanity");
      uint new_trip_count = (uint)trip_count;
      adjust_min_trip = (old_trip_count != new_trip_count*2);
    }
    if (adjust_min_trip) {
      // Step 2: Adjust the trip limit if it is called for.
      // The adjustment amount is -stride. Need to make sure if the
      // adjustment underflows or overflows, then the main loop is skipped.
      Node* cmp = loop_end->cmp_node();
      assert(cmp->in(2) == limit, "sanity");
      assert(opaq != NULL && opaq->in(1) == limit, "sanity");

      // Verify that policy_unroll result is still valid.
      const TypeInt* limit_type = _igvn.type(limit)->is_int();
      assert(stride_con > 0 && ((limit_type->_hi - stride_con) < limit_type->_hi) ||
             stride_con < 0 && ((limit_type->_lo - stride_con) > limit_type->_lo), "sanity");

      if (limit->is_Con()) {
        // The check in policy_unroll and the assert above guarantee
        // no underflow if limit is constant.
        new_limit = _igvn.intcon(limit->get_int() - stride_con);
        set_ctrl(new_limit, C->root());
      } else {
        // Limit is not constant.
        if (loop_head->unrolled_count() == 1) { // only for first unroll
          // Separate limit by Opaque node in case it is an incremented
          // variable from previous loop to avoid using pre-incremented
          // value which could increase register pressure.
          // Otherwise reorg_offsets() optimization will create a separate
          // Opaque node for each use of trip-counter and as result
          // zero trip guard limit will be different from loop limit.
          assert(has_ctrl(opaq), "should have it");
          Node* opaq_ctrl = get_ctrl(opaq);
          limit = new (C) Opaque2Node( C, limit );
          register_new_node( limit, opaq_ctrl );
        }
        if (stride_con > 0 && ((limit_type->_lo - stride_con) < limit_type->_lo) ||
            stride_con < 0 && ((limit_type->_hi - stride_con) > limit_type->_hi)) {
          // No underflow.
          new_limit = new (C) SubINode(limit, stride);
        } else {
          // (limit - stride) may underflow.
          // Clamp the adjustment value with MININT or MAXINT:
          //
          //   new_limit = limit-stride
          //   if (stride > 0)
          //     new_limit = (limit < new_limit) ? MININT : new_limit;
          //   else
          //     new_limit = (limit > new_limit) ? MAXINT : new_limit;
          //
          BoolTest::mask bt = loop_end->test_trip();
          assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
          Node* adj_max = _igvn.intcon((stride_con > 0) ? min_jint : max_jint);
          set_ctrl(adj_max, C->root());
          Node* old_limit = NULL;
          Node* adj_limit = NULL;
          Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL;
          if (loop_head->unrolled_count() > 1 &&
              limit->is_CMove() && limit->Opcode() == Op_CMoveI &&
              limit->in(CMoveNode::IfTrue) == adj_max &&
              bol->as_Bool()->_test._test == bt &&
              bol->in(1)->Opcode() == Op_CmpI &&
              bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) {
            // Loop was unrolled before.
            // Optimize the limit to avoid nested CMove:
            // use original limit as old limit.
            old_limit = bol->in(1)->in(1);
            // Adjust previous adjusted limit.
            adj_limit = limit->in(CMoveNode::IfFalse);
            adj_limit = new (C) SubINode(adj_limit, stride);
          } else {
            old_limit = limit;
            adj_limit = new (C) SubINode(limit, stride);
          }
          assert(old_limit != NULL && adj_limit != NULL, "");
          register_new_node( adj_limit, ctrl ); // adjust amount
          Node* adj_cmp = new (C) CmpINode(old_limit, adj_limit);
          register_new_node( adj_cmp, ctrl );
          Node* adj_bool = new (C) BoolNode(adj_cmp, bt);
          register_new_node( adj_bool, ctrl );
          new_limit = new (C) CMoveINode(adj_bool, adj_limit, adj_max, TypeInt::INT);
        }
        register_new_node(new_limit, ctrl);
      }
      assert(new_limit != NULL, "");
      // Replace in loop test.
      assert(loop_end->in(1)->in(1) == cmp, "sanity");
      if (cmp->outcnt() == 1 && loop_end->in(1)->outcnt() == 1) {
        // Don't need to create new test since only one user.
        _igvn.hash_delete(cmp);
        cmp->set_req(2, new_limit);
      } else {
        // Create new test since it is shared.
        Node* ctrl2 = loop_end->in(0);
        Node* cmp2  = cmp->clone();
        cmp2->set_req(2, new_limit);
        register_new_node(cmp2, ctrl2);
        Node* bol2 = loop_end->in(1)->clone();
        bol2->set_req(1, cmp2);
        register_new_node(bol2, ctrl2);
        _igvn.hash_delete(loop_end);
        loop_end->set_req(1, bol2);
      }
      // Step 3: Find the min-trip test guaranteed before a 'main' loop.
      // Make it a 1-trip test (means at least 2 trips).

      // Guard test uses an 'opaque' node which is not shared.  Hence I
      // can edit its inputs directly.  Hammer in the new limit for the
      // minimum-trip guard.
      assert(opaq->outcnt() == 1, "");
      _igvn.hash_delete(opaq);
      opaq->set_req(1, new_limit);
    }
    // Adjust max trip count. The trip count is intentionally rounded
    // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
    // the main, unrolled, part of the loop will never execute as it is protected
    // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
    // and later determined that part of the unrolled loop was dead.
    loop_head->set_trip_count(old_trip_count / 2);

    // Double the count of original iterations in the unrolled loop body.
    loop_head->double_unrolled_count();

  } else { // LoopLimitCheck

    // Adjust max trip count. The trip count is intentionally rounded
    // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
    // the main, unrolled, part of the loop will never execute as it is protected
    // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
    // and later determined that part of the unrolled loop was dead.
    loop_head->set_trip_count(loop_head->trip_count() / 2);

    // Double the count of original iterations in the unrolled loop body.
    loop_head->double_unrolled_count();

    // -----------
    // Step 2: Cut back the trip counter for an unroll amount of 2.
    // Loop will normally trip (limit - init)/stride_con.  Since it's a
    // CountedLoop this is exact (stride divides limit-init exactly).
    // We are going to double the loop body, so we want to knock off any
    // odd iteration: (trip_cnt & ~1).  Then back compute a new limit.
    Node *span = new (C) SubINode( limit, init );
    register_new_node( span, ctrl );
    Node *trip = new (C) DivINode( 0, span, stride );
    register_new_node( trip, ctrl );
    Node *mtwo = _igvn.intcon(-2);
    set_ctrl(mtwo, C->root());
    Node *rond = new (C) AndINode( trip, mtwo );
    register_new_node( rond, ctrl );
    Node *spn2 = new (C) MulINode( rond, stride );
    register_new_node( spn2, ctrl );
    new_limit = new (C) AddINode( spn2, init );
    register_new_node( new_limit, ctrl );

    // Hammer in the new limit
    Node *ctrl2 = loop_end->in(0);
    Node *cmp2  = new (C) CmpINode( loop_head->incr(), new_limit );
    register_new_node( cmp2, ctrl2 );
    Node *bol2  = new (C) BoolNode( cmp2, loop_end->test_trip() );
    register_new_node( bol2, ctrl2 );
    _igvn.hash_delete(loop_end);
    loop_end->set_req(CountedLoopEndNode::TestValue, bol2);

    // Step 3: Find the min-trip test guaranteed before a 'main' loop.
    // Make it a 1-trip test (means at least 2 trips).
    if( adjust_min_trip ) {
      assert( new_limit != NULL, "" );
      // Guard test uses an 'opaque' node which is not shared.  Hence I
      // can edit its inputs directly.  Hammer in the new limit for the
      // minimum-trip guard.
      assert( opaq->outcnt() == 1, "" );
      _igvn.hash_delete(opaq);
      opaq->set_req(1, new_limit);
    }
  } // LoopLimitCheck
  // ---------
  // Step 4: Clone the loop body.  Move it inside the loop.  This loop body
  // represents the odd iterations; since the loop trips an even number of
  // times its backedge is never taken.  Kill the backedge.
  uint dd = dom_depth(loop_head);
  clone_loop( loop, old_new, dd );

  // Make backedges of the clone equal to backedges of the original.
  // Make the fall-in from the original come from the fall-out of the clone.
  for (DUIterator_Fast jmax, j = loop_head->fast_outs(jmax); j < jmax; j++) {
    Node* phi = loop_head->fast_out(j);
    if( phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0 ) {
      Node *newphi = old_new[phi->_idx];
      _igvn.hash_delete( phi );
      _igvn.hash_delete( newphi );

      phi   ->set_req(LoopNode::   EntryControl, newphi->in(LoopNode::LoopBackControl));
      newphi->set_req(LoopNode::LoopBackControl, phi   ->in(LoopNode::LoopBackControl));
      phi   ->set_req(LoopNode::LoopBackControl, C->top());
    }
  }
  Node *clone_head = old_new[loop_head->_idx];
  _igvn.hash_delete( clone_head );
  loop_head ->set_req(LoopNode::   EntryControl, clone_head->in(LoopNode::LoopBackControl));
  clone_head->set_req(LoopNode::LoopBackControl, loop_head ->in(LoopNode::LoopBackControl));
  loop_head ->set_req(LoopNode::LoopBackControl, C->top());
  loop->_head = clone_head;     // New loop header

  set_idom(loop_head,  loop_head ->in(LoopNode::EntryControl), dd);
  set_idom(clone_head, clone_head->in(LoopNode::EntryControl), dd);

  // Kill the clone's backedge
  Node *newcle = old_new[loop_end->_idx];
  _igvn.hash_delete( newcle );
  Node *one = _igvn.intcon(1);
  set_ctrl(one, C->root());
  newcle->set_req(1, one);
  // Force clone into same loop body
  uint max = loop->_body.size();
  for( uint k = 0; k < max; k++ ) {
    Node *old = loop->_body.at(k);
    Node *nnn = old_new[old->_idx];
    loop->_body.push(nnn);
    if (!has_ctrl(old))
      set_loop(nnn, loop);
  }

  loop->record_for_igvn();
}
1473 //------------------------------do_maximally_unroll----------------------------
1475 void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) {
1476 CountedLoopNode *cl = loop->_head->as_CountedLoop();
1477 assert(cl->has_exact_trip_count(), "trip count is not exact");
1478 assert(cl->trip_count() > 0, "");
1479 #ifndef PRODUCT
1480 if (TraceLoopOpts) {
1481 tty->print("MaxUnroll %d ", cl->trip_count());
1482 loop->dump_head();
1483 }
1484 #endif
1486 // If the loop is tripping an odd number of times, peel the odd iteration
1487 if ((cl->trip_count() & 1) == 1) {
1488 do_peeling(loop, old_new);
1489 }
1491 // Now it is tripping an even number of remaining times. Double the loop body.
1492 // Do not adjust pre-guards; they are not needed and do not exist.
1493 if (cl->trip_count() > 0) {
1494 assert((cl->trip_count() & 1) == 0, "missed peeling");
1495 do_unroll(loop, old_new, false);
1496 }
1497 }
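// Worked example (illustrative): with an exact trip count of 7, do_peeling()
// above peels one iteration, leaving an even count of 6; do_unroll() then
// doubles the body so each remaining trip covers two original iterations.
// Later rounds of loop opts can keep doubling until the loop collapses.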
1499 //------------------------------dominates_backedge---------------------------------
1500 // Returns true if ctrl is executed on every complete iteration
1501 bool IdealLoopTree::dominates_backedge(Node* ctrl) {
1502 assert(ctrl->is_CFG(), "must be control");
1503 Node* backedge = _head->as_Loop()->in(LoopNode::LoopBackControl);
1504 return _phase->dom_lca_internal(ctrl, backedge) == ctrl;
1505 }
1507 //------------------------------adjust_limit-----------------------------------
1508 // Helper function for add_constraint().
1509 Node* PhaseIdealLoop::adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl) {
1510 // Compute "I :: (limit-offset)/scale"
1511 Node *con = new (C) SubINode(rc_limit, offset);
1512 register_new_node(con, pre_ctrl);
1513 Node *X = new (C) DivINode(0, con, scale);
1514 register_new_node(X, pre_ctrl);
1516 // Adjust loop limit
1517 loop_limit = (stride_con > 0)
1518 ? (Node*)(new (C) MinINode(loop_limit, X))
1519 : (Node*)(new (C) MaxINode(loop_limit, X));
1520 register_new_node(loop_limit, pre_ctrl);
1521 return loop_limit;
1522 }
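// Worked example (illustrative values): with stride_con > 0, scale == 1,
// offset == 2, rc_limit == 10 and loop_limit == 100, X == (10-2)/1 == 8
// and the result is MIN(100, 8) == 8, i.e. the limit is trimmed so that
// 1*I+2 < 10 holds on every remaining iteration.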
1524 //------------------------------add_constraint---------------------------------
1525 // Constrain the main loop iterations so the conditions:
1526 // low_limit <= scale_con * I + offset < upper_limit
1527 // always holds true. That is, either increase the number of iterations in
1528 // the pre-loop or the post-loop until the condition holds true in the main
1529 // loop. Stride, scale, offset and limit are all loop invariant. Further,
1530 // stride and scale are constants (offset and limit often are).
1531 void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) {
1532 // For positive stride, the pre-loop limit always uses a MAX function
1533 // and the main loop a MIN function. For negative stride these are
1534 // reversed.
1536 // Also for positive stride*scale the affine function is increasing, so the
1537 // pre-loop must check for underflow and the post-loop for overflow.
1538 // Negative stride*scale reverses this; pre-loop checks for overflow and
1539 // post-loop for underflow.
1541 Node *scale = _igvn.intcon(scale_con);
1542 set_ctrl(scale, C->root());
1544 if ((stride_con^scale_con) >= 0) { // Use XOR to avoid overflow
1545 // The overflow limit: scale*I+offset < upper_limit
1546 // For main-loop compute
1547 // ( if (scale > 0) /* and stride > 0 */
1548 // I < (upper_limit-offset)/scale
1549 // else /* scale < 0 and stride < 0 */
1550 // I > (upper_limit-offset)/scale
1551 // )
1552 //
1553 // (upper_limit-offset) may overflow or underflow.
1554 // But that is fine, since the main loop will either run
1555 // fewer iterations or be skipped entirely in that case.
1556 *main_limit = adjust_limit(stride_con, scale, offset, upper_limit, *main_limit, pre_ctrl);
1558 // The underflow limit: low_limit <= scale*I+offset.
1559 // For pre-loop compute
1560 // NOT(scale*I+offset >= low_limit)
1561 // scale*I+offset < low_limit
1562 // ( if (scale > 0) /* and stride > 0 */
1563 // I < (low_limit-offset)/scale
1564 // else /* scale < 0 and stride < 0 */
1565 // I > (low_limit-offset)/scale
1566 // )
1568 if (low_limit->get_int() == -max_jint) {
1569 if (!RangeLimitCheck) return;
1570 // We need this guard when scale*pre_limit+offset >= limit
1571 // due to underflow. So we need to execute the pre-loop until
1572 // scale*I+offset >= min_int. But (min_int-offset) will
1573 // underflow when offset > 0, and X will be > original_limit
1574 // when stride > 0. To avoid this we replace a positive offset with 0.
1575 //
1576 // Also (min_int+1 == -max_int) is used instead of min_int here
1577 // to avoid a problem with scale == -1 (min_int/(-1) == min_int).
1578 Node* shift = _igvn.intcon(31);
1579 set_ctrl(shift, C->root());
1580 Node* sign = new (C) RShiftINode(offset, shift);
1581 register_new_node(sign, pre_ctrl);
1582 offset = new (C) AndINode(offset, sign);
1583 register_new_node(offset, pre_ctrl);
1584 } else {
1585 assert(low_limit->get_int() == 0, "wrong low limit for range check");
1586 // The only problem here is when offset == min_int,
1587 // since (0-min_int) == min_int. It may be fine for stride > 0,
1588 // but for stride < 0 X will be < original_limit. To avoid this,
1589 // max(pre_limit, original_limit) is used in do_range_check().
1590 }
1591 // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
1592 *pre_limit = adjust_limit((-stride_con), scale, offset, low_limit, *pre_limit, pre_ctrl);
1594 } else { // stride_con*scale_con < 0
1595 // For negative stride*scale pre-loop checks for overflow and
1596 // post-loop for underflow.
1597 //
1598 // The overflow limit: scale*I+offset < upper_limit
1599 // For pre-loop compute
1600 // NOT(scale*I+offset < upper_limit)
1601 // scale*I+offset >= upper_limit
1602 // scale*I+offset+1 > upper_limit
1603 // ( if (scale < 0) /* and stride > 0 */
1604 // I < (upper_limit-(offset+1))/scale
1605 // else /* scale > 0 and stride < 0 */
1606 // I > (upper_limit-(offset+1))/scale
1607 // )
1608 //
1609 // (upper_limit-offset-1) may underflow or overflow.
1610 // To avoid this, min(pre_limit, original_limit) is used
1611 // in do_range_check() for stride > 0, and max() for stride < 0.
1612 Node *one = _igvn.intcon(1);
1613 set_ctrl(one, C->root());
1615 Node *plus_one = new (C) AddINode(offset, one);
1616 register_new_node( plus_one, pre_ctrl );
1617 // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond);
1618 *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl);
1620 if (low_limit->get_int() == -max_jint) {
1621 if (!RangeLimitCheck) return;
1622 // We need this guard when scale*main_limit+offset >= limit
1623 // due to underflow. So we need to execute the main loop while
1624 // scale*I+offset+1 > min_int. But (min_int-offset-1) will
1625 // underflow when (offset+1) > 0, and X will be < main_limit
1626 // when scale < 0 (and stride > 0). To avoid this we replace
1627 // a positive (offset+1) with 0.
1628 //
1629 // Also (min_int+1 == -max_int) is used instead of min_int here
1630 // to avoid a problem with scale == -1 (min_int/(-1) == min_int).
1631 Node* shift = _igvn.intcon(31);
1632 set_ctrl(shift, C->root());
1633 Node* sign = new (C) RShiftINode(plus_one, shift);
1634 register_new_node(sign, pre_ctrl);
1635 plus_one = new (C) AndINode(plus_one, sign);
1636 register_new_node(plus_one, pre_ctrl);
1637 } else {
1638 assert(low_limit->get_int() == 0, "wrong low limit for range check");
1639 // The only problem here is when offset == max_int,
1640 // since (max_int+1) == min_int and (0-min_int) == min_int.
1641 // But that is fine, since the main loop will either run
1642 // fewer iterations or be skipped entirely in that case.
1643 }
1644 // The underflow limit: low_limit <= scale*I+offset.
1645 // For main-loop compute
1646 // scale*I+offset+1 > low_limit
1647 // ( if (scale < 0) /* and stride > 0 */
1648 // I < (low_limit-(offset+1))/scale
1649 // else /* scale > 0 and stride < 0 */
1650 // I > (low_limit-(offset+1))/scale
1651 // )
1653 *main_limit = adjust_limit(stride_con, scale, plus_one, low_limit, *main_limit, pre_ctrl);
1654 }
1655 }
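// Worked example (illustrative values): for the range check
// 0 <= 1*I + (-3) < limit with stride_con == 1 and scale_con == 1, the
// first branch above applies: the main-loop limit becomes
// MIN(main_limit, (limit-(-3))/1) == MIN(main_limit, limit+3), and the
// pre-loop limit becomes MAX(pre_limit, (0-(-3))/1) == MAX(pre_limit, 3),
// so the pre-loop runs until I >= 3 and I-3 can no longer underflow below 0.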
1658 //------------------------------is_scaled_iv---------------------------------
1659 // Return true if exp is a constant times an induction var
1660 bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, int* p_scale) {
1661 if (exp == iv) {
1662 if (p_scale != NULL) {
1663 *p_scale = 1;
1664 }
1665 return true;
1666 }
1667 int opc = exp->Opcode();
1668 if (opc == Op_MulI) {
1669 if (exp->in(1) == iv && exp->in(2)->is_Con()) {
1670 if (p_scale != NULL) {
1671 *p_scale = exp->in(2)->get_int();
1672 }
1673 return true;
1674 }
1675 if (exp->in(2) == iv && exp->in(1)->is_Con()) {
1676 if (p_scale != NULL) {
1677 *p_scale = exp->in(1)->get_int();
1678 }
1679 return true;
1680 }
1681 } else if (opc == Op_LShiftI) {
1682 if (exp->in(1) == iv && exp->in(2)->is_Con()) {
1683 if (p_scale != NULL) {
1684 *p_scale = 1 << exp->in(2)->get_int();
1685 }
1686 return true;
1687 }
1688 }
1689 return false;
1690 }
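// Worked examples (illustrative): is_scaled_iv(iv, iv, &s) sets s == 1;
// for exp == MulI(iv, ConI(5)) it sets s == 5; and for
// exp == LShiftI(iv, ConI(3)) it sets s == (1 << 3) == 8.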
1692 //-----------------------------is_scaled_iv_plus_offset------------------------------
1693 // Return true if exp is a simple induction variable expression: k1*iv + (invar + k2)
1694 bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth) {
1695 if (is_scaled_iv(exp, iv, p_scale)) {
1696 if (p_offset != NULL) {
1697 Node *zero = _igvn.intcon(0);
1698 set_ctrl(zero, C->root());
1699 *p_offset = zero;
1700 }
1701 return true;
1702 }
1703 int opc = exp->Opcode();
1704 if (opc == Op_AddI) {
1705 if (is_scaled_iv(exp->in(1), iv, p_scale)) {
1706 if (p_offset != NULL) {
1707 *p_offset = exp->in(2);
1708 }
1709 return true;
1710 }
1711 if (exp->in(2)->is_Con()) {
1712 Node* offset2 = NULL;
1713 if (depth < 2 &&
1714 is_scaled_iv_plus_offset(exp->in(1), iv, p_scale,
1715 p_offset != NULL ? &offset2 : NULL, depth+1)) {
1716 if (p_offset != NULL) {
1717 Node *ctrl_off2 = get_ctrl(offset2);
1718 Node* offset = new (C) AddINode(offset2, exp->in(2));
1719 register_new_node(offset, ctrl_off2);
1720 *p_offset = offset;
1721 }
1722 return true;
1723 }
1724 }
1725 } else if (opc == Op_SubI) {
1726 if (is_scaled_iv(exp->in(1), iv, p_scale)) {
1727 if (p_offset != NULL) {
1728 Node *zero = _igvn.intcon(0);
1729 set_ctrl(zero, C->root());
1730 Node *ctrl_off = get_ctrl(exp->in(2));
1731 Node* offset = new (C) SubINode(zero, exp->in(2));
1732 register_new_node(offset, ctrl_off);
1733 *p_offset = offset;
1734 }
1735 return true;
1736 }
1737 if (is_scaled_iv(exp->in(2), iv, p_scale)) {
1738 if (p_offset != NULL) {
1739 *p_scale *= -1;
1740 *p_offset = exp->in(1);
1741 }
1742 return true;
1743 }
1744 }
1745 return false;
1746 }
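// Worked example (illustrative): for exp == AddI(MulI(iv, ConI(4)), invar)
// the result is scale == 4 and offset == invar; for exp == SubI(invar, iv)
// the SubI case negates the scale, giving scale == -1 and offset == invar.
// The depth < 2 check bounds the recursion used for shapes such as
// (4*iv + invar) + k2, whose invariant part (invar + k2) is folded into a
// single offset node.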
1748 //------------------------------do_range_check---------------------------------
1749 // Eliminate range-checks and other trip-counter vs loop-invariant tests.
1750 void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
1751 #ifndef PRODUCT
1752 if (PrintOpto && VerifyLoopOptimizations) {
1753 tty->print("Range Check Elimination ");
1754 loop->dump_head();
1755 } else if (TraceLoopOpts) {
1756 tty->print("RangeCheck ");
1757 loop->dump_head();
1758 }
1759 #endif
1760 assert(RangeCheckElimination, "");
1761 CountedLoopNode *cl = loop->_head->as_CountedLoop();
1762 assert(cl->is_main_loop(), "");
1764 // protect against stride not being a constant
1765 if (!cl->stride_is_con())
1766 return;
1768 // Find the trip counter; we are iteration splitting based on it
1769 Node *trip_counter = cl->phi();
1770 // Find the main loop limit; we will trim its iterations
1771 // so they never trip the end tests
1772 Node *main_limit = cl->limit();
1774 // Need to find the main-loop zero-trip guard
1775 Node *ctrl = cl->in(LoopNode::EntryControl);
1776 assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "");
1777 Node *iffm = ctrl->in(0);
1778 assert(iffm->Opcode() == Op_If, "");
1779 Node *bolzm = iffm->in(1);
1780 assert(bolzm->Opcode() == Op_Bool, "");
1781 Node *cmpzm = bolzm->in(1);
1782 assert(cmpzm->is_Cmp(), "");
1783 Node *opqzm = cmpzm->in(2);
1784 // Cannot optimize a loop if the zero-trip Opaque1 node is optimized
1785 // away and then another round of loop opts is attempted.
1786 if (opqzm->Opcode() != Op_Opaque1)
1787 return;
1788 assert(opqzm->in(1) == main_limit, "do not understand situation");
1790 // Find the pre-loop limit; we will expand its iterations so
1791 // they never trip the low tests.
1792 Node *p_f = iffm->in(0);
1793 assert(p_f->Opcode() == Op_IfFalse, "");
1794 CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
1795 assert(pre_end->loopnode()->is_pre_loop(), "");
1796 Node *pre_opaq1 = pre_end->limit();
1797 // Occasionally it's possible for a pre-loop Opaque1 node to be
1798 // optimized away and then another round of loop opts attempted.
1799 // We cannot optimize this particular loop in that case.
1800 if (pre_opaq1->Opcode() != Op_Opaque1)
1801 return;
1802 Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1;
1803 Node *pre_limit = pre_opaq->in(1);
1805 // Where do we put new limit calculations
1806 Node *pre_ctrl = pre_end->loopnode()->in(LoopNode::EntryControl);
1808 // Ensure the original loop limit is available from the
1809 // pre-loop Opaque1 node.
1810 Node *orig_limit = pre_opaq->original_loop_limit();
1811 if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP)
1812 return;
1814 // Must know if it's a count-up or count-down loop
1816 int stride_con = cl->stride_con();
1817 Node *zero = _igvn.intcon(0);
1818 Node *one = _igvn.intcon(1);
1819 // Use symmetrical int range [-max_jint,max_jint]
1820 Node *mini = _igvn.intcon(-max_jint);
1821 set_ctrl(zero, C->root());
1822 set_ctrl(one, C->root());
1823 set_ctrl(mini, C->root());
1825 // Range checks that do not dominate the loop backedge (i.e.
1826 // are conditionally executed) can lengthen the pre-loop limit beyond
1827 // the original loop limit. To prevent this, the pre-loop limit is
1828 // (for stride > 0) MINed with the original loop limit (MAXed for
1829 // stride < 0) when some range check (rc) is conditionally
1830 // executed.
1831 bool conditional_rc = false;
1833 // Check loop body for tests of trip-counter plus loop-invariant vs
1834 // loop-invariant.
1835 for( uint i = 0; i < loop->_body.size(); i++ ) {
1836 Node *iff = loop->_body[i];
1837 if( iff->Opcode() == Op_If ) { // Test?
1839 // Test is an IfNode, has 2 projections. If BOTH are in the loop
1840 // we need loop unswitching instead of iteration splitting.
1841 Node *exit = loop->is_loop_exit(iff);
1842 if( !exit ) continue;
1843 int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0;
1845 // Get boolean condition to test
1846 Node *i1 = iff->in(1);
1847 if( !i1->is_Bool() ) continue;
1848 BoolNode *bol = i1->as_Bool();
1849 BoolTest b_test = bol->_test;
1850 // Flip sense of test if exit condition is flipped
1851 if( flip )
1852 b_test = b_test.negate();
1854 // Get compare
1855 Node *cmp = bol->in(1);
1857 // Look for trip_counter + offset vs limit
1858 Node *rc_exp = cmp->in(1);
1859 Node *limit = cmp->in(2);
1860 jint scale_con = 1; // Assume trip counter not scaled
1862 Node *limit_c = get_ctrl(limit);
1863 if( loop->is_member(get_loop(limit_c) ) ) {
1864 // Compare might have operands swapped; commute them
1865 b_test = b_test.commute();
1866 rc_exp = cmp->in(2);
1867 limit = cmp->in(1);
1868 limit_c = get_ctrl(limit);
1869 if( loop->is_member(get_loop(limit_c) ) )
1870 continue; // Both inputs are loop varying; cannot RCE
1871 }
1872 // Here we know 'limit' is loop invariant
1874 // 'limit' may be pinned below the zero-trip test (probably from a
1875 // previous round of RCE), in which case it can't be used in the
1876 // zero-trip test expression, which must occur before the zero test's if.
1877 if( limit_c == ctrl ) {
1878 continue; // Don't rce this check but continue looking for other candidates.
1879 }
1881 // Check for scaled induction variable plus an offset
1882 Node *offset = NULL;
1884 if (!is_scaled_iv_plus_offset(rc_exp, trip_counter, &scale_con, &offset)) {
1885 continue;
1886 }
1888 Node *offset_c = get_ctrl(offset);
1889 if( loop->is_member( get_loop(offset_c) ) )
1890 continue; // Offset is not really loop invariant
1891 // Here we know 'offset' is loop invariant.
1893 // As above for the 'limit', the 'offset' may be pinned below the
1894 // zero-trip test.
1895 if( offset_c == ctrl ) {
1896 continue; // Don't rce this check but continue looking for other candidates.
1897 }
1898 #ifdef ASSERT
1899 if (TraceRangeLimitCheck) {
1900 tty->print_cr("RC bool node%s", flip ? " flipped:" : ":");
1901 bol->dump(2);
1902 }
1903 #endif
1904 // At this point we have the expression as:
1905 // scale_con * trip_counter + offset :: limit
1906 // where scale_con, offset and limit are loop invariant. Trip_counter
1907 // monotonically increases by stride_con, a constant. Both (or either)
1908 // stride_con and scale_con can be negative, which will flip the
1909 // sense of the test.
1911 // Adjust pre and main loop limits to guard the correct iteration set
1912 if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests
1913 if( b_test._test == BoolTest::lt ) { // Range checks always use lt
1914 // The underflow and overflow limits: 0 <= scale*I+offset < limit
1915 add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit );
1916 if (!conditional_rc) {
1917 // (0-offset)/scale could be outside of loop iterations range.
1918 conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck;
1919 }
1920 } else {
1921 #ifndef PRODUCT
1922 if( PrintOpto )
1923 tty->print_cr("missed RCE opportunity");
1924 #endif
1925 continue; // In release mode, ignore it
1926 }
1927 } else { // Otherwise work on normal compares
1928 switch( b_test._test ) {
1929 case BoolTest::gt:
1930 // Fall into GE case
1931 case BoolTest::ge:
1932 // Convert (I*scale+offset) >= Limit to (I*(-scale)+(-offset)) <= -Limit
1933 scale_con = -scale_con;
1934 offset = new (C) SubINode( zero, offset );
1935 register_new_node( offset, pre_ctrl );
1936 limit = new (C) SubINode( zero, limit );
1937 register_new_node( limit, pre_ctrl );
1938 // Fall into LE case
1939 case BoolTest::le:
1940 if (b_test._test != BoolTest::gt) {
1941 // Convert X <= Y to X < Y+1
1942 limit = new (C) AddINode( limit, one );
1943 register_new_node( limit, pre_ctrl );
1944 }
1945 // Fall into LT case
1946 case BoolTest::lt:
1947 // The underflow and overflow limits: MIN_INT <= scale*I+offset < limit
1948 // Note: (MIN_INT+1 == -MAX_INT) is used instead of MIN_INT here
1949 // to avoid problem with scale == -1: MIN_INT/(-1) == MIN_INT.
1950 add_constraint( stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit );
1951 if (!conditional_rc) {
1952 // ((MIN_INT+1)-offset)/scale could be outside of loop iterations range.
1953 // Note: negative offset is replaced with 0 but (MIN_INT+1)/scale could
1954 // still be outside of loop range.
1955 conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck;
1956 }
1957 break;
1958 default:
1959 #ifndef PRODUCT
1960 if( PrintOpto )
1961 tty->print_cr("missed RCE opportunity");
1962 #endif
1963 continue; // Unhandled case
1964 }
1965 }
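// Worked example (illustrative): a test 2*I + off >= L is rewritten by the
// gt/ge case above to (-2)*I + (-off) <= -L, and the le case then turns it
// into (-2)*I + (-off) < (-L + 1), so only the BoolTest::lt shape ever
// reaches add_constraint().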
1967 // Kill the eliminated test
1968 C->set_major_progress();
1969 Node *kill_con = _igvn.intcon( 1-flip );
1970 set_ctrl(kill_con, C->root());
1971 _igvn.replace_input_of(iff, 1, kill_con);
1972 // Find surviving projection
1973 assert(iff->is_If(), "");
1974 ProjNode* dp = ((IfNode*)iff)->proj_out(1-flip);
1975 // Find loads off the surviving projection; remove their control edge
1976 for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
1977 Node* cd = dp->fast_out(i); // Control-dependent node
1978 if (cd->is_Load() && cd->depends_only_on_test()) { // Loads can now float around in the loop
1979 // Allow the load to float around in the loop, or before it
1980 // but NOT before the pre-loop.
1981 _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL
1982 --i;
1983 --imax;
1984 }
1985 }
1987 } // End of is IF
1989 }
1991 // Update loop limits
1992 if (conditional_rc) {
1993 pre_limit = (stride_con > 0) ? (Node*)new (C) MinINode(pre_limit, orig_limit)
1994 : (Node*)new (C) MaxINode(pre_limit, orig_limit);
1995 register_new_node(pre_limit, pre_ctrl);
1996 }
1997 _igvn.hash_delete(pre_opaq);
1998 pre_opaq->set_req(1, pre_limit);
2000 // Note: we are making the main loop limit no longer precise;
2001 // we need to round up based on the stride.
2002 cl->set_nonexact_trip_count();
2003 if (!LoopLimitCheck && stride_con != 1 && stride_con != -1) { // Cutout for common case
2004 // "Standard" round-up logic: ([main_limit-init+(y-1)]/y)*y+init
2005 // Hopefully, compiler will optimize for powers of 2.
2006 Node *ctrl = get_ctrl(main_limit);
2007 Node *stride = cl->stride();
2008 Node *init = cl->init_trip();
2009 Node *span = new (C) SubINode(main_limit,init);
2010 register_new_node(span,ctrl);
2011 Node *rndup = _igvn.intcon(stride_con + ((stride_con>0)?-1:1));
2012 Node *add = new (C) AddINode(span,rndup);
2013 register_new_node(add,ctrl);
2014 Node *div = new (C) DivINode(0,add,stride);
2015 register_new_node(div,ctrl);
2016 Node *mul = new (C) MulINode(div,stride);
2017 register_new_node(mul,ctrl);
2018 Node *newlim = new (C) AddINode(mul,init);
2019 register_new_node(newlim,ctrl);
2020 main_limit = newlim;
2021 }
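// Worked example (illustrative values): with init == 0, main_limit == 10
// and stride_con == 3, span == 10, rndup == 2, add == 12, div == 4,
// mul == 12 and newlim == 12 -- the smallest limit that is init plus a
// whole number of strides and still admits the same 4 trips (0, 3, 6, 9).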
2023 Node *main_cle = cl->loopexit();
2024 Node *main_bol = main_cle->in(1);
2025 // Hacking loop bounds; need private copies of exit test
2026 if( main_bol->outcnt() > 1 ) {// BoolNode shared?
2027 _igvn.hash_delete(main_cle);
2028 main_bol = main_bol->clone();// Clone a private BoolNode
2029 register_new_node( main_bol, main_cle->in(0) );
2030 main_cle->set_req(1,main_bol);
2031 }
2032 Node *main_cmp = main_bol->in(1);
2033 if( main_cmp->outcnt() > 1 ) { // CmpNode shared?
2034 _igvn.hash_delete(main_bol);
2035 main_cmp = main_cmp->clone();// Clone a private CmpNode
2036 register_new_node( main_cmp, main_cle->in(0) );
2037 main_bol->set_req(1,main_cmp);
2038 }
2039 // Hack the now-private loop bounds
2040 _igvn.replace_input_of(main_cmp, 2, main_limit);
2041 // The OpaqueNode is unshared by design
2042 assert( opqzm->outcnt() == 1, "cannot hack shared node" );
2043 _igvn.replace_input_of(opqzm, 1, main_limit);
2044 }
2046 //------------------------------DCE_loop_body----------------------------------
2047 // Remove trivially dead code from the loop body
2048 void IdealLoopTree::DCE_loop_body() {
2049 for( uint i = 0; i < _body.size(); i++ )
2050 if( _body.at(i)->outcnt() == 0 )
2051 _body.map( i--, _body.pop() );
2052 }
2055 //------------------------------adjust_loop_exit_prob--------------------------
2056 // Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage.
2057 // Replace with a 1-in-10 exit guess.
2058 void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) {
2059 Node *test = tail();
2060 while( test != _head ) {
2061 uint top = test->Opcode();
2062 if( top == Op_IfTrue || top == Op_IfFalse ) {
2063 int test_con = ((ProjNode*)test)->_con;
2064 assert(top == (uint)(test_con? Op_IfTrue: Op_IfFalse), "sanity");
2065 IfNode *iff = test->in(0)->as_If();
2066 if( iff->outcnt() == 2 ) { // Ignore dead tests
2067 Node *bol = iff->in(1);
2068 if( bol && bol->req() > 1 && bol->in(1) &&
2069 ((bol->in(1)->Opcode() == Op_StorePConditional ) ||
2070 (bol->in(1)->Opcode() == Op_StoreIConditional ) ||
2071 (bol->in(1)->Opcode() == Op_StoreLConditional ) ||
2072 (bol->in(1)->Opcode() == Op_CompareAndSwapI ) ||
2073 (bol->in(1)->Opcode() == Op_CompareAndSwapL ) ||
2074 (bol->in(1)->Opcode() == Op_CompareAndSwapP ) ||
2075 (bol->in(1)->Opcode() == Op_CompareAndSwapN )))
2076 return; // Allocation loops RARELY take backedge
2077 // Find the OTHER exit path from the IF
2078 Node* ex = iff->proj_out(1-test_con);
2079 float p = iff->_prob;
2080 if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) {
2081 if( top == Op_IfTrue ) {
2082 if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) {
2083 iff->_prob = PROB_STATIC_FREQUENT;
2084 }
2085 } else {
2086 if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) {
2087 iff->_prob = PROB_STATIC_INFREQUENT;
2088 }
2089 }
2090 }
2091 }
2092 }
2093 test = phase->idom(test);
2094 }
2095 }
2098 //------------------------------policy_do_remove_empty_loop--------------------
2099 // Micro-benchmark spamming. Policy is to always remove empty loops.
2100 // The 'DO' part is to replace the trip counter with the value it will
2101 // have on the last iteration. This will break the loop.
2102 bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) {
2103 // The body must be no larger than an empty loop
2104 if (_body.size() > EMPTY_LOOP_SIZE)
2105 return false;
2107 if (!_head->is_CountedLoop())
2108 return false; // Dead loop
2109 CountedLoopNode *cl = _head->as_CountedLoop();
2110 if (!cl->is_valid_counted_loop())
2111 return false; // Malformed loop
2112 if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
2113 return false; // Infinite loop
2115 #ifdef ASSERT
2116 // Ensure only one phi which is the iv.
2117 Node* iv = NULL;
2118 for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) {
2119 Node* n = cl->fast_out(i);
2120 if (n->Opcode() == Op_Phi) {
2121 assert(iv == NULL, "Too many phis" );
2122 iv = n;
2123 }
2124 }
2125 assert(iv == cl->phi(), "Wrong phi" );
2126 #endif
2128 // Main and post loops have an explicitly created zero-trip guard
2129 bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop();
2130 if (needs_guard) {
2131 // Skip the guard if the init and limit value ranges do not overlap.
2132 const TypeInt* init_t = phase->_igvn.type(cl->init_trip())->is_int();
2133 const TypeInt* limit_t = phase->_igvn.type(cl->limit())->is_int();
2134 int stride_con = cl->stride_con();
2135 if (stride_con > 0) {
2136 needs_guard = (init_t->_hi >= limit_t->_lo);
2137 } else {
2138 needs_guard = (init_t->_lo <= limit_t->_hi);
2139 }
2140 }
2141 if (needs_guard) {
2142 // Check for an obvious zero trip guard.
2143 Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl));
2144 if (inctrl->Opcode() == Op_IfTrue) {
2145 // The guard test should look just like the exit test of a CountedLoop
2146 Node* iff = inctrl->in(0);
2147 if (iff->is_If()) {
2148 Node* bol = iff->in(1);
2149 if (bol->is_Bool() && bol->as_Bool()->_test._test == cl->loopexit()->test_trip()) {
2150 Node* cmp = bol->in(1);
2151 if (cmp->is_Cmp() && cmp->in(1) == cl->init_trip() && cmp->in(2) == cl->limit()) {
2152 needs_guard = false;
2153 }
2154 }
2155 }
2156 }
2157 }
2159 #ifndef PRODUCT
2160 if (PrintOpto) {
2161 tty->print("Removing empty loop with%s zero trip guard", needs_guard ? "out" : "");
2162 this->dump_head();
2163 } else if (TraceLoopOpts) {
2164 tty->print("Empty with%s zero trip guard ", needs_guard ? "out" : "");
2165 this->dump_head();
2166 }
2167 #endif
2169 if (needs_guard) {
2170 // Peel the loop to ensure there's a zero trip guard
2171 Node_List old_new;
2172 phase->do_peeling(this, old_new);
2173 }
2175 // Replace the phi at loop head with the final value of the last
2176 // iteration. Then the CountedLoopEnd will collapse (backedge never
2177 // taken) and all loop-invariant uses of the exit values will be correct.
2178 Node *phi = cl->phi();
2179 Node *exact_limit = phase->exact_limit(this);
2180 if (exact_limit != cl->limit()) {
2181 // We also need to replace the original limit to collapse loop exit.
2182 Node* cmp = cl->loopexit()->cmp_node();
2183 assert(cl->limit() == cmp->in(2), "sanity");
2184 phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist
2185 phase->_igvn.replace_input_of(cmp, 2, exact_limit); // put cmp on worklist
2186 }
2187 // Note: the final value after the increment should not overflow, since
2188 // the counted loop has a limit check predicate.
2189 Node *final = new (phase->C) SubINode( exact_limit, cl->stride() );
2190 phase->register_new_node(final,cl->in(LoopNode::EntryControl));
2191 phase->_igvn.replace_node(phi,final);
2192 phase->C->set_major_progress();
2193 return true;
2194 }
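// Worked example (illustrative): for the empty loop
// for (int i = 0; i < 10; i++) { } with stride 1, exact_limit is 10 and
// final == 10 - 1 == 9, the phi's value on the last iteration; replacing
// the phi with 9 lets the exit test constant-fold and the backedge die.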
2196 //------------------------------policy_do_one_iteration_loop-------------------
2197 // Convert one iteration loop into normal code.
2198 bool IdealLoopTree::policy_do_one_iteration_loop( PhaseIdealLoop *phase ) {
2199 if (!_head->as_Loop()->is_valid_counted_loop())
2200 return false; // Only for counted loop
2202 CountedLoopNode *cl = _head->as_CountedLoop();
2203 if (!cl->has_exact_trip_count() || cl->trip_count() != 1) {
2204 return false;
2205 }
2207 #ifndef PRODUCT
2208 if (TraceLoopOpts) {
2209 tty->print("OneIteration ");
2210 this->dump_head();
2211 }
2212 #endif
2214 Node *init_n = cl->init_trip();
2215 #ifdef ASSERT
2216 // Loop boundaries should be constant since trip count is exact.
2217 assert(init_n->get_int() + cl->stride_con() >= cl->limit()->get_int(), "should be one iteration");
2218 #endif
2219 // Replace the phi at loop head with the value of the init_trip.
2220 // Then the CountedLoopEnd will collapse (backedge will not be taken)
2221 // and all loop-invariant uses of the exit values will be correct.
2222 phase->_igvn.replace_node(cl->phi(), cl->init_trip());
2223 phase->C->set_major_progress();
2224 return true;
2225 }
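// Worked example (illustrative): for (int i = 5; i < 6; i++) { ... } trips
// exactly once, so every use of the phi inside the body sees 5; after
// replace_node(phi, init_trip) the exit test folds and the body becomes
// straight-line code.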
2227 //=============================================================================
2228 //------------------------------iteration_split_impl---------------------------
2229 bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) {
2230 // Compute exact loop trip count if possible.
2231 compute_exact_trip_count(phase);
2233 // Convert one iteration loop into normal code.
2234 if (policy_do_one_iteration_loop(phase))
2235 return true;
2237 // Check and remove empty loops (spam micro-benchmarks)
2238 if (policy_do_remove_empty_loop(phase))
2239 return true; // Here we removed an empty loop
2241 bool should_peel = policy_peeling(phase); // Should we peel?
2243 bool should_unswitch = policy_unswitching(phase);
2245 // Non-counted loops may be peeled; exactly 1 iteration is peeled.
2246 // This removes loop-invariant tests (usually null checks).
2247 if (!_head->is_CountedLoop()) { // Non-counted loop
2248 if (PartialPeelLoop && phase->partial_peel(this, old_new)) {
2249 // Partial peel succeeded so terminate this round of loop opts
2250 return false;
2251 }
2252 if (should_peel) { // Should we peel?
2253 #ifndef PRODUCT
2254 if (PrintOpto) tty->print_cr("should_peel");
2255 #endif
2256 phase->do_peeling(this,old_new);
2257 } else if (should_unswitch) {
2258 phase->do_unswitching(this, old_new);
2259 }
2260 return true;
2261 }
2262 CountedLoopNode *cl = _head->as_CountedLoop();
2264 if (!cl->is_valid_counted_loop()) return true; // Ignore various kinds of broken loops
2266 // Do nothing special to pre- and post- loops
2267 if (cl->is_pre_loop() || cl->is_post_loop()) return true;
2269 // Compute loop trip count from profile data
2270 compute_profile_trip_cnt(phase);
2272 // Before attempting fancy unrolling, RCE or alignment, see if we want
2273 // to completely unroll this loop or do loop unswitching.
2274 if (cl->is_normal_loop()) {
2275 if (should_unswitch) {
2276 phase->do_unswitching(this, old_new);
2277 return true;
2278 }
2279 bool should_maximally_unroll = policy_maximally_unroll(phase);
2280 if (should_maximally_unroll) {
2281 // Here we did some unrolling and peeling. Eventually we will
2282 // completely unroll this loop and it will no longer be a loop.
2283 phase->do_maximally_unroll(this,old_new);
2284 return true;
2285 }
2286 }
2288 // Skip next optimizations if running low on nodes. Note that
2289 // policy_unswitching and policy_maximally_unroll have this check.
2290 uint nodes_left = MaxNodeLimit - (uint) phase->C->live_nodes();
2291 if ((2 * _body.size()) > nodes_left) {
2292 return true;
2293 }
2295 // Counted loops may be peeled, may need some iterations run up
2296 // front for RCE, and may want to align loop refs to a cache
2297 // line. Thus we clone a full loop up front whose trip count is
2298 // at least 1 (if peeling), but may be several more.
2300 // The main loop will start cache-line aligned with at least 1
2301 // iteration of the unrolled body (zero-trip test required) and
2302 // will have some range checks removed.
2304 // A post-loop will finish any odd iterations (leftover after
2305 // unrolling), plus any needed for RCE purposes.
2307 bool should_unroll = policy_unroll(phase);
2309 bool should_rce = policy_range_check(phase);
2311 bool should_align = policy_align(phase);
2313 // If not RCE'ing (iteration splitting) or Aligning, then we do not
2314 // need a pre-loop. We may still need to peel an initial iteration but
2315 // we will not be needing an unknown number of pre-iterations.
2316 //
2317 // Basically, if may_rce_align reports FALSE first time through,
2318 // we will not be able to later do RCE or Aligning on this loop.
2319 bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align;
2321 // If we have any of these conditions (RCE, alignment, unrolling) met, then
2322 // we switch to the pre-/main-/post-loop model. This model also covers
2323 // peeling.
2324 if (should_rce || should_align || should_unroll) {
2325 if (cl->is_normal_loop()) // Convert to 'pre/main/post' loops
2326 phase->insert_pre_post_loops(this,old_new, !may_rce_align);
2328 // Adjust the pre- and main-loop limits to let the pre and post loops run
2329 // with full checks, but the main-loop with no checks. Remove said
2330 // checks from the main body.
2331 if (should_rce)
2332 phase->do_range_check(this,old_new);
2334 // Double loop body for unrolling. Adjust the minimum-trip test (will do
2335 // twice as many iterations as before) and the main body limit (only do
2336 // an even number of trips). If we are peeling, we might enable some RCE
2337 // and we'd rather unroll the post-RCE'd loop SO... do not unroll if
2338 // peeling.
2339 if (should_unroll && !should_peel)
2340 phase->do_unroll(this,old_new, true);
2342 // Adjust the pre-loop limits to align the main body
2343 // iterations.
2344 if (should_align)
2345 Unimplemented();
2347 } else { // Else we have an unchanged counted loop
2348 if (should_peel) // Might want to peel but do nothing else
2349 phase->do_peeling(this,old_new);
2350 }
2351 return true;
2352 }
2355 //=============================================================================
2356 //------------------------------iteration_split--------------------------------
2357 bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) {
2358 // Recursively iteration split nested loops
2359 if (_child && !_child->iteration_split(phase, old_new))
2360 return false;
2362 // Clean out prior deadwood
2363 DCE_loop_body();
2366 // Look for loop-exit tests with my 50/50 guesses from the Parsing stage.
2367 // Replace with a 1-in-10 exit guess.
2368 if (_parent /*not the root loop*/ &&
2369 !_irreducible &&
2370 // Also ignore the occasional dead backedge
2371 !tail()->is_top()) {
2372 adjust_loop_exit_prob(phase);
2373 }
2375 // Gate unrolling, RCE and peeling efforts.
2376 if (!_child && // If not an inner loop, do not split
2377 !_irreducible &&
2378 _allow_optimizations &&
2379 !tail()->is_top()) { // Also ignore the occasional dead backedge
2380 if (!_has_call) {
2381 if (!iteration_split_impl(phase, old_new)) {
2382 return false;
2383 }
2384 } else if (policy_unswitching(phase)) {
2385 phase->do_unswitching(this, old_new);
2386 }
2387 }
2389 // Minor offset re-organization to remove loop-fallout uses of
2390 // trip counter when there was no major reshaping.
2391 phase->reorg_offsets(this);
2393 if (_next && !_next->iteration_split(phase, old_new))
2394 return false;
2395 return true;
2396 }
2399 //=============================================================================
2400 // Process all the loops in the loop tree and replace any fill
2401 // patterns with an intrinsic version.
2402 bool PhaseIdealLoop::do_intrinsify_fill() {
2403 bool changed = false;
2404 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
2405 IdealLoopTree* lpt = iter.current();
2406 changed |= intrinsify_fill(lpt);
2407 }
2408 return changed;
2409 }
2412 // Examine an inner loop looking for a single store of an invariant
2413 // value in a unit-stride loop.
2414 bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value,
2415 Node*& shift, Node*& con) {
2416 const char* msg = NULL;
2417 Node* msg_node = NULL;
2419 store_value = NULL;
2420 con = NULL;
2421 shift = NULL;
2423 // Process the loop looking for stores. If there are multiple
2424 // stores or extra control flow, give up at this point.
2425 CountedLoopNode* head = lpt->_head->as_CountedLoop();
2426 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2427 Node* n = lpt->_body.at(i);
2428 if (n->outcnt() == 0) continue; // Ignore dead
2429 if (n->is_Store()) {
2430 if (store != NULL) {
2431 msg = "multiple stores";
2432 break;
2433 }
2434 int opc = n->Opcode();
2435 if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreNKlass || opc == Op_StoreCM) {
2436 msg = "oop fills not handled";
2437 break;
2438 }
2439 Node* value = n->in(MemNode::ValueIn);
2440 if (!lpt->is_invariant(value)) {
2441 msg = "variant store value";
2442 } else if (!_igvn.type(n->in(MemNode::Address))->isa_aryptr()) {
2443 msg = "not array address";
2444 }
2445 store = n;
2446 store_value = value;
2447 } else if (n->is_If() && n != head->loopexit()) {
2448 msg = "extra control flow";
2449 msg_node = n;
2450 }
2451 }
2453 if (store == NULL) {
2454 // No store in loop
2455 return false;
2456 }
2458 if (msg == NULL && head->stride_con() != 1) {
2459 // could handle negative strides too
2460 if (head->stride_con() < 0) {
2461 msg = "negative stride";
2462 } else {
2463 msg = "non-unit stride";
2464 }
2465 }
2467 if (msg == NULL && !store->in(MemNode::Address)->is_AddP()) {
2468 msg = "can't handle store address";
2469 msg_node = store->in(MemNode::Address);
2470 }
2472 if (msg == NULL &&
2473 (!store->in(MemNode::Memory)->is_Phi() ||
2474 store->in(MemNode::Memory)->in(LoopNode::LoopBackControl) != store)) {
2475 msg = "store memory isn't proper phi";
2476 msg_node = store->in(MemNode::Memory);
2477 }
2479 // Make sure there is an appropriate fill routine
2480 BasicType t = store->as_Mem()->memory_type();
2481 const char* fill_name;
2482 if (msg == NULL &&
2483 StubRoutines::select_fill_function(t, false, fill_name) == NULL) {
2484 msg = "unsupported store";
2485 msg_node = store;
2486 }
2488 if (msg != NULL) {
2489 #ifndef PRODUCT
2490 if (TraceOptimizeFill) {
2491 tty->print_cr("not fill intrinsic candidate: %s", msg);
2492 if (msg_node != NULL) msg_node->dump();
2493 }
2494 #endif
2495 return false;
2496 }
2498 // Make sure the address expression can be handled. It should be
2499 // head->phi * elsize + con. head->phi might have a ConvI2L.
2500 Node* elements[4];
2501 Node* conv = NULL;
2502 bool found_index = false;
2503 int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
2504 for (int e = 0; e < count; e++) {
2505 Node* n = elements[e];
2506 if (n->is_Con() && con == NULL) {
2507 con = n;
2508 } else if (n->Opcode() == Op_LShiftX && shift == NULL) {
2509 Node* value = n->in(1);
2510 #ifdef _LP64
2511 if (value->Opcode() == Op_ConvI2L) {
2512 conv = value;
2513 value = value->in(1);
2514 }
2515 #endif
2516 if (value != head->phi()) {
2517 msg = "unhandled shift in address";
2518 } else {
2519 if (type2aelembytes(store->as_Mem()->memory_type(), true) != (1 << n->in(2)->get_int())) {
2520 msg = "scale doesn't match";
2521 } else {
2522 found_index = true;
2523 shift = n;
2524 }
2525 }
2526 } else if (n->Opcode() == Op_ConvI2L && conv == NULL) {
2527 if (n->in(1) == head->phi()) {
2528 found_index = true;
2529 conv = n;
2530 } else {
2531 msg = "unhandled input to ConvI2L";
2532 }
2533 } else if (n == head->phi()) {
2534 // no shift, check below for allowed cases
2535 found_index = true;
2536 } else {
2537 msg = "unhandled node in address";
2538 msg_node = n;
2539 }
2540 }
2542 if (count == -1) {
2543 msg = "malformed address expression";
2544 msg_node = store;
2545 }
2547 if (!found_index) {
2548 msg = "missing use of index";
2549 }
2551 // byte-sized items won't have a shift
2552 if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) {
2553 msg = "can't find shift";
2554 msg_node = store;
2555 }
2557 if (msg != NULL) {
2558 #ifndef PRODUCT
2559 if (TraceOptimizeFill) {
2560 tty->print_cr("not fill intrinsic: %s", msg);
2561 if (msg_node != NULL) msg_node->dump();
2562 }
2563 #endif
2564 return false;
2565 }
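// Worked example (illustrative; node shapes reconstructed, not from the
// source): for a fill loop "a[i] = v" over an int[] array, unpack_offsets()
// yields roughly { LShiftX(ConvI2L(phi), 2), ConX(header_offset) }: con
// picks up the constant header offset, shift the LShiftX whose scale
// (1 << 2 == 4 bytes) must match the element size, and the phi underneath
// satisfies found_index.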
2567 // Now make sure all the other nodes in the loop can be handled
2568 VectorSet ok(Thread::current()->resource_area());
2570 // store related values are ok
2571 ok.set(store->_idx);
2572 ok.set(store->in(MemNode::Memory)->_idx);
2574 CountedLoopEndNode* loop_exit = head->loopexit();
2575 guarantee(loop_exit != NULL, "no loop exit node");
2577 // Loop structure is ok
2578 ok.set(head->_idx);
2579 ok.set(loop_exit->_idx);
2580 ok.set(head->phi()->_idx);
2581 ok.set(head->incr()->_idx);
2582 ok.set(loop_exit->cmp_node()->_idx);
2583 ok.set(loop_exit->in(1)->_idx);
2585 // Address elements are ok
2586 if (con) ok.set(con->_idx);
2587 if (shift) ok.set(shift->_idx);
2588 if (conv) ok.set(conv->_idx);
2590 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2591 Node* n = lpt->_body.at(i);
2592 if (n->outcnt() == 0) continue; // Ignore dead
2593 if (ok.test(n->_idx)) continue;
2594 // Backedge projection is ok
2595 if (n->is_IfTrue() && n->in(0) == loop_exit) continue;
2596 if (!n->is_AddP()) {
2597 msg = "unhandled node";
2598 msg_node = n;
2599 break;
2600 }
2601 }
2603 // Make sure no unexpected values are used outside the loop
2604 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
2605 Node* n = lpt->_body.at(i);
2606 // These values can be replaced with other nodes if they are used
2607 // outside the loop.
2608 if (n == store || n == loop_exit || n == head->incr() || n == store->in(MemNode::Memory)) continue;
2609 for (SimpleDUIterator iter(n); iter.has_next(); iter.next()) {
2610 Node* use = iter.get();
2611 if (!lpt->_body.contains(use)) {
2612 msg = "node is used outside loop";
2613 // lpt->_body.dump();
2614 msg_node = n;
2615 break;
2616 }
2617 }
2618 }
2620 #ifdef ASSERT
2621 if (TraceOptimizeFill) {
2622 if (msg != NULL) {
2623 tty->print_cr("no fill intrinsic: %s", msg);
2624 if (msg_node != NULL) msg_node->dump();
2625 } else {
2626 tty->print_cr("fill intrinsic for:");
2627 }
2628 store->dump();
2629 if (Verbose) {
2630 lpt->_body.dump();
2631 }
2632 }
2633 #endif
2635 return msg == NULL;
2636 }
2640 bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) {
2641 // Only for counted inner loops
2642 if (!lpt->is_counted() || !lpt->is_inner()) {
2643 return false;
2644 }
2646 // Must have constant stride
2647 CountedLoopNode* head = lpt->_head->as_CountedLoop();
2648 if (!head->is_valid_counted_loop() || !head->is_normal_loop()) {
2649 return false;
2650 }
2652 // Check that the body only contains a store of a loop invariant
2653 // value that is indexed by the loop phi.
2654 Node* store = NULL;
2655 Node* store_value = NULL;
2656 Node* shift = NULL;
2657 Node* offset = NULL;
2658 if (!match_fill_loop(lpt, store, store_value, shift, offset)) {
2659 return false;
2660 }
2662 #ifndef PRODUCT
2663 if (TraceLoopOpts) {
2664 tty->print("ArrayFill ");
2665 lpt->dump_head();
2666 }
2667 #endif
2669 // Now replace the whole loop body by a call to a fill routine that
2670 // covers the same region as the loop.
2671 Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base);
2673 // Build an expression for the beginning of the copy region
2674 Node* index = head->init_trip();
2675 #ifdef _LP64
2676 index = new (C) ConvI2LNode(index);
2677 _igvn.register_new_node_with_optimizer(index);
2678 #endif
2679 if (shift != NULL) {
2680 // byte arrays don't require a shift but others do.
2681 index = new (C) LShiftXNode(index, shift->in(2));
2682 _igvn.register_new_node_with_optimizer(index);
2683 }
2684 index = new (C) AddPNode(base, base, index);
2685 _igvn.register_new_node_with_optimizer(index);
2686 Node* from = new (C) AddPNode(base, index, offset);
2687 _igvn.register_new_node_with_optimizer(from);
2688 // Compute the number of elements to copy
2689 Node* len = new (C) SubINode(head->limit(), head->init_trip());
2690 _igvn.register_new_node_with_optimizer(len);
2692 BasicType t = store->as_Mem()->memory_type();
2693 bool aligned = false;
2694 if (offset != NULL && head->init_trip()->is_Con()) {
2695 int element_size = type2aelembytes(t);
2696 aligned = (offset->find_intptr_t_type()->get_con() + head->init_trip()->get_int() * element_size) % HeapWordSize == 0;
2697 }
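// Worked example (illustrative; header offset assumed): for an int[] fill
// starting at init_trip == 2 with con == 16, the code above builds
// from == base + (2 << 2) + 16 == base + 24 and len == limit - 2;
// 'aligned' is then ((16 + 2*4) % HeapWordSize == 0), which holds on a
// 64-bit VM (24 % 8 == 0).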
2699 // Build a call to the fill routine
2700 const char* fill_name;
2701 address fill = StubRoutines::select_fill_function(t, aligned, fill_name);
2702 assert(fill != NULL, "what?");
2704 // Convert float/double to int/long for fill routines
2705 if (t == T_FLOAT) {
2706 store_value = new (C) MoveF2INode(store_value);
2707 _igvn.register_new_node_with_optimizer(store_value);
2708 } else if (t == T_DOUBLE) {
2709 store_value = new (C) MoveD2LNode(store_value);
2710 _igvn.register_new_node_with_optimizer(store_value);
2711 }
2713 if (CCallingConventionRequiresIntsAsLongs &&
2714 // See StubRoutines::select_fill_function for types. FLOAT has been converted to INT.
2715 (t == T_FLOAT || t == T_INT || is_subword_type(t))) {
2716 store_value = new (C) ConvI2LNode(store_value);
2717 _igvn.register_new_node_with_optimizer(store_value);
2718 }
2720 Node* mem_phi = store->in(MemNode::Memory);
2721 Node* result_ctrl;
2722 Node* result_mem;
2723 const TypeFunc* call_type = OptoRuntime::array_fill_Type();
2724 CallLeafNode *call = new (C) CallLeafNoFPNode(call_type, fill,
2725 fill_name, TypeAryPtr::get_array_body_type(t));
2726 uint cnt = 0;
2727 call->init_req(TypeFunc::Parms + cnt++, from);
2728 call->init_req(TypeFunc::Parms + cnt++, store_value);
2729 if (CCallingConventionRequiresIntsAsLongs) {
2730 call->init_req(TypeFunc::Parms + cnt++, C->top());
2731 }
2732 #ifdef _LP64
2733 len = new (C) ConvI2LNode(len);
2734 _igvn.register_new_node_with_optimizer(len);
2735 #endif
2736 call->init_req(TypeFunc::Parms + cnt++, len);
2737 #ifdef _LP64
2738 call->init_req(TypeFunc::Parms + cnt++, C->top());
2739 #endif
2740 call->init_req(TypeFunc::Control, head->init_control());
2741 call->init_req(TypeFunc::I_O, C->top()); // Does no I/O.
2742 call->init_req(TypeFunc::Memory, mem_phi->in(LoopNode::EntryControl));
2743 call->init_req(TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr));
2744 call->init_req(TypeFunc::FramePtr, C->start()->proj_out(TypeFunc::FramePtr));
2745 _igvn.register_new_node_with_optimizer(call);
2746 result_ctrl = new (C) ProjNode(call,TypeFunc::Control);
2747 _igvn.register_new_node_with_optimizer(result_ctrl);
2748 result_mem = new (C) ProjNode(call,TypeFunc::Memory);
2749 _igvn.register_new_node_with_optimizer(result_mem);
2751 /* Disable following optimization until proper fix (add missing checks).
2753 // If this fill is tightly coupled to an allocation and overwrites
2754 // the whole body, allow it to take over the zeroing.
2755 AllocateNode* alloc = AllocateNode::Ideal_allocation(base, this);
2756 if (alloc != NULL && alloc->is_AllocateArray()) {
2757 Node* length = alloc->as_AllocateArray()->Ideal_length();
2758 if (head->limit() == length &&
2759 head->init_trip() == _igvn.intcon(0)) {
2760 if (TraceOptimizeFill) {
2761 tty->print_cr("Eliminated zeroing in allocation");
2762 }
2763 alloc->maybe_set_complete(&_igvn);
2764 } else {
2765 #ifdef ASSERT
2766 if (TraceOptimizeFill) {
2767 tty->print_cr("filling array but bounds don't match");
2768 alloc->dump();
2769 head->init_trip()->dump();
2770 head->limit()->dump();
2771 length->dump();
2772 }
2773 #endif
2774 }
2775 }
2776 */
2778 // Redirect the old control and memory edges that are outside the loop.
2779 Node* exit = head->loopexit()->proj_out(0);
2780 // Sometimes the memory phi of the head is used as the outgoing
2781 // state of the loop. It's safe in this case to replace it with the
2782 // result_mem.
2783 _igvn.replace_node(store->in(MemNode::Memory), result_mem);
2784 _igvn.replace_node(exit, result_ctrl);
2785 _igvn.replace_node(store, result_mem);
2786 // Any uses of the increment outside of the loop become the loop limit.
2787 _igvn.replace_node(head->incr(), head->limit());
2789 // Disconnect the head from the loop.
2790 for (uint i = 0; i < lpt->_body.size(); i++) {
2791 Node* n = lpt->_body.at(i);
2792 _igvn.replace_node(n, C->top());
2793 }
2795 return true;
2796 }