src/share/vm/opto/split_if.cpp

/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/loopnode.hpp"


//------------------------------split_thru_region------------------------------
// Split Node 'n' through merge point.
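// Illustrative sketch of what this does: for Region(A,B) and a CFG node 'n'
// whose control hangs off that Region, one copy of 'n' is made per incoming
// path (one wired under A, one under B); any inputs of 'n' that are Phis
// merging at the Region are replaced in each copy by the per-path value.
// The copies are gathered under a fresh Region, which is returned in place
// of 'n' (its in(0) points back at the old Region to mark it as not a true
// Region).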
Node *PhaseIdealLoop::split_thru_region( Node *n, Node *region ) {
  uint wins = 0;
  assert( n->is_CFG(), "" );
  assert( region->is_Region(), "" );
  Node *r = new (C) RegionNode( region->req() );
  IdealLoopTree *loop = get_loop( n );
  for( uint i = 1; i < region->req(); i++ ) {
    Node *x = n->clone();
    Node *in0 = n->in(0);
    if( in0->in(0) == region ) x->set_req( 0, in0->in(i) );
    for( uint j = 1; j < n->req(); j++ ) {
      Node *in = n->in(j);
      if( get_ctrl(in) == region )
        x->set_req( j, in->in(i) );
    }
    _igvn.register_new_node_with_optimizer(x);
    set_loop(x, loop);
    set_idom(x, x->in(0), dom_depth(x->in(0))+1);
    r->init_req(i, x);
  }

  // Record region
  r->set_req(0,region);         // Not a TRUE RegionNode
  _igvn.register_new_node_with_optimizer(r);
  set_loop(r, loop);
  if( !loop->_child )
    loop->_body.push(r);
  return r;
}

//------------------------------split_up---------------------------------------
// Split block-local op up through the phis to empty the current block
bool PhaseIdealLoop::split_up( Node *n, Node *blk1, Node *blk2 ) {
  if( n->is_CFG() ) {
    assert( n->in(0) != blk1, "Lousy candidate for split-if" );
    return false;
  }
  if( get_ctrl(n) != blk1 && get_ctrl(n) != blk2 )
    return false;               // Not block local
  if( n->is_Phi() ) return false; // Local PHIs are expected

  // Recursively split-up inputs
  for (uint i = 1; i < n->req(); i++) {
    if( split_up( n->in(i), blk1, blk2 ) ) {
      // Got split recursively and self went dead?
      if (n->outcnt() == 0)
        _igvn.remove_dead_node(n);
      return true;
    }
  }

  // Check for needing to clone-up a compare.  Can't do that, it forces
  // another (nested) split-if transform.  Instead, clone it "down".
  if( n->is_Cmp() ) {
    assert(get_ctrl(n) == blk2 || get_ctrl(n) == blk1, "must be in block with IF");
    // Check for simple Cmp/Bool/CMove which we can clone-up.  Cmp/Bool/CMove
    // sequence can have no other users and it must all reside in the split-if
    // block.  Non-simple Cmp/Bool/CMove sequences are 'cloned-down' below -
    // private, per-use versions of the Cmp and Bool are made.  These sink to
    // the CMove block.  If the CMove is in the split-if block, then in the
    // next iteration this will become a simple Cmp/Bool/CMove set to clone-up.
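    // Roughly (illustrative): a Cmp whose only user is a Bool whose only user
    // is a CMove, all living in the split-if blocks, is left alone here and
    // handled by the generic clone-up code further below.  Anything else
    // (several Bool users, or a Bool feeding an If in some other block) gets
    // private per-use copies of the Bool and Cmp made here instead, sunk to
    // the users' blocks, so nothing of the chain stays pinned in this block.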
    Node *bol, *cmov;
    if( !(n->outcnt() == 1 && n->unique_out()->is_Bool() &&
          (bol = n->unique_out()->as_Bool()) &&
          (get_ctrl(bol) == blk1 ||
           get_ctrl(bol) == blk2) &&
          bol->outcnt() == 1 &&
          bol->unique_out()->is_CMove() &&
          (cmov = bol->unique_out()->as_CMove()) &&
          (get_ctrl(cmov) == blk1 ||
           get_ctrl(cmov) == blk2) ) ) {

      // Must clone down
#ifndef PRODUCT
      if( PrintOpto && VerifyLoopOptimizations ) {
        tty->print("Cloning down: ");
        n->dump();
      }
#endif
      // Clone down any block-local BoolNode uses of this CmpNode
      for (DUIterator i = n->outs(); n->has_out(i); i++) {
        Node* bol = n->out(i);
        assert( bol->is_Bool(), "" );
        if (bol->outcnt() == 1) {
          Node* use = bol->unique_out();
          Node *use_c = use->is_If() ? use->in(0) : get_ctrl(use);
          if (use_c == blk1 || use_c == blk2) {
            continue;
          }
        }
        if (get_ctrl(bol) == blk1 || get_ctrl(bol) == blk2) {
          // Recursively sink any BoolNode
#ifndef PRODUCT
          if( PrintOpto && VerifyLoopOptimizations ) {
            tty->print("Cloning down: ");
            bol->dump();
          }
#endif
          for (DUIterator_Last jmin, j = bol->last_outs(jmin); j >= jmin; --j) {
            // Uses are either IfNodes or CMoves
            Node* iff = bol->last_out(j);
            assert( iff->in(1) == bol, "" );
            // Get control block of either the CMove or the If input
            Node *iff_ctrl = iff->is_If() ? iff->in(0) : get_ctrl(iff);
            Node *x = bol->clone();
            register_new_node(x, iff_ctrl);
            _igvn.replace_input_of(iff, 1, x);
          }
          _igvn.remove_dead_node( bol );
          --i;
        }
      }
      // Clone down this CmpNode
      for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; --j) {
        Node* bol = n->last_out(j);
        assert( bol->in(1) == n, "" );
        Node *x = n->clone();
        register_new_node(x, get_ctrl(bol));
        _igvn.replace_input_of(bol, 1, x);
      }
      _igvn.remove_dead_node( n );

      return true;
    }
  }

  // See if splitting-up a Store.  Any anti-dep loads must go up as
  // well.  An anti-dep load might be in the wrong block, because in
  // this particular layout/schedule we ignored anti-deps and allow
  // memory to be alive twice.  This only works if we do the same
  // operations on anti-dep loads as we do their killing stores.
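  // Illustrative example: a Load that reads the same memory slice through the
  // same memory Phi this Store kills is anti-dependent on the Store; hoisting
  // it into the splitting block (below) means it gets split up by the same
  // transform as the Store.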
  if( n->is_Store() && n->in(MemNode::Memory)->in(0) == n->in(0) ) {
    // Get store's memory slice
    int alias_idx = C->get_alias_index(_igvn.type(n->in(MemNode::Address))->is_ptr());

    // Get memory-phi anti-dep loads will be using
    Node *memphi = n->in(MemNode::Memory);
    assert( memphi->is_Phi(), "" );
    // Hoist any anti-dep load to the splitting block;
    // it will then "split-up".
    for (DUIterator_Fast imax,i = memphi->fast_outs(imax); i < imax; i++) {
      Node *load = memphi->fast_out(i);
      if( load->is_Load() && alias_idx == C->get_alias_index(_igvn.type(load->in(MemNode::Address))->is_ptr()) )
        set_ctrl(load,blk1);
    }
  }

  // Found some other Node; must clone it up
#ifndef PRODUCT
  if( PrintOpto && VerifyLoopOptimizations ) {
    tty->print("Cloning up: ");
    n->dump();
  }
#endif

  // ConvI2L may have type information on it which becomes invalid if
  // it moves up in the graph, so widen the type of any clones to
  // TypeLong::INT when pushing them up.
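  // (e.g. a ConvI2L whose long type was narrowed under checks dominating its
  //  current position would no longer be guarded by those checks once it
  //  floats above the merge point; hence the conservative widening.)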
  const Type* rtype = NULL;
  if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::INT) {
    rtype = TypeLong::INT;
  }

  // Now actually split-up this guy.  One copy per control path merging.
  Node *phi = PhiNode::make_blank(blk1, n);
  for( uint j = 1; j < blk1->req(); j++ ) {
    Node *x = n->clone();
    // Widen the type of the ConvI2L when pushing up.
    if (rtype != NULL) x->as_Type()->set_type(rtype);
    if( n->in(0) && n->in(0) == blk1 )
      x->set_req( 0, blk1->in(j) );
    for( uint i = 1; i < n->req(); i++ ) {
      Node *m = n->in(i);
      if( get_ctrl(m) == blk1 ) {
        assert( m->in(0) == blk1, "" );
        x->set_req( i, m->in(j) );
      }
    }
    register_new_node( x, blk1->in(j) );
    phi->init_req( j, x );
  }
  // Announce phi to optimizer
  register_new_node(phi, blk1);

  // Remove cloned-up value from optimizer; use phi instead
  _igvn.replace_node( n, phi );

  // (There used to be a self-recursive call to split_up() here,
  // but it is not needed.  All necessary forward walking is done
  // by do_split_if() below.)

  return true;
}

//------------------------------register_new_node------------------------------
void PhaseIdealLoop::register_new_node( Node *n, Node *blk ) {
  assert(!n->is_CFG(), "must be data node");
  _igvn.register_new_node_with_optimizer(n);
  set_ctrl(n, blk);
  IdealLoopTree *loop = get_loop(blk);
  if( !loop->_child )
    loop->_body.push(n);
}

//------------------------------small_cache------------------------------------
struct small_cache : public Dict {

  small_cache() : Dict( cmpkey, hashptr ) {}
  Node *probe( Node *use_blk ) { return (Node*)((*this)[use_blk]); }
  void lru_insert( Node *use_blk, Node *new_def ) { Insert(use_blk,new_def); }
};
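// (small_cache is just a pointer Dict: spinup() below uses it to remember,
//  for blocks already visited on the dominator walk, the replacement def
//  computed for them, so later walks through the same blocks stop early on
//  a cache hit.)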

//------------------------------spinup-----------------------------------------
// "Spin up" the dominator tree, starting at the use site and stopping when we
// find the post-dominating point.

// We must be at the merge point which post-dominates 'new_false' and
// 'new_true'.  Figure out which edges into the RegionNode eventually lead up
// to false and which to true.  Put in a PhiNode to merge values; plug in
// the appropriate false-arm or true-arm values.  If some path leads to the
// original IF, then insert a Phi recursively.
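// Rough sketch (illustrative): walking idom() upward from the use block stops
// just below 'iff_dom'.  If the walk ends at new_false or new_true, the def is
// simply cloned there; if it ends at some later merge point, a Phi is built at
// that Region (for data defs; for control defs the Region itself serves) whose
// inputs come from recursing down each of that Region's incoming paths.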
Node *PhaseIdealLoop::spinup( Node *iff_dom, Node *new_false, Node *new_true, Node *use_blk, Node *def, small_cache *cache ) {
  if (use_blk->is_top())        // Handle dead uses
    return use_blk;
  Node *prior_n = (Node*)0xdeadbeef;
  Node *n = use_blk;            // Get path input
  assert( use_blk != iff_dom, "" );
  // Here's the loop that "spins up" the dominator tree.  Do a cache-check
  // along the way, in case we've come this way before.
  while( n != iff_dom ) {       // Found post-dominating point?
    prior_n = n;
    n = idom(n);                // Search higher
    Node *s = cache->probe( prior_n ); // Check cache
    if( s ) return s;           // Cache hit!
  }

  Node *phi_post;
  if( prior_n == new_false || prior_n == new_true ) {
    phi_post = def->clone();
    phi_post->set_req(0, prior_n );
    register_new_node(phi_post, prior_n);
  } else {
    // This method handles both control uses (looking for Regions) and data
    // uses (looking for Phis).  If looking for a control use, then we need
    // to insert a Region instead of a Phi; however, the Region always exists
    // already (the hash_find_insert below would always hit), so we can
    // return the existing Region.
    if( def->is_CFG() ) {
      phi_post = prior_n;       // If looking for CFG, return prior
    } else {
      assert( def->is_Phi(), "" );
      assert( prior_n->is_Region(), "must be a post-dominating merge point" );

      // Need a Phi here
      phi_post = PhiNode::make_blank(prior_n, def);
      // Search for both true and false on all paths till find one.
      for( uint i = 1; i < phi_post->req(); i++ ) // For all paths
        phi_post->init_req( i, spinup( iff_dom, new_false, new_true, prior_n->in(i), def, cache ) );
      Node *t = _igvn.hash_find_insert(phi_post);
      if( t ) {                 // See if we already have this one
        // phi_post will not be used, so kill it
        _igvn.remove_dead_node(phi_post);
        phi_post->destruct();
        phi_post = t;
      } else {
        register_new_node( phi_post, prior_n );
      }
    }
  }

  // Update cache everywhere
  prior_n = (Node*)0xdeadbeef;  // Reset IDOM walk
  n = use_blk;                  // Get path input
  // Spin-up the idom tree again, basically doing path-compression.
  // Insert cache entries along the way, so that if we ever hit this
  // point in the IDOM tree again we'll stop immediately on a cache hit.
  while( n != iff_dom ) {       // Found post-dominating point?
    prior_n = n;
    n = idom(n);                // Search higher
    cache->lru_insert( prior_n, phi_post ); // Fill cache
  }                             // End of while not gone high enough

  return phi_post;
}

//------------------------------find_use_block---------------------------------
// Find the block a USE is in.  Normally USE's are in the same block as the
// using instruction.  For Phi-USE's, the USE is in the predecessor block
// along the corresponding path.
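// For example, if use->in(2) == def for a Phi use, the use is treated as
// living in use->in(0)->in(2), the second predecessor of the Phi's Region,
// rather than in the Region itself.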
Node *PhaseIdealLoop::find_use_block( Node *use, Node *def, Node *old_false, Node *new_false, Node *old_true, Node *new_true ) {
  // CFG uses are their own block
  if( use->is_CFG() )
    return use;

  if( use->is_Phi() ) {         // Phi uses in prior block
    // Grab the first Phi use; there may be many.
    // Each will be handled as a separate iteration of
    // the "while( phi->outcnt() )" loop.
    uint j;
    for( j = 1; j < use->req(); j++ )
      if( use->in(j) == def )
        break;
    assert( j < use->req(), "def should be among use's inputs" );
    return use->in(0)->in(j);
  }
  // Normal (non-phi) use
  Node *use_blk = get_ctrl(use);
  // Some uses are directly attached to the old (and going away)
  // false and true branches.
  if( use_blk == old_false ) {
    use_blk = new_false;
    set_ctrl(use, new_false);
  }
  if( use_blk == old_true ) {
    use_blk = new_true;
    set_ctrl(use, new_true);
  }

  if (use_blk == NULL) {        // He's dead, Jim
    _igvn.replace_node(use, C->top());
  }

  return use_blk;
}

//------------------------------handle_use-------------------------------------
// Handle uses of the merge point.  Basically, split-if makes the merge point
// go away so all uses of the merge point must go away as well.  Most block
// local uses have already been split-up, through the merge point.  Uses from
// far below the merge point can't always be split up (e.g., phi-uses are
// pinned) and it makes too much stuff live.  Instead we use a path-based
// solution to move uses down.
//
// If the use is along the pre-split-CFG true branch, then the new use will
// be from the post-split-CFG true merge point.  Vice-versa for the false
// path.  Some uses will be along both paths; then we sink the use to the
// post-dominating location; we may need to insert a Phi there.
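// Illustrative example: a use reached only through the old true projection is
// re-wired to a copy of the def made at new_true; a use sitting below a later
// merge of the true and false paths instead gets a Phi built by spinup() to
// merge the two per-path values.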
void PhaseIdealLoop::handle_use( Node *use, Node *def, small_cache *cache, Node *region_dom, Node *new_false, Node *new_true, Node *old_false, Node *old_true ) {

  Node *use_blk = find_use_block(use,def,old_false,new_false,old_true,new_true);
  if( !use_blk ) return;        // He's dead, Jim

  // Walk up the dominator tree until I hit either the old IfFalse, the old
  // IfTrue or the old If.  Insert Phis where needed.
  Node *new_def = spinup( region_dom, new_false, new_true, use_blk, def, cache );

  // Found where this USE goes.  Re-point him.
  uint i;
  for( i = 0; i < use->req(); i++ )
    if( use->in(i) == def )
      break;
  assert( i < use->req(), "def should be among use's inputs" );
  _igvn.replace_input_of(use, i, new_def);
}

//------------------------------do_split_if------------------------------------
// Found an If getting its condition-code input from a Phi in the same block.
// Split thru the Region.
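// Rough sketch of the transform (illustrative, two-input merge):
//
//   Region(A,B) --> If( Phi(Region, condA, condB) )
//
// The If is cloned up each incoming path, giving an If(condA) under A and an
// If(condB) under B.  The old IfFalse/IfTrue projections are replaced by new
// Regions, each merging the corresponding projections of the two clones, and
// the original Region, Phi and If all go dead; remaining uses of them are
// re-wired below via handle_use()/spinup().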
void PhaseIdealLoop::do_split_if( Node *iff ) {
#ifndef PRODUCT
  if( PrintOpto && VerifyLoopOptimizations )
    tty->print_cr("Split-if");
  if (TraceLoopOpts) {
    tty->print_cr("SplitIf");
  }
#endif
  C->set_major_progress();
  Node *region = iff->in(0);
  Node *region_dom = idom(region);

  // We are going to clone this test (and the control flow with it) up through
  // the incoming merge point.  We need to empty the current basic block.
  // Clone any instructions which must be in this block up through the merge
  // point.
  DUIterator i, j;
  bool progress = true;
  while (progress) {
    progress = false;
    for (i = region->outs(); region->has_out(i); i++) {
      Node* n = region->out(i);
      if( n == region ) continue;
      // The IF to be split is OK.
      if( n == iff ) continue;
      if( !n->is_Phi() ) {      // Found pinned memory op or such
        if (split_up(n, region, iff)) {
          i = region->refresh_out_pos(i);
          progress = true;
        }
        continue;
      }
      assert( n->in(0) == region, "" );

      // Recursively split up all users of a Phi
      for (j = n->outs(); n->has_out(j); j++) {
        Node* m = n->out(j);
        // If m is dead, throw it away, and declare progress
        if (_nodes[m->_idx] == NULL) {
          _igvn.remove_dead_node(m);
          // fall through
        }
        else if (m != iff && split_up(m, region, iff)) {
          // fall through
        } else {
          continue;
        }
        // Something unpredictable changed.
        // Tell the iterators to refresh themselves, and rerun the loop.
        i = region->refresh_out_pos(i);
        j = region->refresh_out_pos(j);
        progress = true;
      }
    }
  }

  // Now we have no instructions in the block containing the IF.
  // Split the IF.
  Node *new_iff = split_thru_region( iff, region );

  // Replace both uses of 'new_iff' with Regions merging True/False
  // paths.  This makes 'new_iff' go dead.
  Node *old_false, *old_true;
  Node *new_false, *new_true;
  for (DUIterator_Last j2min, j2 = iff->last_outs(j2min); j2 >= j2min; --j2) {
    Node *ifp = iff->last_out(j2);
    assert( ifp->Opcode() == Op_IfFalse || ifp->Opcode() == Op_IfTrue, "" );
    ifp->set_req(0, new_iff);
    Node *ifpx = split_thru_region( ifp, region );

    // Replace 'If' projection of a Region with a Region of
    // 'If' projections.
    ifpx->set_req(0, ifpx);     // A TRUE RegionNode

    // Setup dominator info
    set_idom(ifpx, region_dom, dom_depth(region_dom) + 1);

    // Check for splitting loop tails
    if( get_loop(iff)->tail() == ifp )
      get_loop(iff)->_tail = ifpx;

    // Replace in the graph with lazy-update mechanism
    new_iff->set_req(0, new_iff); // hook self so it does not go dead
    lazy_replace_proj( ifp, ifpx );
    new_iff->set_req(0, region);

    // Record bits for later xforms
    if( ifp->Opcode() == Op_IfFalse ) {
      old_false = ifp;
      new_false = ifpx;
    } else {
      old_true = ifp;
      new_true = ifpx;
    }
  }
  _igvn.remove_dead_node(new_iff);
  // Lazy replace IDOM info with the region's dominator
  lazy_replace( iff, region_dom );

  // Now make the original merge point go dead, by handling all its uses.
  small_cache region_cache;
  // Preload some control flow in region-cache
  region_cache.lru_insert( new_false, new_false );
  region_cache.lru_insert( new_true , new_true );
  // Now handle all uses of the splitting block
  for (DUIterator k = region->outs(); region->has_out(k); k++) {
    Node* phi = region->out(k);
    if (!phi->in(0)) {          // Dead phi?  Remove it
      _igvn.remove_dead_node(phi);
    } else if (phi == region) { // Found the self-reference
      continue;                 // No roll-back of DUIterator
    } else if (phi->is_Phi()) { // Expected common case: Phi hanging off of Region
      assert(phi->in(0) == region, "Inconsistent graph");
      // Need a per-def cache.  Phi represents a def, so make a cache
      small_cache phi_cache;

      // Inspect all Phi uses to make the Phi go dead
      for (DUIterator_Last lmin, l = phi->last_outs(lmin); l >= lmin; --l) {
        Node* use = phi->last_out(l);
        // Compute the new DEF for this USE.  New DEF depends on the path
        // taken from the original DEF to the USE.  The new DEF may be some
        // collection of PHI's merging values from different paths.  The Phis
        // inserted depend only on the location of the USE.  We use a
        // 2-element cache to handle multiple uses from the same block.
        handle_use(use, phi, &phi_cache, region_dom, new_false, new_true, old_false, old_true);
      } // End of while phi has uses
      // Remove the dead Phi
      _igvn.remove_dead_node( phi );
    } else {
      assert(phi->in(0) == region, "Inconsistent graph");
      // Random memory op guarded by Region.  Compute new DEF for USE.
      handle_use(phi, region, &region_cache, region_dom, new_false, new_true, old_false, old_true);
    }
    // Every path above deletes a use of the region, except for the region
    // self-cycle (which is needed by handle_use calling find_use_block
    // calling get_ctrl calling get_ctrl_no_update looking for dead
    // regions).  So roll back the DUIterator innards.
    --k;
  } // End of while merge point has phis

  assert(region->outcnt() == 1, "Only self reference should remain"); // Just Self on the Region
  region->set_req(0, NULL);     // Break the self-cycle

  // Any leftover bits in the splitting block must not have depended on local
  // Phi inputs (these have already been split-up).  Hence it's safe to hoist
  // these guys to the dominating point.
  lazy_replace( region, region_dom );
#ifndef PRODUCT
  if( VerifyLoopOptimizations ) verify();
#endif
}
