src/share/vm/opto/coalesce.cpp

changeset 3249:e3b0dcc327b9
author:    twisti
date:      Mon, 31 Oct 2011 03:06:42 -0700

7104561: UseRDPCForConstantTableBase doesn't work after shorten branches changes
Reviewed-by: never, kvn

/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/coalesce.hpp"
#include "opto/connode.hpp"
#include "opto/indexSet.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/regmask.hpp"

//=============================================================================
//------------------------------reset_uf_map-----------------------------------
void PhaseChaitin::reset_uf_map( uint maxlrg ) {
  _maxlrg = maxlrg;
  // Force the Union-Find mapping to be at least this large
  _uf_map.extend(_maxlrg,0);
  // Initialize it to be the ID mapping.
  for( uint i=0; i<_maxlrg; i++ )
    _uf_map.map(i,i);
}

//------------------------------compress_uf_map--------------------------------
// Make all Nodes map directly to their final live range; no need for
// the Union-Find mapping after this call.
void PhaseChaitin::compress_uf_map_for_nodes( ) {
  // For all Nodes, compress mapping
  uint unique = _names.Size();
  for( uint i=0; i<unique; i++ ) {
    uint lrg = _names[i];
    uint compressed_lrg = Find(lrg);
    if( lrg != compressed_lrg )
      _names.map(i,compressed_lrg);
  }
}

//------------------------------Find-------------------------------------------
// Straight out of Tarjan's union-find algorithm
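// A small worked example: with _uf_map = { 9->7, 7->3, 3->3 }, calling
// Find_compress(9) chases the chain 9->7->3 to the self-mapped root 3,
// then rewrites 9 and 7 to map straight to 3, so the next lookup on
// either one finds the root in a single step.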
uint PhaseChaitin::Find_compress( uint lrg ) {
  uint cur = lrg;
  uint next = _uf_map[cur];
  while( next != cur ) {        // Scan chain of equivalences
    assert( next < cur, "always union smaller" );
    cur = next;                 // until find a fixed-point
    next = _uf_map[cur];
  }
  // Core of union-find algorithm: update chain of
  // equivalences to be equal to the root.
  while( lrg != next ) {
    uint tmp = _uf_map[lrg];
    _uf_map.map(lrg, next);
    lrg = tmp;
  }
  return lrg;
}

//------------------------------Find-------------------------------------------
// Straight out of Tarjan's union-find algorithm
uint PhaseChaitin::Find_compress( const Node *n ) {
  uint lrg = Find_compress(_names[n->_idx]);
  _names.map(n->_idx,lrg);
  return lrg;
}

//------------------------------Find_const-------------------------------------
// Like Find above, but no path compress, so bad asymptotic behavior
uint PhaseChaitin::Find_const( uint lrg ) const {
  if( !lrg ) return lrg;        // Ignore the zero LRG
  // Off the end?  This happens during debugging dumps when you got
  // brand new live ranges but have not told the allocator yet.
  if( lrg >= _maxlrg ) return lrg;
  uint next = _uf_map[lrg];
  while( next != lrg ) {        // Scan chain of equivalences
    assert( next < lrg, "always union smaller" );
    lrg = next;                 // until find a fixed-point
    next = _uf_map[lrg];
  }
  return next;
}

//------------------------------Find-------------------------------------------
// Like Find above, but no path compress, so bad asymptotic behavior
uint PhaseChaitin::Find_const( const Node *n ) const {
  if( n->_idx >= _names.Size() ) return 0; // not mapped, usual for debug dump
  return Find_const( _names[n->_idx] );
}

//------------------------------Union------------------------------------------
// union 2 sets together.
void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
  uint src = Find(src_n);
  uint dst = Find(dst_n);
  assert( src, "" );
  assert( dst, "" );
  assert( src < _maxlrg, "oob" );
  assert( dst < _maxlrg, "oob" );
  assert( src < dst, "always union smaller" );
  _uf_map.map(dst,src);
}

//------------------------------new_lrg----------------------------------------
void PhaseChaitin::new_lrg( const Node *x, uint lrg ) {
  // Make the Node->LRG mapping
  _names.extend(x->_idx,lrg);
  // Make the Union-Find mapping an identity function
  _uf_map.extend(lrg,lrg);
}

//------------------------------clone_projs------------------------------------
// After cloning some rematerialized instruction, clone any MachProj's that
// follow it.  Example: Intel zero is XOR, kills flags.  Sparc FP constants
// use G3 as an address temp.
int PhaseChaitin::clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ) {
  Block *bcon = _cfg._bbs[con->_idx];
  uint cindex = bcon->find_node(con);
  Node *con_next = bcon->_nodes[cindex+1];
  if( con_next->in(0) != con || !con_next->is_MachProj() )
    return false;               // No MachProj's follow

  // Copy kills after the cloned constant
  Node *kills = con_next->clone();
  kills->set_req( 0, copy );
  b->_nodes.insert( idx, kills );
  _cfg._bbs.map( kills->_idx, b );
  new_lrg( kills, maxlrg++ );
  return true;
}

//------------------------------compact----------------------------------------
// Renumber the live ranges to compact them.  Makes the IFG smaller.
void PhaseChaitin::compact() {
  // Currently the _uf_map contains a series of short chains which are headed
  // by a self-cycle.  All the chains run from big numbers to little numbers.
  // The Find() call chases the chains & shortens them for the next Find call.
  // We are going to change this structure slightly.  Numbers above a moving
  // wave 'i' are unchanged.  Numbers below 'j' point directly to their
  // compacted live range with no further chaining.  There are no chains or
  // cycles below 'i', so the Find call no longer works.
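  // For example, if the chain heads (self-cycles) are live ranges 2, 5
  // and 9, the wave renumbers them 1, 2 and 3 as it passes, and every
  // entry that chained to a head is rewritten to point at the head's
  // new, compacted number.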
  uint j=1;
  uint i;
  for( i=1; i < _maxlrg; i++ ) {
    uint lr = _uf_map[i];
    // Ignore unallocated live ranges
    if( !lr ) continue;
    assert( lr <= i, "" );
    _uf_map.map(i, ( lr == i ) ? j++ : _uf_map[lr]);
  }
  if( false )                  // PrintOptoCompactLiveRanges
    printf("Compacted %d LRs from %d\n",i-j,i);
  // Now change the Node->LR mapping to reflect the compacted names
  uint unique = _names.Size();
  for( i=0; i<unique; i++ )
    _names.map(i,_uf_map[_names[i]]);

  // Reset the Union-Find mapping
  reset_uf_map(j);

}

//=============================================================================
//------------------------------Dump-------------------------------------------
#ifndef PRODUCT
void PhaseCoalesce::dump( Node *n ) const {
  // Being a const function means I cannot use 'Find'
  uint r = _phc.Find(n);
  tty->print("L%d/N%d ",r,n->_idx);
}

//------------------------------dump-------------------------------------------
void PhaseCoalesce::dump() const {
  // I know I have a block layout now, so I can print blocks in a loop
  for( uint i=0; i<_phc._cfg._num_blocks; i++ ) {
    uint j;
    Block *b = _phc._cfg._blocks[i];
    // Print a nice block header
    tty->print("B%d: ",b->_pre_order);
    for( j=1; j<b->num_preds(); j++ )
      tty->print("B%d ", _phc._cfg._bbs[b->pred(j)->_idx]->_pre_order);
    tty->print("-> ");
    for( j=0; j<b->_num_succs; j++ )
      tty->print("B%d ",b->_succs[j]->_pre_order);
    tty->print(" IDom: B%d/#%d\n", b->_idom ? b->_idom->_pre_order : 0, b->_dom_depth);
    uint cnt = b->_nodes.size();
    for( j=0; j<cnt; j++ ) {
      Node *n = b->_nodes[j];
      dump( n );
      tty->print("\t%s\t",n->Name());

      // Dump the inputs
      uint k;                   // Exit value of loop
      for( k=0; k<n->req(); k++ ) // For all required inputs
        if( n->in(k) ) dump( n->in(k) );
        else tty->print("_ ");
      int any_prec = 0;
      for( ; k<n->len(); k++ )          // For all precedence inputs
        if( n->in(k) ) {
          if( !any_prec++ ) tty->print(" |");
          dump( n->in(k) );
        }

      // Dump node-specific info
      n->dump_spec(tty);
      tty->print("\n");

    }
    tty->print("\n");
  }
}
#endif

//------------------------------combine_these_two------------------------------
// Combine the live ranges def'd by these 2 Nodes.  N2 is an input to N1.
void PhaseCoalesce::combine_these_two( Node *n1, Node *n2 ) {
  uint lr1 = _phc.Find(n1);
  uint lr2 = _phc.Find(n2);
  if( lr1 != lr2 &&             // Different live ranges already AND
      !_phc._ifg->test_edge_sq( lr1, lr2 ) ) {  // Do not interfere
    LRG *lrg1 = &_phc.lrgs(lr1);
    LRG *lrg2 = &_phc.lrgs(lr2);
    // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK.

    // Now, why is int->oop OK?  We end up declaring a raw-pointer as an oop
    // and in general that's a bad thing.  However, int->oop conversions only
    // happen at GC points, so the lifetime of the misclassified raw-pointer
    // is from the CheckCastPP (that converts it to an oop) backwards up
    // through a merge point and into the slow-path call, and around the
    // diamond up to the heap-top check and back down into the slow-path call.
    // The misclassified raw pointer is NOT live across the slow-path call,
    // and so does not appear in any GC info, so the fact that it is
    // misclassified is OK.

    if( (lrg1->_is_oop || !lrg2->_is_oop) && // not an oop->int cast AND
        // Compatible final mask
        lrg1->mask().overlap( lrg2->mask() ) ) {
      // Merge larger into smaller.
      if( lr1 > lr2 ) {
        uint  tmp =  lr1;  lr1 =  lr2;  lr2 =  tmp;
        Node   *n =   n1;   n1 =   n2;   n2 =    n;
        LRG *ltmp = lrg1; lrg1 = lrg2; lrg2 = ltmp;
      }
      // Union lr2 into lr1
      _phc.Union( n1, n2 );
      if (lrg1->_maxfreq < lrg2->_maxfreq)
        lrg1->_maxfreq = lrg2->_maxfreq;
      // Merge in the IFG
      _phc._ifg->Union( lr1, lr2 );
      // Combine register restrictions
      lrg1->AND(lrg2->mask());
    }
  }
}

//------------------------------coalesce_driver--------------------------------
// Copy coalescing
void PhaseCoalesce::coalesce_driver( ) {

  verify();
  // Coalesce from high frequency to low
  for( uint i=0; i<_phc._cfg._num_blocks; i++ )
    coalesce( _phc._blks[i] );

}

//------------------------------insert_copy_with_overlap-----------------------
// I am inserting copies to come out of SSA form.  In the general case, I am
// doing a parallel renaming.  I'm in the Named world now, so I can't do a
// general parallel renaming.  All the copies now use "names" (live-ranges)
// to carry values instead of the explicit use-def chains.  Suppose I need to
// insert 2 copies into the same block.  They copy L161->L128 and L128->L132.
// If I insert them in the wrong order then L128 will get clobbered before it
// can get used by the second copy.  This cannot happen in the SSA model;
// direct use-def chains get me the right value.  It DOES happen in the named
// model so I have to handle the reordering of copies.
//
// In general, I need to topo-sort the placed copies to avoid conflicts.
// It's possible to have a closed cycle of copies (e.g., recirculating the
// same values around a loop).  In this case I need a temp to break the cycle.
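// The smallest such cycle is a swap: copies L128->L132 and L132->L128 in
// the same block clobber one source no matter which copy is placed first,
// so one value must first be saved in a fresh temp; that is what the
// temp-insertion logic below does.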
void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, uint dst_name, uint src_name ) {

  // Scan backwards for the locations of the last use of the dst_name.
  // I am about to clobber the dst_name, so the copy must be inserted
  // after the last use.  Last use is really first-use on a backwards scan.
  uint i = b->end_idx()-1;
  while( 1 ) {
    Node *n = b->_nodes[i];
    // Check for end of virtual copies; this is also the end of the
    // parallel renaming effort.
    if( n->_idx < _unique ) break;
    uint idx = n->is_Copy();
    assert( idx || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
    if( idx && _phc.Find(n->in(idx)) == dst_name ) break;
    i--;
  }
  uint last_use_idx = i;

  // Also search for any kill of src_name that exits the block.
  // Since the copy uses src_name, I have to come before any kill.
  uint kill_src_idx = b->end_idx();
  // There can be only 1 kill that exits any block and that is
  // the last kill.  Thus it is the first kill on a backwards scan.
  i = b->end_idx()-1;
  while( 1 ) {
    Node *n = b->_nodes[i];
    // Check for end of virtual copies; this is also the end of the
    // parallel renaming effort.
    if( n->_idx < _unique ) break;
    assert( n->is_Copy() || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
    if( _phc.Find(n) == src_name ) {
      kill_src_idx = i;
      break;
    }
    i--;
  }
  // Need a temp?  Last use of dst comes after the kill of src?
  if( last_use_idx >= kill_src_idx ) {
    // Need to break a cycle with a temp
    uint idx = copy->is_Copy();
    Node *tmp = copy->clone();
    _phc.new_lrg(tmp,_phc._maxlrg++);
    // Insert new temp between copy and source
    tmp ->set_req(idx,copy->in(idx));
    copy->set_req(idx,tmp);
    // Save source in temp early, before source is killed
    b->_nodes.insert(kill_src_idx,tmp);
    _phc._cfg._bbs.map( tmp->_idx, b );
    last_use_idx++;
  }

  // Insert just after last use
  b->_nodes.insert(last_use_idx+1,copy);
}

//------------------------------insert_copies----------------------------------
void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
  // We do LRG compression and fix up the liveout data only here, since the
  // other place in Split() is guarded by an assert which we never hit.
  _phc.compress_uf_map_for_nodes();
  // Fix block's liveout data for compressed live ranges.
  for(uint lrg = 1; lrg < _phc._maxlrg; lrg++ ) {
    uint compressed_lrg = _phc.Find(lrg);
    if( lrg != compressed_lrg ) {
      for( uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++ ) {
        IndexSet *liveout = _phc._live->live(_phc._cfg._blocks[bidx]);
        if( liveout->member(lrg) ) {
          liveout->remove(lrg);
          liveout->insert(compressed_lrg);
        }
      }
    }
  }

  // All new nodes added are actual copies to replace virtual copies.
  // Nodes with index less than '_unique' are original, non-virtual Nodes.
  _unique = C->unique();

  for( uint i=0; i<_phc._cfg._num_blocks; i++ ) {
    Block *b = _phc._cfg._blocks[i];
    uint cnt = b->num_preds();  // Number of inputs to the Phi

    for( uint l = 1; l<b->_nodes.size(); l++ ) {
      Node *n = b->_nodes[l];

      // Do not use removed-copies, use copied value instead
      uint ncnt = n->req();
      for( uint k = 1; k<ncnt; k++ ) {
        Node *copy = n->in(k);
        uint cidx = copy->is_Copy();
        if( cidx ) {
          Node *def = copy->in(cidx);
          if( _phc.Find(copy) == _phc.Find(def) )
            n->set_req(k,def);
        }
      }

      // Remove any explicit copies that get coalesced.
      uint cidx = n->is_Copy();
      if( cidx ) {
        Node *def = n->in(cidx);
        if( _phc.Find(n) == _phc.Find(def) ) {
          n->replace_by(def);
          n->set_req(cidx,NULL);
          b->_nodes.remove(l);
          l--;
          continue;
        }
      }

      if( n->is_Phi() ) {
        // Get the chosen name for the Phi
        uint phi_name = _phc.Find( n );
        // Ignore the pre-allocated specials
        if( !phi_name ) continue;
        // Check for mismatch inputs to Phi
        for( uint j = 1; j<cnt; j++ ) {
          Node *m = n->in(j);
          uint src_name = _phc.Find(m);
          if( src_name != phi_name ) {
            Block *pred = _phc._cfg._bbs[b->pred(j)->_idx];
            Node *copy;
            assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
            // Rematerialize constants instead of copying them
            if( m->is_Mach() && m->as_Mach()->is_Con() &&
                m->as_Mach()->rematerialize() ) {
              copy = m->clone();
              // Insert the copy in the predecessor basic block
              pred->add_inst(copy);
              // Copy any flags as well
              _phc.clone_projs( pred, pred->end_idx(), m, copy, _phc._maxlrg );
            } else {
              const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
              copy = new (C) MachSpillCopyNode(m,*rm,*rm);
              // Find a good place to insert.  Kinda tricky, use a subroutine
              insert_copy_with_overlap(pred,copy,phi_name,src_name);
            }
            // Insert the copy in the use-def chain
            n->set_req( j, copy );
            _phc._cfg._bbs.map( copy->_idx, pred );
            // Extend ("register allocate") the names array for the copy.
            _phc._names.extend( copy->_idx, phi_name );
          } // End of if Phi names do not match
        } // End of for all inputs to Phi
      } else { // End of if Phi

        // Now check for 2-address instructions
        uint idx;
        if( n->is_Mach() && (idx=n->as_Mach()->two_adr()) ) {
          // Get the chosen name for the Node
          uint name = _phc.Find( n );
          assert( name, "no 2-address specials" );
          // Check for name mis-match on the 2-address input
          Node *m = n->in(idx);
          if( _phc.Find(m) != name ) {
            Node *copy;
            assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
            // At this point it is unsafe to extend live ranges (6550579).
            // Rematerialize only constants as we do for Phi above.
            if( m->is_Mach() && m->as_Mach()->is_Con() &&
                m->as_Mach()->rematerialize() ) {
              copy = m->clone();
              // Insert the copy in the basic block, just before us
              b->_nodes.insert( l++, copy );
              if( _phc.clone_projs( b, l, m, copy, _phc._maxlrg ) )
                l++;
            } else {
              const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
              copy = new (C) MachSpillCopyNode( m, *rm, *rm );
              // Insert the copy in the basic block, just before us
              b->_nodes.insert( l++, copy );
            }
            // Insert the copy in the use-def chain
            n->set_req(idx, copy );
            // Extend ("register allocate") the names array for the copy.
            _phc._names.extend( copy->_idx, name );
            _phc._cfg._bbs.map( copy->_idx, b );
          }

        } // End of is two-adr

        // In low-frequency or uncommon blocks, insert a copy at a debug use
        // of a high-frequency lrg; splitting the lrg here allows a better
        // allocation in the high-frequency blocks.
        if( b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs) ) {
          // Walk the debug inputs to the node and check for lrg freq
          JVMState* jvms = n->jvms();
          uint debug_start = jvms ? jvms->debug_start() : 999999;
          uint debug_end   = jvms ? jvms->debug_end()   : 999999;
          for(uint inpidx = debug_start; inpidx < debug_end; inpidx++) {
            // Do not split monitors; they are only needed for debug table
            // entries and need no code.
            if( jvms->is_monitor_use(inpidx) ) continue;
            Node *inp = n->in(inpidx);
            uint nidx = _phc.n2lidx(inp);
            LRG &lrg = lrgs(nidx);

            // If this lrg has a high frequency use/def
            if( lrg._maxfreq >= _phc.high_frequency_lrg() ) {
              // If the live range is also live out of this block (like it
              // would be for a fast/slow idiom), the normal spill mechanism
              // does an excellent job.  If it is not live out of this block
              // (like it would be for debug info to uncommon trap) splitting
              // the live range now allows a better allocation in the high
              // frequency blocks.
              //   Build_IFG_virtual has converted the live sets to
              // live-IN info, not live-OUT info.
              uint k;
              for( k=0; k < b->_num_succs; k++ )
                if( _phc._live->live(b->_succs[k])->member( nidx ) )
                  break;      // Live in to some successor block?
              if( k < b->_num_succs )
                continue;     // Live out; do not pre-split
              // Split the lrg at this use
              const RegMask *rm = C->matcher()->idealreg2spillmask[inp->ideal_reg()];
              Node *copy = new (C) MachSpillCopyNode( inp, *rm, *rm );
              // Insert the copy in the use-def chain
              n->set_req(inpidx, copy );
              // Insert the copy in the basic block, just before us
              b->_nodes.insert( l++, copy );
              // Extend ("register allocate") the names array for the copy.
              _phc.new_lrg( copy, _phc._maxlrg++ );
              _phc._cfg._bbs.map( copy->_idx, b );
              //tty->print_cr("Split a debug use in Aggressive Coalesce");
            }  // End of if high frequency use/def
          }  // End of for all debug inputs
        }  // End of if low frequency safepoint

      } // End of else (not a Phi)

    } // End of for all instructions
  } // End of for all blocks
}

//=============================================================================
//------------------------------coalesce---------------------------------------
// Aggressive (but pessimistic) copy coalescing of a single block

// The following coalesce pass represents a single round of aggressive
// pessimistic coalesce.  "Aggressive" means no attempt to preserve
// colorability when coalescing.  This occasionally means more spills, but
// it also means fewer rounds of coalescing for better code - and that means
// faster compiles.

// "Pessimistic" means we do not hit the fixed point in one pass (and we are
// reaching for the least fixed point to boot).  This is typically solved
// with a few more rounds of coalescing, but the compiler must run fast.  We
// could optimistically coalesce everything touching PhiNodes together
// into one big live range, then check for self-interference.  Everywhere
// the live range interferes with self it would have to be split.  Finding
// the right split points can be done with some heuristics (based on
// expected frequency of edges in the live range).  In short, it's a real
// research problem and the timeline is too short to allow such research.
// Further thoughts: (1) build the LR in a pass, (2) find self-interference
// in another pass, (3) per each self-conflict, split, (4) split by finding
// the low-cost cut (min-cut) of the LR, (5) edges in the LR are weighted
// according to the GCM algorithm (or just exec freq on CFG edges).

void PhaseAggressiveCoalesce::coalesce( Block *b ) {
  // Copies are still "virtual" - meaning we have not made them explicitly
  // copies.  Instead, Phi functions of successor blocks have mis-matched
  // live-ranges.  If I fail to coalesce, I'll have to insert a copy to line
  // up the live-ranges.  Check for Phis in successor blocks.
  uint i;
  for( i=0; i<b->_num_succs; i++ ) {
    Block *bs = b->_succs[i];
    // Find index of 'b' in 'bs' predecessors
    uint j=1;
    while( _phc._cfg._bbs[bs->pred(j)->_idx] != b ) j++;
    // Visit all the Phis in successor block
    for( uint k = 1; k<bs->_nodes.size(); k++ ) {
      Node *n = bs->_nodes[k];
      if( !n->is_Phi() ) break;
      combine_these_two( n, n->in(j) );
    }
  } // End of for all successor blocks

  // Check _this_ block for 2-address instructions and copies.
  uint cnt = b->end_idx();
  for( i = 1; i<cnt; i++ ) {
    Node *n = b->_nodes[i];
    uint idx;
    // 2-address instructions have a virtual Copy matching their input
    // to their output
    if( n->is_Mach() && (idx = n->as_Mach()->two_adr()) ) {
      MachNode *mach = n->as_Mach();
      combine_these_two( mach, mach->in(idx) );
    }
  } // End of for all instructions in block
}

//=============================================================================
//------------------------------PhaseConservativeCoalesce----------------------
PhaseConservativeCoalesce::PhaseConservativeCoalesce( PhaseChaitin &chaitin ) : PhaseCoalesce(chaitin) {
  _ulr.initialize(_phc._maxlrg);
}

//------------------------------verify-----------------------------------------
void PhaseConservativeCoalesce::verify() {
#ifdef ASSERT
  _phc.set_was_low();
#endif
}

//------------------------------union_helper-----------------------------------
void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, uint lr1, uint lr2, Node *src_def, Node *dst_copy, Node *src_copy, Block *b, uint bindex ) {
  // Join live ranges.  Merge larger into smaller.  Union lr2 into lr1 in the
  // union-find tree
  _phc.Union( lr1_node, lr2_node );

  // Single-def live range ONLY if both live ranges are single-def.
  // If both are single def, then src_def powers one live range
  // and def_copy powers the other.  After merging, src_def powers
  // the combined live range.
  lrgs(lr1)._def = (lrgs(lr1).is_multidef() ||
                        lrgs(lr2).is_multidef() )
    ? NodeSentinel : src_def;
  lrgs(lr2)._def = NULL;    // No def for lrg 2
  lrgs(lr2).Clear();        // Force empty mask for LRG 2
  //lrgs(lr2)._size = 0;      // Live-range 2 goes dead
  lrgs(lr1)._is_oop |= lrgs(lr2)._is_oop;
  lrgs(lr2)._is_oop = 0;    // In particular, not an oop for GC info

  if (lrgs(lr1)._maxfreq < lrgs(lr2)._maxfreq)
    lrgs(lr1)._maxfreq = lrgs(lr2)._maxfreq;

  // Copy original value instead.  Intermediate copies go dead, and
  // the dst_copy becomes useless.
  int didx = dst_copy->is_Copy();
  dst_copy->set_req( didx, src_def );
  // Add copy to free list
  // _phc.free_spillcopy(b->_nodes[bindex]);
  assert( b->_nodes[bindex] == dst_copy, "" );
  dst_copy->replace_by( dst_copy->in(didx) );
  dst_copy->set_req( didx, NULL);
  b->_nodes.remove(bindex);
  if( bindex < b->_ihrp_index ) b->_ihrp_index--;
  if( bindex < b->_fhrp_index ) b->_fhrp_index--;

  // Stretched lr1; add it to liveness of intermediate blocks
  Block *b2 = _phc._cfg._bbs[src_copy->_idx];
  while( b != b2 ) {
    b = _phc._cfg._bbs[b->pred(1)->_idx];
    _phc._live->live(b)->insert(lr1);
  }
}

//------------------------------compute_separating_interferences---------------
// Factored code from copy_copy that computes extra interferences from
// lengthening a live range by double-coalescing.
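// The scan below walks backwards from dst_copy toward src_copy, one
// instruction at a time (crossing a block boundary only when the block
// has a single predecessor), and treats every node that is not part of
// the copy chain as a possible new interference for the stretched lr.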
uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy, Node *src_copy, Block *b, uint bindex, RegMask &rm, uint reg_degree, uint rm_size, uint lr1, uint lr2 ) {

  assert(!lrgs(lr1)._fat_proj, "cannot coalesce fat_proj");
  assert(!lrgs(lr2)._fat_proj, "cannot coalesce fat_proj");
  Node *prev_copy = dst_copy->in(dst_copy->is_Copy());
  Block *b2 = b;
  uint bindex2 = bindex;
  while( 1 ) {
    // Find previous instruction
    bindex2--;                  // Chain backwards 1 instruction
    while( bindex2 == 0 ) {     // At block start, find prior block
      assert( b2->num_preds() == 2, "cannot double coalesce across c-flow" );
      b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
      bindex2 = b2->end_idx()-1;
    }
    // Get prior instruction
    assert(bindex2 < b2->_nodes.size(), "index out of bounds");
    Node *x = b2->_nodes[bindex2];
    if( x == prev_copy ) {      // Previous copy in copy chain?
      if( prev_copy == src_copy)// Found end of chain and all interferences
        break;                  // So break out of loop
      // Else work back one in copy chain
      prev_copy = prev_copy->in(prev_copy->is_Copy());
    } else {                    // Else collect interferences
      uint lidx = _phc.Find(x);
      // Found another def of live-range being stretched?
      if( lidx == lr1 ) return max_juint;
      if( lidx == lr2 ) return max_juint;

      // If we attempt to coalesce across a bound def
      if( lrgs(lidx).is_bound() ) {
        // Do not let the coalesced LRG expect to get the bound color
        rm.SUBTRACT( lrgs(lidx).mask() );
        // Recompute rm_size
        rm_size = rm.Size();
        //if( rm._flags ) rm_size += 1000000;
        if( reg_degree >= rm_size ) return max_juint;
      }
      if( rm.overlap(lrgs(lidx).mask()) ) {
        // Insert lidx into union LRG; returns TRUE if actually inserted
        if( _ulr.insert(lidx) ) {
          // Infinite-stack neighbors do not alter colorability, as they
          // can always color to some other color.
          if( !lrgs(lidx).mask().is_AllStack() ) {
            // If this coalesce will make any new neighbor uncolorable,
            // do not coalesce.
            if( lrgs(lidx).just_lo_degree() )
              return max_juint;
            // Bump our degree
            if( ++reg_degree >= rm_size )
              return max_juint;
          } // End of if not infinite-stack neighbor
        } // End of if actually inserted
      } // End of if live range overlaps
    } // End of else collect interferences for 1 node
  } // End of while forever, scan back for interferences
  return reg_degree;
}

//------------------------------update_ifg-------------------------------------
void PhaseConservativeCoalesce::update_ifg(uint lr1, uint lr2, IndexSet *n_lr1, IndexSet *n_lr2) {
  // Some original neighbors of lr1 might have gone away
  // because the constrained register mask prevented them.
  // Remove lr1 from such neighbors.
  IndexSetIterator one(n_lr1);
  uint neighbor;
  LRG &lrg1 = lrgs(lr1);
  while ((neighbor = one.next()) != 0)
    if( !_ulr.member(neighbor) )
      if( _phc._ifg->neighbors(neighbor)->remove(lr1) )
        lrgs(neighbor).inc_degree( -lrg1.compute_degree(lrgs(neighbor)) );

  // lr2 is now called (coalesced into) lr1.
  // Remove lr2 from the IFG.
  IndexSetIterator two(n_lr2);
  LRG &lrg2 = lrgs(lr2);
  while ((neighbor = two.next()) != 0)
    if( _phc._ifg->neighbors(neighbor)->remove(lr2) )
      lrgs(neighbor).inc_degree( -lrg2.compute_degree(lrgs(neighbor)) );

  // Some neighbors of intermediate copies now interfere with the
  // combined live range.
  IndexSetIterator three(&_ulr);
  while ((neighbor = three.next()) != 0)
    if( _phc._ifg->neighbors(neighbor)->insert(lr1) )
      lrgs(neighbor).inc_degree( lrg1.compute_degree(lrgs(neighbor)) );
}

//------------------------------record_bias------------------------------------
static void record_bias( const PhaseIFG *ifg, int lr1, int lr2 ) {
  // Tag copy bias here
  if( !ifg->lrgs(lr1)._copy_bias )
    ifg->lrgs(lr1)._copy_bias = lr2;
  if( !ifg->lrgs(lr2)._copy_bias )
    ifg->lrgs(lr2)._copy_bias = lr1;
}

//------------------------------copy_copy--------------------------------------
// See if I can coalesce a series of multiple copies together.  I need the
// final dest copy and the original src copy.  They can be the same Node.
// Compute the compatible register masks.
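// Example chain: src_def -> src_copy -> ... -> dst_copy.  If dst_copy's
// live range can be unioned all the way back to src_def's, dst_copy is
// rewired to src_def and deleted, and the intermediate copies go dead.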
bool PhaseConservativeCoalesce::copy_copy( Node *dst_copy, Node *src_copy, Block *b, uint bindex ) {

  if( !dst_copy->is_SpillCopy() ) return false;
  if( !src_copy->is_SpillCopy() ) return false;
  Node *src_def = src_copy->in(src_copy->is_Copy());
  uint lr1 = _phc.Find(dst_copy);
  uint lr2 = _phc.Find(src_def );

  // Same live ranges already?
  if( lr1 == lr2 ) return false;

  // Interfere?
  if( _phc._ifg->test_edge_sq( lr1, lr2 ) ) return false;

  // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK.
  if( !lrgs(lr1)._is_oop && lrgs(lr2)._is_oop ) // not an oop->int cast
    return false;

  // Coalescing between an aligned live range and a mis-aligned live range?
  // No, no!  Alignment changes how we count degree.
  if( lrgs(lr1)._fat_proj != lrgs(lr2)._fat_proj )
    return false;

  // Sort; use smaller live-range number
  Node *lr1_node = dst_copy;
  Node *lr2_node = src_def;
  if( lr1 > lr2 ) {
    uint tmp = lr1; lr1 = lr2; lr2 = tmp;
    lr1_node = src_def;  lr2_node = dst_copy;
  }

  // Check for compatibility of the 2 live ranges by
  // intersecting their allowed register sets.
  RegMask rm = lrgs(lr1).mask();
  rm.AND(lrgs(lr2).mask());
  // Number of bits free
  uint rm_size = rm.Size();

  if (UseFPUForSpilling && rm.is_AllStack() ) {
    // Don't coalesce when frequency difference is large
    Block *dst_b = _phc._cfg._bbs[dst_copy->_idx];
    Block *src_def_b = _phc._cfg._bbs[src_def->_idx];
    if (src_def_b->_freq > 10*dst_b->_freq )
      return false;
  }

  // If we can use any stack slot, then effective size is infinite
  if( rm.is_AllStack() ) rm_size += 1000000;
  // Incompatible masks, no way to coalesce
  if( rm_size == 0 ) return false;

  // Another early bail-out test is when we are double-coalescing and the
  // 2 copies are separated by some control flow.
  if( dst_copy != src_copy ) {
    Block *src_b = _phc._cfg._bbs[src_copy->_idx];
    Block *b2 = b;
    while( b2 != src_b ) {
      if( b2->num_preds() > 2 ) { // Found merge-point
        _phc._lost_opp_cflow_coalesce++;
        // extra record_bias commented out because Chris believes it is not
        // productive.  Since we can record only 1 bias, we want to choose one
        // that stands a chance of working and this one probably does not.
        //record_bias( _phc._lrgs, lr1, lr2 );
        return false;           // Too hard to find all interferences
      }
      b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
    }
  }

  // Union the two interference sets together into '_ulr'
  uint reg_degree = _ulr.lrg_union( lr1, lr2, rm_size, _phc._ifg, rm );

  if( reg_degree >= rm_size ) {
    record_bias( _phc._ifg, lr1, lr2 );
    return false;
  }

  // Now I need to compute all the interferences between dst_copy and
  // src_copy.  I'm not willing to visit the entire interference graph, so
  // I limit my search to things in dst_copy's block or in a straight
  // line of previous blocks.  I give up at merge points or when I get
  // more interferences than my degree.  I can stop when I find src_copy.
  if( dst_copy != src_copy ) {
    reg_degree = compute_separating_interferences(dst_copy, src_copy, b, bindex, rm, reg_degree, rm_size, lr1, lr2 );
    if( reg_degree == max_juint ) {
      record_bias( _phc._ifg, lr1, lr2 );
      return false;
    }
  } // End of if dst_copy & src_copy are different

  // ---- THE COMBINED LRG IS COLORABLE ----

  // YEAH - Now coalesce this copy away
  assert( lrgs(lr1).num_regs() == lrgs(lr2).num_regs(),   "" );

  IndexSet *n_lr1 = _phc._ifg->neighbors(lr1);
  IndexSet *n_lr2 = _phc._ifg->neighbors(lr2);

  // Update the interference graph
  update_ifg(lr1, lr2, n_lr1, n_lr2);

  _ulr.remove(lr1);

  // Uncomment the following code to trace Coalescing in great detail.
  //
  //if (false) {
  //  tty->cr();
  //  tty->print_cr("#######################################");
  //  tty->print_cr("union %d and %d", lr1, lr2);
  //  n_lr1->dump();
  //  n_lr2->dump();
  //  tty->print_cr("resulting set is");
  //  _ulr.dump();
  //}

  // Replace n_lr1 with the new combined live range.  _ulr will use
  // n_lr1's old memory on the next iteration.  n_lr2 is cleared to
  // send its internal memory to the free list.
  _ulr.swap(n_lr1);
  _ulr.clear();
  n_lr2->clear();

  lrgs(lr1).set_degree( _phc._ifg->effective_degree(lr1) );
  lrgs(lr2).set_degree( 0 );

  // Join live ranges.  Merge larger into smaller.  Union lr2 into lr1 in the
  // union-find tree
  union_helper( lr1_node, lr2_node, lr1, lr2, src_def, dst_copy, src_copy, b, bindex );
  // Combine register restrictions
  lrgs(lr1).set_mask(rm);
  lrgs(lr1).compute_set_mask_size();
  lrgs(lr1)._cost += lrgs(lr2)._cost;
  lrgs(lr1)._area += lrgs(lr2)._area;

  // While it's uncommon to successfully coalesce live ranges that started out
  // being not-lo-degree, it can happen.  In any case the combined coalesced
  // live range had better Simplify nicely.
  lrgs(lr1)._was_lo = 1;

  // kinda expensive to do all the time
  //tty->print_cr("warning: slow verify happening");
  //_phc._ifg->verify( &_phc );
  return true;
}

//------------------------------coalesce---------------------------------------
// Conservative (but pessimistic) copy coalescing of a single block
void PhaseConservativeCoalesce::coalesce( Block *b ) {
  // Bail out on infrequent blocks
  if( b->is_uncommon(_phc._cfg._bbs) )
    return;
  // Check this block for copies.
  for( uint i = 1; i<b->end_idx(); i++ ) {
    // Check for actual copies on inputs.  Coalesce a copy into its
    // input if use and copy's input are compatible.
    Node *copy1 = b->_nodes[i];
    uint idx1 = copy1->is_Copy();
    if( !idx1 ) continue;       // Not a copy

    if( copy_copy(copy1,copy1,b,i) ) {
      i--;                      // Retry, same location in block
      PhaseChaitin::_conserv_coalesce++;  // Collect stats on success
      continue;
    }

    /* do not attempt pairs.  About 1/2 of all pairs can be removed by
       post-alloc.  The other set are too few to bother.
    Node *copy2 = copy1->in(idx1);
    uint idx2 = copy2->is_Copy();
    if( !idx2 ) continue;
    if( copy_copy(copy1,copy2,b,i) ) {
      i--;                      // Retry, same location in block
      PhaseChaitin::_conserv_coalesce_pair++; // Collect stats on success
      continue;
    }
    */
  }
}
