src/share/vm/opto/coalesce.cpp

author:      drchase
date:        Tue, 02 Jul 2013 20:42:12 -0400
changeset:   5353:b800986664f4
parent:      5285:693e4d04fd09
child:       5509:d1034bd8cefc
permissions: -rw-r--r--

7088419: Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32
Summary: add intrinsics using new instruction to interpreter, C1, C2, for suitable x86; add test
Reviewed-by: kvn, twisti

     1 /*
     2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "memory/allocation.inline.hpp"
    27 #include "opto/block.hpp"
    28 #include "opto/cfgnode.hpp"
    29 #include "opto/chaitin.hpp"
    30 #include "opto/coalesce.hpp"
    31 #include "opto/connode.hpp"
    32 #include "opto/indexSet.hpp"
    33 #include "opto/machnode.hpp"
    34 #include "opto/matcher.hpp"
    35 #include "opto/regmask.hpp"
    37 //=============================================================================
    38 //------------------------------Dump-------------------------------------------
    39 #ifndef PRODUCT
    40 void PhaseCoalesce::dump(Node *n) const {
    41   // Being a const function means I cannot use 'Find'
    42   uint r = _phc._lrg_map.find(n);
    43   tty->print("L%d/N%d ",r,n->_idx);
    44 }
    46 //------------------------------dump-------------------------------------------
    47 void PhaseCoalesce::dump() const {
    48   // I know I have a block layout now, so I can print blocks in a loop
    49   for( uint i=0; i<_phc._cfg._num_blocks; i++ ) {
    50     uint j;
    51     Block *b = _phc._cfg._blocks[i];
    52     // Print a nice block header
    53     tty->print("B%d: ",b->_pre_order);
    54     for( j=1; j<b->num_preds(); j++ )
    55       tty->print("B%d ", _phc._cfg._bbs[b->pred(j)->_idx]->_pre_order);
    56     tty->print("-> ");
    57     for( j=0; j<b->_num_succs; j++ )
    58       tty->print("B%d ",b->_succs[j]->_pre_order);
    59     tty->print(" IDom: B%d/#%d\n", b->_idom ? b->_idom->_pre_order : 0, b->_dom_depth);
    60     uint cnt = b->_nodes.size();
    61     for( j=0; j<cnt; j++ ) {
    62       Node *n = b->_nodes[j];
    63       dump( n );
    64       tty->print("\t%s\t",n->Name());
    66       // Dump the inputs
    67       uint k;                   // Exit value of loop
    68       for( k=0; k<n->req(); k++ ) // For all required inputs
    69         if( n->in(k) ) dump( n->in(k) );
    70         else tty->print("_ ");
    71       int any_prec = 0;
    72       for( ; k<n->len(); k++ )          // For all precedence inputs
    73         if( n->in(k) ) {
    74           if( !any_prec++ ) tty->print(" |");
    75           dump( n->in(k) );
    76         }
    78       // Dump node-specific info
    79       n->dump_spec(tty);
    80       tty->print("\n");
    82     }
    83     tty->print("\n");
    84   }
    85 }
    86 #endif
    88 //------------------------------combine_these_two------------------------------
    89 // Combine the live ranges def'd by these 2 Nodes.  N2 is an input to N1.
    90 void PhaseCoalesce::combine_these_two(Node *n1, Node *n2) {
    91   uint lr1 = _phc._lrg_map.find(n1);
    92   uint lr2 = _phc._lrg_map.find(n2);
    93   if( lr1 != lr2 &&             // Different live ranges already AND
    94       !_phc._ifg->test_edge_sq( lr1, lr2 ) ) {  // Do not interfere
    95     LRG *lrg1 = &_phc.lrgs(lr1);
    96     LRG *lrg2 = &_phc.lrgs(lr2);
    97     // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK.
    99     // Now, why is int->oop OK?  We end up declaring a raw-pointer as an oop
   100     // and in general that's a bad thing.  However, int->oop conversions only
   101     // happen at GC points, so the lifetime of the misclassified raw-pointer
   102     // is from the CheckCastPP (that converts it to an oop) backwards up
   103     // through a merge point and into the slow-path call, and around the
   104     // diamond up to the heap-top check and back down into the slow-path call.
   105     // The misclassified raw pointer is NOT live across the slow-path call,
   106     // and so does not appear in any GC info, so the fact that it is
   107     // misclassified is OK.
   109     if( (lrg1->_is_oop || !lrg2->_is_oop) && // not an oop->int cast AND
   110         // Compatible final mask
   111         lrg1->mask().overlap( lrg2->mask() ) ) {
   112       // Merge larger into smaller.
   113       if( lr1 > lr2 ) {
   114         uint  tmp =  lr1;  lr1 =  lr2;  lr2 =  tmp;
   115         Node   *n =   n1;   n1 =   n2;   n2 =    n;
   116         LRG *ltmp = lrg1; lrg1 = lrg2; lrg2 = ltmp;
   117       }
   118       // Union lr2 into lr1
   119       _phc.Union( n1, n2 );
   120       if (lrg1->_maxfreq < lrg2->_maxfreq)
   121         lrg1->_maxfreq = lrg2->_maxfreq;
   122       // Merge in the IFG
   123       _phc._ifg->Union( lr1, lr2 );
   124       // Combine register restrictions
   125       lrg1->AND(lrg2->mask());
   126     }
   127   }
   128 }
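A minimal standalone sketch of the compatibility test above, using plain bitmasks instead of the real RegMask class (the mask values and register names are made up for illustration): two live ranges can only be merged when their allowed-register sets overlap, and the merged range is then restricted to the intersection, mirroring lrg1->AND(lrg2->mask()).

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t mask1 = 0x0F;   // lrg1 may be assigned r0..r3
  uint32_t mask2 = 0x3C;   // lrg2 may be assigned r2..r5
  if ((mask1 & mask2) != 0) {            // overlap(): any common register?
    uint32_t merged = mask1 & mask2;     // AND(): combined restriction is r2..r3
    printf("coalesced, merged mask = 0x%02X\n", merged);
  } else {
    printf("masks are disjoint, cannot coalesce\n");
  }
  return 0;
}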
   130 //------------------------------coalesce_driver--------------------------------
   131 // Copy coalescing
   132 void PhaseCoalesce::coalesce_driver( ) {
   134   verify();
   135   // Coalesce from high frequency to low
   136   for( uint i=0; i<_phc._cfg._num_blocks; i++ )
   137     coalesce( _phc._blks[i] );
   139 }
   141 //------------------------------insert_copy_with_overlap-----------------------
   142 // I am inserting copies to come out of SSA form.  In the general case, I am
   143 // doing a parallel renaming.  I'm in the Named world now, so I can't do a
   144 // general parallel renaming.  All the copies now use  "names" (live-ranges)
   145 // to carry values instead of the explicit use-def chains.  Suppose I need to
   146 // insert 2 copies into the same block.  They copy L161->L128 and L128->L132.
   147 // If I insert them in the wrong order then L128 will get clobbered before it
   148 // can get used by the second copy.  This cannot happen in the SSA model;
   149 // direct use-def chains get me the right value.  It DOES happen in the named
   150 // model so I have to handle the reordering of copies.
   151 //
   152 // In general, I need to topo-sort the placed copies to avoid conflicts.
   153 // It's possible to have a closed cycle of copies (e.g., recirculating the same
   154 // values around a loop).  In this case I need a temp to break the cycle.
   155 void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, uint dst_name, uint src_name ) {
   157   // Scan backwards for the location of the last use of the dst_name.
   158   // I am about to clobber the dst_name, so the copy must be inserted
   159   // after the last use.  Last use is really first-use on a backwards scan.
   160   uint i = b->end_idx()-1;
   161   while(1) {
   162     Node *n = b->_nodes[i];
   163     // Check for end of virtual copies; this is also the end of the
   164     // parallel renaming effort.
   165     if (n->_idx < _unique) {
   166       break;
   167     }
   168     uint idx = n->is_Copy();
   169     assert( idx || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
   170     if (idx && _phc._lrg_map.find(n->in(idx)) == dst_name) {
   171       break;
   172     }
   173     i--;
   174   }
   175   uint last_use_idx = i;
   177   // Also search for any kill of src_name that exits the block.
   178   // Since the copy uses src_name, I have to come before any kill.
   179   uint kill_src_idx = b->end_idx();
   180   // There can be only 1 kill that exits any block and that is
   181   // the last kill.  Thus it is the first kill on a backwards scan.
   182   i = b->end_idx()-1;
   183   while (1) {
   184     Node *n = b->_nodes[i];
   185     // Check for end of virtual copies; this is also the end of the
   186     // parallel renaming effort.
   187     if (n->_idx < _unique) {
   188       break;
   189     }
   190     assert( n->is_Copy() || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
   191     if (_phc._lrg_map.find(n) == src_name) {
   192       kill_src_idx = i;
   193       break;
   194     }
   195     i--;
   196   }
   197   // Need a temp?  Last use of dst comes after the kill of src?
   198   if (last_use_idx >= kill_src_idx) {
   199     // Need to break a cycle with a temp
   200     uint idx = copy->is_Copy();
   201     Node *tmp = copy->clone();
   202     uint max_lrg_id = _phc._lrg_map.max_lrg_id();
   203     _phc.new_lrg(tmp, max_lrg_id);
   204     _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);
   206     // Insert new temp between copy and source
   207     tmp ->set_req(idx,copy->in(idx));
   208     copy->set_req(idx,tmp);
   209     // Save source in temp early, before source is killed
   210     b->_nodes.insert(kill_src_idx,tmp);
   211     _phc._cfg._bbs.map( tmp->_idx, b );
   212     last_use_idx++;
   213   }
   215   // Insert just after last use
   216   b->_nodes.insert(last_use_idx+1,copy);
   217 }
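As a self-contained illustration of the ordering problem described in the comment block above (plain ints standing in for live ranges; this is not HotSpot code): emitting the parallel copy set {a <- b, b <- a} as two sequential copies clobbers one of the values, and a temporary breaks the cycle in the same way the cloned temp copy inserted at kill_src_idx does.

#include <cstdio>

int main() {
  int a = 10, b = 20;
  // Naive sequential emission of the parallel copies {a <- b, b <- a}:
  // after "a = b;" the original value of a is gone, so "b = a;" would
  // copy the wrong value and both names would end up holding 20.

  // Breaking the cycle with a temporary, saved before the source is killed:
  int tmp = a;     // save the value about to be clobbered
  a = b;           // a <- b
  b = tmp;         // b <- old a
  printf("a=%d b=%d\n", a, b);   // prints a=20 b=10
  return 0;
}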
   219 //------------------------------insert_copies----------------------------------
   220 void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
   221   // We do LRG compression and fix the liveout data only here, since the other
   222   // place, in Split(), is guarded by an assert which we never hit.
   223   _phc._lrg_map.compress_uf_map_for_nodes();
   224   // Fix block's liveout data for compressed live ranges.
   225   for (uint lrg = 1; lrg < _phc._lrg_map.max_lrg_id(); lrg++) {
   226     uint compressed_lrg = _phc._lrg_map.find(lrg);
   227     if (lrg != compressed_lrg) {
   228       for (uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++) {
   229         IndexSet *liveout = _phc._live->live(_phc._cfg._blocks[bidx]);
   230         if (liveout->member(lrg)) {
   231           liveout->remove(lrg);
   232           liveout->insert(compressed_lrg);
   233         }
   234       }
   235     }
   236   }
   238   // All new nodes added are actual copies to replace virtual copies.
   239   // Nodes with index less than '_unique' are original, non-virtual Nodes.
   240   _unique = C->unique();
   242   for( uint i=0; i<_phc._cfg._num_blocks; i++ ) {
   243     C->check_node_count(NodeLimitFudgeFactor, "out of nodes in coalesce");
   244     if (C->failing()) return;
   245     Block *b = _phc._cfg._blocks[i];
   246     uint cnt = b->num_preds();  // Number of inputs to the Phi
   248     for( uint l = 1; l<b->_nodes.size(); l++ ) {
   249       Node *n = b->_nodes[l];
   251       // Do not use removed-copies, use copied value instead
   252       uint ncnt = n->req();
   253       for( uint k = 1; k<ncnt; k++ ) {
   254         Node *copy = n->in(k);
   255         uint cidx = copy->is_Copy();
   256         if( cidx ) {
   257           Node *def = copy->in(cidx);
   258           if (_phc._lrg_map.find(copy) == _phc._lrg_map.find(def)) {
   259             n->set_req(k, def);
   260           }
   261         }
   262       }
   264       // Remove any explicit copies that get coalesced.
   265       uint cidx = n->is_Copy();
   266       if( cidx ) {
   267         Node *def = n->in(cidx);
   268         if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) {
   269           n->replace_by(def);
   270           n->set_req(cidx,NULL);
   271           b->_nodes.remove(l);
   272           l--;
   273           continue;
   274         }
   275       }
   277       if (n->is_Phi()) {
   278         // Get the chosen name for the Phi
   279         uint phi_name = _phc._lrg_map.find(n);
   280         // Ignore the pre-allocated specials
   281         if (!phi_name) {
   282           continue;
   283         }
   284         // Check for mismatch inputs to Phi
   285         for (uint j = 1; j < cnt; j++) {
   286           Node *m = n->in(j);
   287           uint src_name = _phc._lrg_map.find(m);
   288           if (src_name != phi_name) {
   289             Block *pred = _phc._cfg._bbs[b->pred(j)->_idx];
   290             Node *copy;
   291             assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
   292             // Rematerialize constants instead of copying them
   293             if( m->is_Mach() && m->as_Mach()->is_Con() &&
   294                 m->as_Mach()->rematerialize() ) {
   295               copy = m->clone();
   296               // Insert the copy in the predecessor basic block
   297               pred->add_inst(copy);
   298               // Copy any flags as well
   299               _phc.clone_projs(pred, pred->end_idx(), m, copy, _phc._lrg_map);
   300             } else {
   301               const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
   302               copy = new (C) MachSpillCopyNode(m, *rm, *rm);
   303               // Find a good place to insert.  Kinda tricky, use a subroutine
   304               insert_copy_with_overlap(pred,copy,phi_name,src_name);
   305             }
   306             // Insert the copy in the use-def chain
   307             n->set_req(j, copy);
   308             _phc._cfg._bbs.map( copy->_idx, pred );
   309             // Extend ("register allocate") the names array for the copy.
   310             _phc._lrg_map.extend(copy->_idx, phi_name);
   311           } // End of if Phi names do not match
   312         } // End of for all inputs to Phi
   313       } else { // End of if Phi
   315         // Now check for 2-address instructions
   316         uint idx;
   317         if( n->is_Mach() && (idx=n->as_Mach()->two_adr()) ) {
   318           // Get the chosen name for the Node
   319           uint name = _phc._lrg_map.find(n);
   320           assert (name, "no 2-address specials");
   321           // Check for name mis-match on the 2-address input
   322           Node *m = n->in(idx);
   323           if (_phc._lrg_map.find(m) != name) {
   324             Node *copy;
   325             assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
   326             // At this point it is unsafe to extend live ranges (6550579).
   327             // Rematerialize only constants as we do for Phi above.
   328             if(m->is_Mach() && m->as_Mach()->is_Con() &&
   329                m->as_Mach()->rematerialize()) {
   330               copy = m->clone();
   331               // Insert the copy in the basic block, just before us
   332               b->_nodes.insert(l++, copy);
   333               if(_phc.clone_projs(b, l, m, copy, _phc._lrg_map)) {
   334                 l++;
   335               }
   336             } else {
   337               const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
   338               copy = new (C) MachSpillCopyNode(m, *rm, *rm);
   339               // Insert the copy in the basic block, just before us
   340               b->_nodes.insert(l++, copy);
   341             }
   342             // Insert the copy in the use-def chain
   343             n->set_req(idx, copy);
   344             // Extend ("register allocate") the names array for the copy.
   345             _phc._lrg_map.extend(copy->_idx, name);
   346             _phc._cfg._bbs.map( copy->_idx, b );
   347           }
   349         } // End of is two-adr
   351         // Insert a copy at a debug use for a lrg which has high frequency
   352         if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs)) {
   353           // Walk the debug inputs to the node and check for lrg freq
   354           JVMState* jvms = n->jvms();
   355           uint debug_start = jvms ? jvms->debug_start() : 999999;
   356           uint debug_end   = jvms ? jvms->debug_end()   : 999999;
   357           for(uint inpidx = debug_start; inpidx < debug_end; inpidx++) {
   358             // Do not split monitors; they are only needed for debug table
   359             // entries and need no code.
   360             if (jvms->is_monitor_use(inpidx)) {
   361               continue;
   362             }
   363             Node *inp = n->in(inpidx);
   364             uint nidx = _phc._lrg_map.live_range_id(inp);
   365             LRG &lrg = lrgs(nidx);
   367             // If this lrg has a high frequency use/def
   368             if( lrg._maxfreq >= _phc.high_frequency_lrg() ) {
   369               // If the live range is also live out of this block (like it
   370               // would be for a fast/slow idiom), the normal spill mechanism
   371               // does an excellent job.  If it is not live out of this block
   372               // (like it would be for debug info to uncommon trap) splitting
   373               // the live range now allows a better allocation in the high
   374               // frequency blocks.
   375               //   Build_IFG_virtual has converted the live sets to
   376               // live-IN info, not live-OUT info.
   377               uint k;
   378               for( k=0; k < b->_num_succs; k++ )
   379                 if( _phc._live->live(b->_succs[k])->member( nidx ) )
   380                   break;      // Live in to some successor block?
   381               if( k < b->_num_succs )
   382                 continue;     // Live out; do not pre-split
   383               // Split the lrg at this use
   384               const RegMask *rm = C->matcher()->idealreg2spillmask[inp->ideal_reg()];
   385               Node *copy = new (C) MachSpillCopyNode( inp, *rm, *rm );
   386               // Insert the copy in the use-def chain
   387               n->set_req(inpidx, copy );
   388               // Insert the copy in the basic block, just before us
   389               b->_nodes.insert( l++, copy );
   390               // Extend ("register allocate") the names array for the copy.
   391               uint max_lrg_id = _phc._lrg_map.max_lrg_id();
   392               _phc.new_lrg(copy, max_lrg_id);
   393               _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);
   394               _phc._cfg._bbs.map(copy->_idx, b);
   395               //tty->print_cr("Split a debug use in Aggressive Coalesce");
   396             }  // End of if high frequency use/def
   397           }  // End of for all debug inputs
   398         }  // End of if low frequency safepoint
   400       } // End of if Phi
   402     } // End of for all instructions
   403   } // End of for all blocks
   404 }
   406 //=============================================================================
   407 //------------------------------coalesce---------------------------------------
   408 // Aggressive (but pessimistic) copy coalescing of a single block
   410 // The following coalesce pass represents a single round of aggressive
   411 // pessimistic coalesce.  "Aggressive" means no attempt to preserve
   412 // colorability when coalescing.  This occasionally means more spills, but
   413 // it also means fewer rounds of coalescing for better code - and that means
   414 // faster compiles.
   416 // "Pessimistic" means we do not hit the fixed point in one pass (and we are
   417 // reaching for the least fixed point to boot).  This is typically solved
   418 // with a few more rounds of coalescing, but the compiler must run fast.  We
   419 // could optimistically coalesce everything touching PhiNodes together
   420 // into one big live range, then check for self-interference.  Everywhere
   421 // the live range interferes with self it would have to be split.  Finding
   422 // the right split points can be done with some heuristics (based on
   423 // expected frequency of edges in the live range).  In short, it's a real
   424 // research problem and the timeline is too short to allow such research.
   425 // Further thoughts: (1) build the LR in a pass, (2) find self-interference
   426 // in another pass, (3) per each self-conflict, split, (4) split by finding
   427 // the low-cost cut (min-cut) of the LR, (5) edges in the LR are weighted
   428 // according to the GCM algorithm (or just exec freq on CFG edges).
   430 void PhaseAggressiveCoalesce::coalesce( Block *b ) {
   431   // Copies are still "virtual" - meaning we have not made them explicitly
   432   // copies.  Instead, Phi functions of successor blocks have mis-matched
   433   // live-ranges.  If I fail to coalesce, I'll have to insert a copy to line
   434   // up the live-ranges.  Check for Phis in successor blocks.
   435   uint i;
   436   for( i=0; i<b->_num_succs; i++ ) {
   437     Block *bs = b->_succs[i];
   438     // Find index of 'b' in 'bs' predecessors
   439     uint j=1;
   440     while( _phc._cfg._bbs[bs->pred(j)->_idx] != b ) j++;
   441     // Visit all the Phis in successor block
   442     for( uint k = 1; k<bs->_nodes.size(); k++ ) {
   443       Node *n = bs->_nodes[k];
   444       if( !n->is_Phi() ) break;
   445       combine_these_two( n, n->in(j) );
   446     }
   447   } // End of for all successor blocks
   450   // Check _this_ block for 2-address instructions and copies.
   451   uint cnt = b->end_idx();
   452   for( i = 1; i<cnt; i++ ) {
   453     Node *n = b->_nodes[i];
   454     uint idx;
   455     // 2-address instructions have a virtual Copy matching their input
   456     // to their output
   457     if (n->is_Mach() && (idx = n->as_Mach()->two_adr())) {
   458       MachNode *mach = n->as_Mach();
   459       combine_these_two(mach, mach->in(idx));
   460     }
   461   } // End of for all instructions in block
   462 }
   464 //=============================================================================
   465 //------------------------------PhaseConservativeCoalesce----------------------
   466 PhaseConservativeCoalesce::PhaseConservativeCoalesce(PhaseChaitin &chaitin) : PhaseCoalesce(chaitin) {
   467   _ulr.initialize(_phc._lrg_map.max_lrg_id());
   468 }
   470 //------------------------------verify-----------------------------------------
   471 void PhaseConservativeCoalesce::verify() {
   472 #ifdef ASSERT
   473   _phc.set_was_low();
   474 #endif
   475 }
   477 //------------------------------union_helper-----------------------------------
   478 void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, uint lr1, uint lr2, Node *src_def, Node *dst_copy, Node *src_copy, Block *b, uint bindex ) {
   479   // Join live ranges.  Merge larger into smaller.  Union lr2 into lr1 in the
   480   // union-find tree
   481   _phc.Union( lr1_node, lr2_node );
   483   // Single-def live range ONLY if both live ranges are single-def.
   484   // If both are single def, then src_def powers one live range
   485   // and def_copy powers the other.  After merging, src_def powers
   486   // the combined live range.
   487   lrgs(lr1)._def = (lrgs(lr1).is_multidef() ||
   488                         lrgs(lr2).is_multidef() )
   489     ? NodeSentinel : src_def;
   490   lrgs(lr2)._def = NULL;    // No def for lrg 2
   491   lrgs(lr2).Clear();        // Force empty mask for LRG 2
   492   //lrgs(lr2)._size = 0;      // Live-range 2 goes dead
   493   lrgs(lr1)._is_oop |= lrgs(lr2)._is_oop;
   494   lrgs(lr2)._is_oop = 0;    // In particular, not an oop for GC info
   496   if (lrgs(lr1)._maxfreq < lrgs(lr2)._maxfreq)
   497     lrgs(lr1)._maxfreq = lrgs(lr2)._maxfreq;
   499   // Copy original value instead.  Intermediate copies go dead, and
   500   // the dst_copy becomes useless.
   501   int didx = dst_copy->is_Copy();
   502   dst_copy->set_req( didx, src_def );
   503   // Add copy to free list
   504   // _phc.free_spillcopy(b->_nodes[bindex]);
   505   assert( b->_nodes[bindex] == dst_copy, "" );
   506   dst_copy->replace_by( dst_copy->in(didx) );
   507   dst_copy->set_req( didx, NULL);
   508   b->_nodes.remove(bindex);
   509   if( bindex < b->_ihrp_index ) b->_ihrp_index--;
   510   if( bindex < b->_fhrp_index ) b->_fhrp_index--;
   512   // Stretched lr1; add it to liveness of intermediate blocks
   513   Block *b2 = _phc._cfg._bbs[src_copy->_idx];
   514   while( b != b2 ) {
   515     b = _phc._cfg._bbs[b->pred(1)->_idx];
   516     _phc._live->live(b)->insert(lr1);
   517   }
   518 }
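A toy restatement of the single-def rule applied above (made-up ToyLRG type, with nullptr standing in for NodeSentinel; not the real LRG class): the merged live range keeps src_def as its unique definition only when both inputs were single-def, and otherwise is marked multi-def.

#include <cstdio>

struct ToyLRG {
  const void* def;                        // defining node, or nullptr for "many defs"
  bool is_multidef() const { return def == nullptr; }
};

static ToyLRG merge(const ToyLRG& a, const ToyLRG& b, const void* src_def) {
  ToyLRG out;
  out.def = (a.is_multidef() || b.is_multidef()) ? nullptr : src_def;
  return out;
}

int main() {
  int d;                                  // stands in for the src_def node
  ToyLRG single_def{&d}, multi_def{nullptr};
  printf("%d\n", merge(single_def, single_def, &d).is_multidef());  // 0: still single-def
  printf("%d\n", merge(single_def, multi_def,  &d).is_multidef());  // 1: multi-def
  return 0;
}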
   520 //------------------------------compute_separating_interferences---------------
   521 // Factored code from copy_copy that computes extra interferences from
   522 // lengthening a live range by double-coalescing.
   523 uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy, Node *src_copy, Block *b, uint bindex, RegMask &rm, uint reg_degree, uint rm_size, uint lr1, uint lr2 ) {
   525   assert(!lrgs(lr1)._fat_proj, "cannot coalesce fat_proj");
   526   assert(!lrgs(lr2)._fat_proj, "cannot coalesce fat_proj");
   527   Node *prev_copy = dst_copy->in(dst_copy->is_Copy());
   528   Block *b2 = b;
   529   uint bindex2 = bindex;
   530   while( 1 ) {
   531     // Find previous instruction
   532     bindex2--;                  // Chain backwards 1 instruction
   533     while( bindex2 == 0 ) {     // At block start, find prior block
   534       assert( b2->num_preds() == 2, "cannot double coalesce across c-flow" );
   535       b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
   536       bindex2 = b2->end_idx()-1;
   537     }
   538     // Get prior instruction
   539     assert(bindex2 < b2->_nodes.size(), "index out of bounds");
   540     Node *x = b2->_nodes[bindex2];
   541     if( x == prev_copy ) {      // Previous copy in copy chain?
   542       if( prev_copy == src_copy)// Found end of chain and all interferences
   543         break;                  // So break out of loop
   544       // Else work back one in copy chain
   545       prev_copy = prev_copy->in(prev_copy->is_Copy());
   546     } else {                    // Else collect interferences
   547       uint lidx = _phc._lrg_map.find(x);
   548       // Found another def of live-range being stretched?
   549       if(lidx == lr1) {
   550         return max_juint;
   551       }
   552       if(lidx == lr2) {
   553         return max_juint;
   554       }
   556       // If we attempt to coalesce across a bound def
   557       if( lrgs(lidx).is_bound() ) {
   558         // Do not let the coalesced LRG expect to get the bound color
   559         rm.SUBTRACT( lrgs(lidx).mask() );
   560         // Recompute rm_size
   561         rm_size = rm.Size();
   562         //if( rm._flags ) rm_size += 1000000;
   563         if( reg_degree >= rm_size ) return max_juint;
   564       }
   565       if( rm.overlap(lrgs(lidx).mask()) ) {
   566         // Insert lidx into union LRG; returns TRUE if actually inserted
   567         if( _ulr.insert(lidx) ) {
   568           // Infinite-stack neighbors do not alter colorability, as they
   569           // can always color to some other color.
   570           if( !lrgs(lidx).mask().is_AllStack() ) {
   571             // If this coalesce will make any new neighbor uncolorable,
   572             // do not coalesce.
   573             if( lrgs(lidx).just_lo_degree() )
   574               return max_juint;
   575             // Bump our degree
   576             if( ++reg_degree >= rm_size )
   577               return max_juint;
   578           } // End of if not infinite-stack neighbor
   579         } // End of if actually inserted
   580       } // End of if live range overlaps
   581     } // End of else collect interferences for 1 node
   582   } // End of while forever, scan back for interferences
   583   return reg_degree;
   584 }
   586 //------------------------------update_ifg-------------------------------------
   587 void PhaseConservativeCoalesce::update_ifg(uint lr1, uint lr2, IndexSet *n_lr1, IndexSet *n_lr2) {
   588   // Some original neighbors of lr1 might have gone away
   589   // because the constrained register mask prevented them.
   590   // Remove lr1 from such neighbors.
   591   IndexSetIterator one(n_lr1);
   592   uint neighbor;
   593   LRG &lrg1 = lrgs(lr1);
   594   while ((neighbor = one.next()) != 0)
   595     if( !_ulr.member(neighbor) )
   596       if( _phc._ifg->neighbors(neighbor)->remove(lr1) )
   597         lrgs(neighbor).inc_degree( -lrg1.compute_degree(lrgs(neighbor)) );
   600   // lr2 is now called (coalesced into) lr1.
   601   // Remove lr2 from the IFG.
   602   IndexSetIterator two(n_lr2);
   603   LRG &lrg2 = lrgs(lr2);
   604   while ((neighbor = two.next()) != 0)
   605     if( _phc._ifg->neighbors(neighbor)->remove(lr2) )
   606       lrgs(neighbor).inc_degree( -lrg2.compute_degree(lrgs(neighbor)) );
   608   // Some neighbors of intermediate copies now interfere with the
   609   // combined live range.
   610   IndexSetIterator three(&_ulr);
   611   while ((neighbor = three.next()) != 0)
   612     if( _phc._ifg->neighbors(neighbor)->insert(lr1) )
   613       lrgs(neighbor).inc_degree( lrg1.compute_degree(lrgs(neighbor)) );
   614 }
   616 //------------------------------record_bias------------------------------------
   617 static void record_bias( const PhaseIFG *ifg, int lr1, int lr2 ) {
   618   // Tag copy bias here
   619   if( !ifg->lrgs(lr1)._copy_bias )
   620     ifg->lrgs(lr1)._copy_bias = lr2;
   621   if( !ifg->lrgs(lr2)._copy_bias )
   622     ifg->lrgs(lr2)._copy_bias = lr1;
   623 }
   625 //------------------------------copy_copy--------------------------------------
   626 // See if I can coalesce a series of multiple copies together.  I need the
   627 // final dest copy and the original src copy.  They can be the same Node.
   628 // Compute the compatible register masks.
   629 bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block *b, uint bindex) {
   631   if (!dst_copy->is_SpillCopy()) {
   632     return false;
   633   }
   634   if (!src_copy->is_SpillCopy()) {
   635     return false;
   636   }
   637   Node *src_def = src_copy->in(src_copy->is_Copy());
   638   uint lr1 = _phc._lrg_map.find(dst_copy);
   639   uint lr2 = _phc._lrg_map.find(src_def);
   641   // Same live ranges already?
   642   if (lr1 == lr2) {
   643     return false;
   644   }
   646   // Interfere?
   647   if (_phc._ifg->test_edge_sq(lr1, lr2)) {
   648     return false;
   649   }
   651   // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK.
   652   if (!lrgs(lr1)._is_oop && lrgs(lr2)._is_oop) { // not an oop->int cast
   653     return false;
   654   }
   656   // Coalescing between an aligned live range and a mis-aligned live range?
   657   // No, no!  Alignment changes how we count degree.
   658   if (lrgs(lr1)._fat_proj != lrgs(lr2)._fat_proj) {
   659     return false;
   660   }
   662   // Sort; use smaller live-range number
   663   Node *lr1_node = dst_copy;
   664   Node *lr2_node = src_def;
   665   if (lr1 > lr2) {
   666     uint tmp = lr1; lr1 = lr2; lr2 = tmp;
   667     lr1_node = src_def;  lr2_node = dst_copy;
   668   }
   670   // Check for compatibility of the 2 live ranges by
   671   // intersecting their allowed register sets.
   672   RegMask rm = lrgs(lr1).mask();
   673   rm.AND(lrgs(lr2).mask());
   674   // Number of bits free
   675   uint rm_size = rm.Size();
   677   if (UseFPUForSpilling && rm.is_AllStack() ) {
   678     // Don't coalesce when frequency difference is large
   679     Block *dst_b = _phc._cfg._bbs[dst_copy->_idx];
   680     Block *src_def_b = _phc._cfg._bbs[src_def->_idx];
   681     if (src_def_b->_freq > 10*dst_b->_freq )
   682       return false;
   683   }
   685   // If we can use any stack slot, then effective size is infinite
   686   if( rm.is_AllStack() ) rm_size += 1000000;
   687   // Incompatible masks, no way to coalesce
   688   if( rm_size == 0 ) return false;
   690   // Another early bail-out test is when we are double-coalescing and the
   691   // 2 copies are separated by some control flow.
   692   if( dst_copy != src_copy ) {
   693     Block *src_b = _phc._cfg._bbs[src_copy->_idx];
   694     Block *b2 = b;
   695     while( b2 != src_b ) {
   696       if( b2->num_preds() > 2 ){// Found merge-point
   697         _phc._lost_opp_cflow_coalesce++;
   698         // extra record_bias commented out because Chris believes it is not
   699         // productive.  Since we can record only 1 bias, we want to choose one
   700         // that stands a chance of working and this one probably does not.
   701         //record_bias( _phc._lrgs, lr1, lr2 );
   702         return false;           // Too hard to find all interferences
   703       }
   704       b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
   705     }
   706   }
   708   // Union the two interference sets together into '_ulr'
   709   uint reg_degree = _ulr.lrg_union( lr1, lr2, rm_size, _phc._ifg, rm );
   711   if( reg_degree >= rm_size ) {
   712     record_bias( _phc._ifg, lr1, lr2 );
   713     return false;
   714   }
   716   // Now I need to compute all the interferences between dst_copy and
   717 // src_copy.  I'm not willing to visit the entire interference graph, so
   718   // I limit my search to things in dst_copy's block or in a straight
   719   // line of previous blocks.  I give up at merge points or when I get
   720   // more interferences than my degree.  I can stop when I find src_copy.
   721   if( dst_copy != src_copy ) {
   722     reg_degree = compute_separating_interferences(dst_copy, src_copy, b, bindex, rm, rm_size, reg_degree, lr1, lr2 );
   723     if( reg_degree == max_juint ) {
   724       record_bias( _phc._ifg, lr1, lr2 );
   725       return false;
   726     }
   727   } // End of if dst_copy & src_copy are different
   730   // ---- THE COMBINED LRG IS COLORABLE ----
   732   // YEAH - Now coalesce this copy away
   733   assert( lrgs(lr1).num_regs() == lrgs(lr2).num_regs(),   "" );
   735   IndexSet *n_lr1 = _phc._ifg->neighbors(lr1);
   736   IndexSet *n_lr2 = _phc._ifg->neighbors(lr2);
   738   // Update the interference graph
   739   update_ifg(lr1, lr2, n_lr1, n_lr2);
   741   _ulr.remove(lr1);
   743   // Uncomment the following code to trace Coalescing in great detail.
   744   //
   745   //if (false) {
   746   //  tty->cr();
   747   //  tty->print_cr("#######################################");
   748   //  tty->print_cr("union %d and %d", lr1, lr2);
   749   //  n_lr1->dump();
   750   //  n_lr2->dump();
   751   //  tty->print_cr("resulting set is");
   752   //  _ulr.dump();
   753   //}
   755   // Replace n_lr1 with the new combined live range.  _ulr will use
   756   // n_lr1's old memory on the next iteration.  n_lr2 is cleared to
   757   // send its internal memory to the free list.
   758   _ulr.swap(n_lr1);
   759   _ulr.clear();
   760   n_lr2->clear();
   762   lrgs(lr1).set_degree( _phc._ifg->effective_degree(lr1) );
   763   lrgs(lr2).set_degree( 0 );
   765   // Join live ranges.  Merge larger into smaller.  Union lr2 into lr1 in the
   766   // union-find tree
   767   union_helper( lr1_node, lr2_node, lr1, lr2, src_def, dst_copy, src_copy, b, bindex );
   768   // Combine register restrictions
   769   lrgs(lr1).set_mask(rm);
   770   lrgs(lr1).compute_set_mask_size();
   771   lrgs(lr1)._cost += lrgs(lr2)._cost;
   772   lrgs(lr1)._area += lrgs(lr2)._area;
   774   // While it's uncommon to successfully coalesce live ranges that started out
   775   // being not-lo-degree, it can happen.  In any case the combined coalesced
   776   // live range better Simplify nicely.
   777   lrgs(lr1)._was_lo = 1;
   779   // kinda expensive to do all the time
   780   //tty->print_cr("warning: slow verify happening");
   781   //_phc._ifg->verify( &_phc );
   782   return true;
   783 }
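A tiny sketch of the acceptance test that threads through copy_copy above (plain unsigned counts, not the allocator's real data structures): the merge is only kept while the combined live range's interference degree stays below the size of its intersected register mask; once reg_degree >= rm_size the union could force a spill, so the pass records a bias and bails out instead.

#include <cstdio>

// Fewer interfering neighbors than available colors (registers) keeps the
// merged live range trivially colorable.
static bool still_colorable(unsigned reg_degree, unsigned rm_size) {
  return reg_degree < rm_size;
}

int main() {
  printf("%d\n", still_colorable(3, 8));   // 1: safe to coalesce
  printf("%d\n", still_colorable(8, 8));   // 0: record a bias and give up
  return 0;
}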
   785 //------------------------------coalesce---------------------------------------
   786 // Conservative (but pessimistic) copy coalescing of a single block
   787 void PhaseConservativeCoalesce::coalesce( Block *b ) {
   788   // Bail out on infrequent blocks
   789   if( b->is_uncommon(_phc._cfg._bbs) )
   790     return;
   791   // Check this block for copies.
   792   for( uint i = 1; i<b->end_idx(); i++ ) {
   793     // Check for actual copies on inputs.  Coalesce a copy into its
   794     // input if use and copy's input are compatible.
   795     Node *copy1 = b->_nodes[i];
   796     uint idx1 = copy1->is_Copy();
   797     if( !idx1 ) continue;       // Not a copy
   799     if( copy_copy(copy1,copy1,b,i) ) {
   800       i--;                      // Retry, same location in block
   801       PhaseChaitin::_conserv_coalesce++;  // Collect stats on success
   802       continue;
   803     }
   804   }
   805 }
