/*
 * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_coalesce.cpp.incl"

//=============================================================================
//------------------------------reset_uf_map-----------------------------------
void PhaseChaitin::reset_uf_map( uint maxlrg ) {
  _maxlrg = maxlrg;
  // Force the Union-Find mapping to be at least this large
  _uf_map.extend(_maxlrg,0);
  // Initialize it to be the ID mapping.
  for( uint i=0; i<_maxlrg; i++ )
    _uf_map.map(i,i);
}

//------------------------------compress_uf_map--------------------------------
// Make all Nodes map directly to their final live range; no need for
// the Union-Find mapping after this call.
void PhaseChaitin::compress_uf_map_for_nodes( ) {
  // For all Nodes, compress mapping
  uint unique = _names.Size();
  for( uint i=0; i<unique; i++ ) {
    uint lrg = _names[i];
    uint compressed_lrg = Find(lrg);
    if( lrg != compressed_lrg )
      _names.map(i,compressed_lrg);
  }
}

//------------------------------Find_compress-----------------------------------
// Straight out of Tarjan's union-find algorithm
uint PhaseChaitin::Find_compress( uint lrg ) {
  uint cur = lrg;
  uint next = _uf_map[cur];
  while( next != cur ) {        // Scan chain of equivalences
    assert( next < cur, "always union smaller" );
    cur = next;                 // until find a fixed-point
    next = _uf_map[cur];
  }
  // Core of union-find algorithm: update chain of
  // equivalences to be equal to the root of the chain.
  while( lrg != next ) {
    uint tmp = _uf_map[lrg];
    _uf_map.map(lrg, next);
    lrg = tmp;
  }
  return lrg;
}

//------------------------------Find_compress-----------------------------------
// Straight out of Tarjan's union-find algorithm
uint PhaseChaitin::Find_compress( const Node *n ) {
  uint lrg = Find_compress(_names[n->_idx]);
  _names.map(n->_idx,lrg);
  return lrg;
}
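
// The Find_compress/Union pair above follows two invariants: a union always
// points the LARGER live-range number at the smaller one, and every lookup
// flattens the chain it walked.  The standalone sketch below (disabled; not
// part of the build) restates that discipline; the names uf_init,
// find_compress and union_smaller are illustrative, not HotSpot's.
#if 0
#include <assert.h>

enum { UF_MAX = 1024 };
static unsigned uf[UF_MAX];

static void uf_init(unsigned n) {     // identity mapping, as reset_uf_map does
  for (unsigned i = 0; i < n; i++)
    uf[i] = i;
}

static unsigned find_compress(unsigned lrg) {
  unsigned cur  = lrg;
  unsigned next = uf[cur];
  while (next != cur) {               // scan chain of equivalences
    assert(next < cur);               // "always union smaller"
    cur  = next;                      // until we find a fixed-point
    next = uf[cur];
  }
  while (lrg != next) {               // path-compress the chain just walked
    unsigned tmp = uf[lrg];
    uf[lrg] = next;
    lrg = tmp;
  }
  return lrg;
}

static void union_smaller(unsigned a, unsigned b) {
  unsigned ra = find_compress(a);
  unsigned rb = find_compress(b);
  if (ra == rb) return;               // already the same live range
  if (ra < rb) uf[rb] = ra;           // larger root maps to the smaller
  else         uf[ra] = rb;
}
#endif
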
//------------------------------Find_const-------------------------------------
// Like Find above, but no path compress, so bad asymptotic behavior
uint PhaseChaitin::Find_const( uint lrg ) const {
  if( !lrg ) return lrg;        // Ignore the zero LRG
  // Off the end?  This happens during debugging dumps when you got
  // brand new live ranges but have not told the allocator yet.
  if( lrg >= _maxlrg ) return lrg;
  uint next = _uf_map[lrg];
  while( next != lrg ) {        // Scan chain of equivalences
    assert( next < lrg, "always union smaller" );
    lrg = next;                 // until find a fixed-point
    next = _uf_map[lrg];
  }
  return next;
}

//------------------------------Find_const-------------------------------------
// Like Find above, but no path compress, so bad asymptotic behavior
uint PhaseChaitin::Find_const( const Node *n ) const {
  if( n->_idx >= _names.Size() ) return 0; // not mapped, usual for debug dump
  return Find_const( _names[n->_idx] );
}

//------------------------------Union------------------------------------------
// union 2 sets together.
void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
  uint src = Find(src_n);
  uint dst = Find(dst_n);
  assert( src, "" );
  assert( dst, "" );
  assert( src < _maxlrg, "oob" );
  assert( dst < _maxlrg, "oob" );
  assert( src < dst, "always union smaller" );
  _uf_map.map(dst,src);
}

//------------------------------new_lrg----------------------------------------
void PhaseChaitin::new_lrg( const Node *x, uint lrg ) {
  // Make the Node->LRG mapping
  _names.extend(x->_idx,lrg);
  // Make the Union-Find mapping an identity function
  _uf_map.extend(lrg,lrg);
}

//------------------------------clone_projs------------------------------------
// After cloning some rematerialized instruction, clone any MachProj's that
// follow it.  Example: Intel zero is XOR, kills flags.  Sparc FP constants
// use G3 as an address temp.
int PhaseChaitin::clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ) {
  Block *bcon = _cfg._bbs[con->_idx];
  uint cindex = bcon->find_node(con);
  Node *con_next = bcon->_nodes[cindex+1];
  if( con_next->in(0) != con || con_next->Opcode() != Op_MachProj )
    return false;               // No MachProj's follow

  // Copy kills after the cloned constant
  Node *kills = con_next->clone();
  kills->set_req( 0, copy );
  b->_nodes.insert( idx, kills );
  _cfg._bbs.map( kills->_idx, b );
  new_lrg( kills, maxlrg++ );
  return true;
}
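
// compact() below renumbers live ranges densely: a chain head (uf[i] == i)
// takes the next compact name, and a chain member copies the already-
// compacted name of its smaller-numbered head.  A hedged standalone toy of
// just that renumbering loop (disabled; not HotSpot code):
#if 0
#include <stdio.h>

int main() {
  // Chains point from big numbers to little numbers; heads are self-cycles.
  unsigned uf[8] = {0, 1, 2, 1, 4, 2, 4, 7};
  unsigned j = 1;
  for (unsigned i = 1; i < 8; i++) {
    unsigned lr = uf[i];
    if (!lr) continue;                    // unallocated live range
    uf[i] = (lr == i) ? j++ : uf[lr];     // head gets a new dense name,
  }                                       // member copies its head's name
  for (unsigned i = 1; i < 8; i++)
    printf("L%u -> L%u\n", i, uf[i]);     // dense names are now 1..4
  return 0;
}
#endif
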
//------------------------------compact----------------------------------------
// Renumber the live ranges to compact them.  Makes the IFG smaller.
void PhaseChaitin::compact() {
  // Currently the _uf_map contains a series of short chains which are headed
  // by a self-cycle.  All the chains run from big numbers to little numbers.
  // The Find() call chases the chains & shortens them for the next Find call.
  // We are going to change this structure slightly.  Numbers above a moving
  // wave 'i' are unchanged.  Numbers below 'j' point directly to their
  // compacted live range with no further chaining.  There are no chains or
  // cycles below 'i', so the Find call no longer works.
  uint j=1;
  uint i;
  for( i=1; i < _maxlrg; i++ ) {
    uint lr = _uf_map[i];
    // Ignore unallocated live ranges
    if( !lr ) continue;
    assert( lr <= i, "" );
    _uf_map.map(i, ( lr == i ) ? j++ : _uf_map[lr]);
  }
  if( false )                   // PrintOptoCompactLiveRanges
    printf("Compacted %d LRs from %d\n",i-j,i);
  // Now change the Node->LR mapping to reflect the compacted names
  uint unique = _names.Size();
  for( i=0; i<unique; i++ )
    _names.map(i,_uf_map[_names[i]]);

  // Reset the Union-Find mapping
  reset_uf_map(j);
}

//=============================================================================
//------------------------------dump-------------------------------------------
#ifndef PRODUCT
void PhaseCoalesce::dump( Node *n ) const {
  // Being a const function means I cannot use 'Find'
  uint r = _phc.Find_const(n);
  tty->print("L%d/N%d ",r,n->_idx);
}

//------------------------------dump-------------------------------------------
void PhaseCoalesce::dump() const {
  // I know I have a block layout now, so I can print blocks in a loop
  for( uint i=0; i<_phc._cfg._num_blocks; i++ ) {
    uint j;
    Block *b = _phc._cfg._blocks[i];
    // Print a nice block header
    tty->print("B%d: ",b->_pre_order);
    for( j=1; j<b->num_preds(); j++ )
      tty->print("B%d ", _phc._cfg._bbs[b->pred(j)->_idx]->_pre_order);
    tty->print("-> ");
    for( j=0; j<b->_num_succs; j++ )
      tty->print("B%d ",b->_succs[j]->_pre_order);
    tty->print(" IDom: B%d/#%d\n", b->_idom ? b->_idom->_pre_order : 0, b->_dom_depth);
    uint cnt = b->_nodes.size();
    for( j=0; j<cnt; j++ ) {
      Node *n = b->_nodes[j];
      dump( n );
      tty->print("\t%s\t",n->Name());

      // Dump the inputs
      uint k;                   // Exit value of loop
      for( k=0; k<n->req(); k++ )   // For all required inputs
        if( n->in(k) ) dump( n->in(k) );
        else tty->print("_ ");
      int any_prec = 0;
      for( ; k<n->len(); k++ )      // For all precedence inputs
        if( n->in(k) ) {
          if( !any_prec++ ) tty->print(" |");
          dump( n->in(k) );
        }

      // Dump node-specific info
      n->dump_spec(tty);
      tty->print("\n");

    }
    tty->print("\n");
  }
}
#endif
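
// combine_these_two() below merges two live ranges only when the IFG has no
// edge between them (test_edge_sq) and their register masks overlap.  As a
// hedged illustration, an interference query over a toy bit-matrix might
// look like this (disabled; IndexSet/PhaseIFG are the real structures):
#if 0
#include <stdint.h>

enum { TOY_MAXLRG = 64 };
static uint64_t toy_ifg[TOY_MAXLRG];      // row i = neighbor bits of lrg i

static void toy_add_edge(unsigned a, unsigned b) {
  toy_ifg[a] |= (uint64_t)1 << b;         // kept symmetric, like the IFG
  toy_ifg[b] |= (uint64_t)1 << a;
}

static int toy_interferes(unsigned a, unsigned b) {
  return (int)((toy_ifg[a] >> b) & 1);    // coalescing is legal only if 0
}
#endif
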
//------------------------------combine_these_two------------------------------
// Combine the live ranges def'd by these 2 Nodes.  N2 is an input to N1.
void PhaseCoalesce::combine_these_two( Node *n1, Node *n2 ) {
  uint lr1 = _phc.Find(n1);
  uint lr2 = _phc.Find(n2);
  if( lr1 != lr2 &&             // Different live ranges already AND
      !_phc._ifg->test_edge_sq( lr1, lr2 ) ) {  // Do not interfere
    LRG *lrg1 = &_phc.lrgs(lr1);
    LRG *lrg2 = &_phc.lrgs(lr2);
    // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK.

    // Now, why is int->oop OK?  We end up declaring a raw-pointer as an oop
    // and in general that's a bad thing.  However, int->oop conversions only
    // happen at GC points, so the lifetime of the misclassified raw-pointer
    // is from the CheckCastPP (that converts it to an oop) backwards up
    // through a merge point and into the slow-path call, and around the
    // diamond up to the heap-top check and back down into the slow-path call.
    // The misclassified raw pointer is NOT live across the slow-path call,
    // and so does not appear in any GC info, so the fact that it is
    // misclassified is OK.

    if( (lrg1->_is_oop || !lrg2->_is_oop) && // not an oop->int cast AND
        // Compatible final mask
        lrg1->mask().overlap( lrg2->mask() ) ) {
      // Merge larger into smaller.
      if( lr1 > lr2 ) {
        uint tmp = lr1; lr1 = lr2; lr2 = tmp;
        Node   *n = n1;  n1 = n2;  n2 = n;
        LRG *ltmp = lrg1; lrg1 = lrg2; lrg2 = ltmp;
      }
      // Union lr2 into lr1
      _phc.Union( n1, n2 );
      if (lrg1->_maxfreq < lrg2->_maxfreq)
        lrg1->_maxfreq = lrg2->_maxfreq;
      // Merge in the IFG
      _phc._ifg->Union( lr1, lr2 );
      // Combine register restrictions
      lrg1->AND(lrg2->mask());
    }
  }
}

//------------------------------coalesce_driver--------------------------------
// Copy coalescing
void PhaseCoalesce::coalesce_driver( ) {

  verify();
  // Coalesce from high frequency to low
  for( uint i=0; i<_phc._cfg._num_blocks; i++ )
    coalesce( _phc._blks[i] );

}
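
// insert_copy_with_overlap() below orders phi-resolution copies and, when
// the copies form a cycle, breaks it with a temp.  The classic instance is
// the parallel assignment {a,b} = {b,a}: done sequentially it clobbers a
// value, done through a temp it is correct.  Hedged standalone toy
// (disabled; not part of the build):
#if 0
#include <stdio.h>

int main() {
  int a = 1, b = 2;
  // Naive sequential order loses a value:
  //   a = b; b = a;           // would leave a == b == 2
  int tmp = a;                 // the inserted temp saves the clobbered value
  a = b;
  b = tmp;                     // cycle broken: a == 2, b == 1
  printf("a=%d b=%d\n", a, b);
  return 0;
}
#endif
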
//------------------------------insert_copy_with_overlap-----------------------
// I am inserting copies to come out of SSA form.  In the general case, I am
// doing a parallel renaming.  I'm in the Named world now, so I can't do a
// general parallel renaming.  All the copies now use "names" (live-ranges)
// to carry values instead of the explicit use-def chains.  Suppose I need to
// insert 2 copies into the same block.  They copy L161->L128 and L128->L132.
// If I insert them in the wrong order then L128 will get clobbered before it
// can get used by the second copy.  This cannot happen in the SSA model;
// direct use-def chains get me the right value.  It DOES happen in the named
// model so I have to handle the reordering of copies.
//
// In general, I need to topo-sort the placed copies to avoid conflicts.
// It's possible to have a closed cycle of copies (e.g., recirculating the
// same values around a loop).  In this case I need a temp to break the cycle.
void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, uint dst_name, uint src_name ) {

  // Scan backwards for the locations of the last use of the dst_name.
  // I am about to clobber the dst_name, so the copy must be inserted
  // after the last use.  Last use is really first-use on a backwards scan.
  uint i = b->end_idx()-1;
  while( 1 ) {
    Node *n = b->_nodes[i];
    // Check for end of virtual copies; this is also the end of the
    // parallel renaming effort.
    if( n->_idx < _unique ) break;
    uint idx = n->is_Copy();
    assert( idx || n->is_Con() || n->Opcode() == Op_MachProj, "Only copies during parallel renaming" );
    if( idx && _phc.Find(n->in(idx)) == dst_name ) break;
    i--;
  }
  uint last_use_idx = i;

  // Also search for any kill of src_name that exits the block.
  // Since the copy uses src_name, I have to come before any kill.
  uint kill_src_idx = b->end_idx();
  // There can be only 1 kill that exits any block and that is
  // the last kill.  Thus it is the first kill on a backwards scan.
  i = b->end_idx()-1;
  while( 1 ) {
    Node *n = b->_nodes[i];
    // Check for end of virtual copies; this is also the end of the
    // parallel renaming effort.
    if( n->_idx < _unique ) break;
    assert( n->is_Copy() || n->is_Con() || n->Opcode() == Op_MachProj, "Only copies during parallel renaming" );
    if( _phc.Find(n) == src_name ) {
      kill_src_idx = i;
      break;
    }
    i--;
  }
  // Need a temp?  Last use of dst comes after the kill of src?
  if( last_use_idx >= kill_src_idx ) {
    // Need to break a cycle with a temp
    uint idx = copy->is_Copy();
    Node *tmp = copy->clone();
    _phc.new_lrg(tmp,_phc._maxlrg++);
    // Insert new temp between copy and source
    tmp ->set_req(idx,copy->in(idx));
    copy->set_req(idx,tmp);
    // Save source in temp early, before source is killed
    b->_nodes.insert(kill_src_idx,tmp);
    _phc._cfg._bbs.map( tmp->_idx, b );
    last_use_idx++;
  }

  // Insert just after last use
  b->_nodes.insert(last_use_idx+1,copy);
}

//------------------------------insert_copies----------------------------------
void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
  // We compress the LRGs and fix up the liveout data only here, since the
  // other place, in Split(), is guarded by an assert which we never hit.
  _phc.compress_uf_map_for_nodes();
  // Fix block's liveout data for compressed live ranges.
  for(uint lrg = 1; lrg < _phc._maxlrg; lrg++ ) {
    uint compressed_lrg = _phc.Find(lrg);
    if( lrg != compressed_lrg ) {
      for( uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++ ) {
        IndexSet *liveout = _phc._live->live(_phc._cfg._blocks[bidx]);
        if( liveout->member(lrg) ) {
          liveout->remove(lrg);
          liveout->insert(compressed_lrg);
        }
      }
    }
  }

  // All new nodes added are actual copies to replace virtual copies.
  // Nodes with index less than '_unique' are original, non-virtual Nodes.
  _unique = C->unique();

  for( uint i=0; i<_phc._cfg._num_blocks; i++ ) {
    Block *b = _phc._cfg._blocks[i];
    uint cnt = b->num_preds();  // Number of inputs to the Phi

    for( uint l = 1; l<b->_nodes.size(); l++ ) {
      Node *n = b->_nodes[l];

      // Do not use removed-copies, use copied value instead
      uint ncnt = n->req();
      for( uint k = 1; k<ncnt; k++ ) {
        Node *copy = n->in(k);
        uint cidx = copy->is_Copy();
        if( cidx ) {
          Node *def = copy->in(cidx);
          if( _phc.Find(copy) == _phc.Find(def) )
            n->set_req(k,def);
        }
      }

      // Remove any explicit copies that get coalesced.
      uint cidx = n->is_Copy();
      if( cidx ) {
        Node *def = n->in(cidx);
        if( _phc.Find(n) == _phc.Find(def) ) {
          n->replace_by(def);
          n->set_req(cidx,NULL);
          b->_nodes.remove(l);
          l--;
          continue;
        }
      }

      if( n->is_Phi() ) {
        // Get the chosen name for the Phi
        uint phi_name = _phc.Find( n );
        // Ignore the pre-allocated specials
        if( !phi_name ) continue;
        // Check for mismatch inputs to Phi
        for( uint j = 1; j<cnt; j++ ) {
          Node *m = n->in(j);
          uint src_name = _phc.Find(m);
          if( src_name != phi_name ) {
            Block *pred = _phc._cfg._bbs[b->pred(j)->_idx];
            Node *copy;
            assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
            // Rematerialize constants instead of copying them
            if( m->is_Mach() && m->as_Mach()->is_Con() &&
                m->as_Mach()->rematerialize() ) {
              copy = m->clone();
              // Insert the copy in the predecessor basic block
              pred->add_inst(copy);
              // Copy any flags as well
              _phc.clone_projs( pred, pred->end_idx(), m, copy, _phc._maxlrg );
            } else {
              const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
              copy = new (C) MachSpillCopyNode(m,*rm,*rm);
              // Find a good place to insert.  Kinda tricky, use a subroutine
              insert_copy_with_overlap(pred,copy,phi_name,src_name);
            }
            // Insert the copy in the use-def chain
            n->set_req( j, copy );
            _phc._cfg._bbs.map( copy->_idx, pred );
            // Extend ("register allocate") the names array for the copy.
            _phc._names.extend( copy->_idx, phi_name );
          } // End of if Phi names do not match
        } // End of for all inputs to Phi
      } else { // End of if Phi

        // Now check for 2-address instructions
        uint idx;
        if( n->is_Mach() && (idx=n->as_Mach()->two_adr()) ) {
          // Get the chosen name for the Node
          uint name = _phc.Find( n );
          assert( name, "no 2-address specials" );
          // Check for name mis-match on the 2-address input
          Node *m = n->in(idx);
          if( _phc.Find(m) != name ) {
            Node *copy;
            assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
            // At this point it is unsafe to extend live ranges (6550579).
            // Rematerialize only constants as we do for Phi above.
            if( m->is_Mach() && m->as_Mach()->is_Con() &&
                m->as_Mach()->rematerialize() ) {
              copy = m->clone();
              // Insert the copy in the basic block, just before us
              b->_nodes.insert( l++, copy );
              if( _phc.clone_projs( b, l, m, copy, _phc._maxlrg ) )
                l++;
            } else {
              const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
              copy = new (C) MachSpillCopyNode( m, *rm, *rm );
              // Insert the copy in the basic block, just before us
              b->_nodes.insert( l++, copy );
            }
            // Insert the copy in the use-def chain
            n->set_req(idx, copy );
            // Extend ("register allocate") the names array for the copy.
            _phc._names.extend( copy->_idx, name );
            _phc._cfg._bbs.map( copy->_idx, b );
          }

        } // End of is two-adr

        // Insert a copy at a debug use for a lrg which has high frequency
        if( b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs) ) {
          // Walk the debug inputs to the node and check for lrg freq
          JVMState* jvms = n->jvms();
          uint debug_start = jvms ? jvms->debug_start() : 999999;
          uint debug_end   = jvms ? jvms->debug_end()   : 999999;
          for(uint inpidx = debug_start; inpidx < debug_end; inpidx++) {
            // Do not split monitors; they are only needed for debug table
            // entries and need no code.
            if( jvms->is_monitor_use(inpidx) ) continue;
            Node *inp = n->in(inpidx);
            uint nidx = _phc.n2lidx(inp);
            LRG &lrg = lrgs(nidx);

            // If this lrg has a high frequency use/def
            if( lrg._maxfreq >= _phc.high_frequency_lrg() ) {
              // If the live range is also live out of this block (like it
              // would be for a fast/slow idiom), the normal spill mechanism
              // does an excellent job.  If it is not live out of this block
              // (like it would be for debug info to uncommon trap) splitting
              // the live range now allows a better allocation in the high
              // frequency blocks.
              //    Build_IFG_virtual has converted the live sets to
              // live-IN info, not live-OUT info.
              uint k;
              for( k=0; k < b->_num_succs; k++ )
                if( _phc._live->live(b->_succs[k])->member( nidx ) )
                  break;        // Live in to some successor block?
              if( k < b->_num_succs )
                continue;       // Live out; do not pre-split
              // Split the lrg at this use
              const RegMask *rm = C->matcher()->idealreg2spillmask[inp->ideal_reg()];
              Node *copy = new (C) MachSpillCopyNode( inp, *rm, *rm );
              // Insert the copy in the use-def chain
              n->set_req(inpidx, copy );
              // Insert the copy in the basic block, just before us
              b->_nodes.insert( l++, copy );
              // Extend ("register allocate") the names array for the copy.
              _phc.new_lrg( copy, _phc._maxlrg++ );
              _phc._cfg._bbs.map( copy->_idx, b );
              //tty->print_cr("Split a debug use in Aggressive Coalesce");
            }  // End of if high frequency use/def
          }  // End of for all debug inputs
        }  // End of if low frequency safepoint

      } // End of if Phi

    } // End of for all instructions
  } // End of for all blocks
}
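
// The 2-address handling above inserts a copy when an instruction's
// destination must reuse its input register but the names differ.  A hedged
// C analogy (disabled; not part of the build): an x86-style ADD overwrites
// its first operand, so if that operand is still live afterwards, a copy
// must feed the instruction instead.
#if 0
#include <stdio.h>

int main() {
  int a = 5, b = 7;
  int t = a;                   // the "copy in the basic block, just before us"
  t += b;                      // two-address op: first input is also the dest
  printf("a=%d t=%d\n", a, t); // 'a' survives because the copy fed the op
  return 0;
}
#endif
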
//=============================================================================
//------------------------------coalesce---------------------------------------
// Aggressive (but pessimistic) copy coalescing of a single block

// The following coalesce pass represents a single round of aggressive
// pessimistic coalesce.  "Aggressive" means no attempt to preserve
// colorability when coalescing.  This occasionally means more spills, but
// it also means fewer rounds of coalescing for better code - and that means
// faster compiles.

// "Pessimistic" means we do not hit the fixed point in one pass (and we are
// reaching for the least fixed point to boot).  This is typically solved
// with a few more rounds of coalescing, but the compiler must run fast.  We
// could optimistically coalesce everything touching PhiNodes together
// into one big live range, then check for self-interference.  Everywhere
// the live range interferes with self it would have to be split.  Finding
// the right split points can be done with some heuristics (based on
// expected frequency of edges in the live range).  In short, it's a real
// research problem and the timeline is too short to allow such research.
// Further thoughts: (1) build the LR in a pass, (2) find self-interference
// in another pass, (3) per each self-conflict, split, (4) split by finding
// the low-cost cut (min-cut) of the LR, (5) edges in the LR are weighted
// according to the GCM algorithm (or just exec freq on CFG edges).

void PhaseAggressiveCoalesce::coalesce( Block *b ) {
  // Copies are still "virtual" - meaning we have not made them explicitly
  // copies.  Instead, Phi functions of successor blocks have mis-matched
  // live-ranges.  If I fail to coalesce, I'll have to insert a copy to line
  // up the live-ranges.  Check for Phis in successor blocks.
  uint i;
  for( i=0; i<b->_num_succs; i++ ) {
    Block *bs = b->_succs[i];
    // Find index of 'b' in 'bs' predecessors
    uint j=1;
    while( _phc._cfg._bbs[bs->pred(j)->_idx] != b ) j++;
    // Visit all the Phis in successor block
    for( uint k = 1; k<bs->_nodes.size(); k++ ) {
      Node *n = bs->_nodes[k];
      if( !n->is_Phi() ) break;
      combine_these_two( n, n->in(j) );
    }
  } // End of for all successor blocks


  // Check _this_ block for 2-address instructions and copies.
  uint cnt = b->end_idx();
  for( i = 1; i<cnt; i++ ) {
    Node *n = b->_nodes[i];
    uint idx;
    // 2-address instructions have a virtual Copy matching their input
    // to their output
    if( n->is_Mach() && (idx = n->as_Mach()->two_adr()) ) {
      MachNode *mach = n->as_Mach();
      combine_these_two( mach, mach->in(idx) );
    }
  } // End of for all instructions in block
}

//=============================================================================
//------------------------------PhaseConservativeCoalesce----------------------
PhaseConservativeCoalesce::PhaseConservativeCoalesce( PhaseChaitin &chaitin ) : PhaseCoalesce(chaitin) {
  _ulr.initialize(_phc._maxlrg);
}

//------------------------------verify-----------------------------------------
void PhaseConservativeCoalesce::verify() {
#ifdef ASSERT
  _phc.set_was_low();
#endif
}

//------------------------------union_helper-----------------------------------
void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, uint lr1, uint lr2, Node *src_def, Node *dst_copy, Node *src_copy, Block *b, uint bindex ) {
  // Join live ranges.  Merge larger into smaller.  Union lr2 into lr1 in the
  // union-find tree
  _phc.Union( lr1_node, lr2_node );

  // Single-def live range ONLY if both live ranges are single-def.
  // If both are single def, then src_def powers one live range
  // and dst_copy powers the other.  After merging, src_def powers
  // the combined live range.
  lrgs(lr1)._def = (lrgs(lr1).is_multidef() ||
                    lrgs(lr2).is_multidef() )
    ? NodeSentinel : src_def;
  lrgs(lr2)._def = NULL;        // No def for lrg 2
  lrgs(lr2).Clear();            // Force empty mask for LRG 2
  //lrgs(lr2)._size = 0;        // Live-range 2 goes dead
  lrgs(lr1)._is_oop |= lrgs(lr2)._is_oop;
  lrgs(lr2)._is_oop = 0;        // In particular, not an oop for GC info

  if (lrgs(lr1)._maxfreq < lrgs(lr2)._maxfreq)
    lrgs(lr1)._maxfreq = lrgs(lr2)._maxfreq;

  // Copy original value instead.  Intermediate copies go dead, and
  // the dst_copy becomes useless.
  int didx = dst_copy->is_Copy();
  dst_copy->set_req( didx, src_def );
  // Add copy to free list
  // _phc.free_spillcopy(b->_nodes[bindex]);
  assert( b->_nodes[bindex] == dst_copy, "" );
  dst_copy->replace_by( dst_copy->in(didx) );
  dst_copy->set_req( didx, NULL);
  b->_nodes.remove(bindex);
  if( bindex < b->_ihrp_index ) b->_ihrp_index--;
  if( bindex < b->_fhrp_index ) b->_fhrp_index--;

  // Stretched lr1; add it to liveness of intermediate blocks
  Block *b2 = _phc._cfg._bbs[src_copy->_idx];
  while( b != b2 ) {
    b = _phc._cfg._bbs[b->pred(1)->_idx];
    _phc._live->live(b)->insert(lr1);
  }
}
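
// union_helper() above keeps per-live-range single-def information: the
// merged range stays single-def only when both inputs were, otherwise the
// def is poisoned with a sentinel (NodeSentinel in HotSpot).  A hedged
// minimal restatement (disabled; names are illustrative, not HotSpot's):
#if 0
struct ToyNode;                                        // opaque, like Node
static struct ToyNode * const TOY_MULTIDEF = (struct ToyNode *)1;

static struct ToyNode *toy_merge_defs(struct ToyNode *d1, struct ToyNode *d2) {
  if (d1 == TOY_MULTIDEF || d2 == TOY_MULTIDEF)
    return TOY_MULTIDEF;                               // any multidef poisons
  return d1;                                           // src_def powers both
}
#endif
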
//------------------------------compute_separating_interferences---------------
// Factored code from copy_copy that computes extra interferences from
// lengthening a live range by double-coalescing.
uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy, Node *src_copy, Block *b, uint bindex, RegMask &rm, uint reg_degree, uint rm_size, uint lr1, uint lr2 ) {

  assert(!lrgs(lr1)._fat_proj, "cannot coalesce fat_proj");
  assert(!lrgs(lr2)._fat_proj, "cannot coalesce fat_proj");
  Node *prev_copy = dst_copy->in(dst_copy->is_Copy());
  Block *b2 = b;
  uint bindex2 = bindex;
  while( 1 ) {
    // Find previous instruction
    bindex2--;                  // Chain backwards 1 instruction
    while( bindex2 == 0 ) {     // At block start, find prior block
      assert( b2->num_preds() == 2, "cannot double coalesce across c-flow" );
      b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
      bindex2 = b2->end_idx()-1;
    }
    // Get prior instruction
    assert(bindex2 < b2->_nodes.size(), "index out of bounds");
    Node *x = b2->_nodes[bindex2];
    if( x == prev_copy ) {      // Previous copy in copy chain?
      if( prev_copy == src_copy)// Found end of chain and all interferences
        break;                  // So break out of loop
      // Else work back one in copy chain
      prev_copy = prev_copy->in(prev_copy->is_Copy());
    } else {                    // Else collect interferences
      uint lidx = _phc.Find(x);
      // Found another def of live-range being stretched?
      if( lidx == lr1 ) return max_juint;
      if( lidx == lr2 ) return max_juint;

      // If we attempt to coalesce across a bound def
      if( lrgs(lidx).is_bound() ) {
        // Do not let the coalesced LRG expect to get the bound color
        rm.SUBTRACT( lrgs(lidx).mask() );
        // Recompute rm_size
        rm_size = rm.Size();
        //if( rm._flags ) rm_size += 1000000;
        if( reg_degree >= rm_size ) return max_juint;
      }
      if( rm.overlap(lrgs(lidx).mask()) ) {
        // Insert lidx into union LRG; returns TRUE if actually inserted
        if( _ulr.insert(lidx) ) {
          // Infinite-stack neighbors do not alter colorability, as they
          // can always color to some other color.
          if( !lrgs(lidx).mask().is_AllStack() ) {
            // If this coalesce will make any new neighbor uncolorable,
            // do not coalesce.
            if( lrgs(lidx).just_lo_degree() )
              return max_juint;
            // Bump our degree
            if( ++reg_degree >= rm_size )
              return max_juint;
          } // End of if not infinite-stack neighbor
        } // End of if actually inserted
      } // End of if live range overlaps
    } // End of else collect interferences for 1 node
  } // End of while forever, scan back for interferences
  return reg_degree;
}

//------------------------------update_ifg-------------------------------------
void PhaseConservativeCoalesce::update_ifg(uint lr1, uint lr2, IndexSet *n_lr1, IndexSet *n_lr2) {
  // Some original neighbors of lr1 might have gone away
  // because the constrained register mask prevented them.
  // Remove lr1 from such neighbors.
  IndexSetIterator one(n_lr1);
  uint neighbor;
  LRG &lrg1 = lrgs(lr1);
  while ((neighbor = one.next()) != 0)
    if( !_ulr.member(neighbor) )
      if( _phc._ifg->neighbors(neighbor)->remove(lr1) )
        lrgs(neighbor).inc_degree( -lrg1.compute_degree(lrgs(neighbor)) );


  // lr2 is now called (coalesced into) lr1.
  // Remove lr2 from the IFG.
  IndexSetIterator two(n_lr2);
  LRG &lrg2 = lrgs(lr2);
  while ((neighbor = two.next()) != 0)
    if( _phc._ifg->neighbors(neighbor)->remove(lr2) )
      lrgs(neighbor).inc_degree( -lrg2.compute_degree(lrgs(neighbor)) );

  // Some neighbors of intermediate copies now interfere with the
  // combined live range.
  IndexSetIterator three(&_ulr);
  while ((neighbor = three.next()) != 0)
    if( _phc._ifg->neighbors(neighbor)->insert(lr1) )
      lrgs(neighbor).inc_degree( lrg1.compute_degree(lrgs(neighbor)) );
}

//------------------------------record_bias------------------------------------
static void record_bias( const PhaseIFG *ifg, int lr1, int lr2 ) {
  // Tag copy bias here
  if( !ifg->lrgs(lr1)._copy_bias )
    ifg->lrgs(lr1)._copy_bias = lr2;
  if( !ifg->lrgs(lr2)._copy_bias )
    ifg->lrgs(lr2)._copy_bias = lr1;
}
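
// copy_copy() below gates coalescing on two cheap tests: the intersection
// of the two allowed-register masks must be non-empty, and the union'd
// neighbor degree must stay below that intersection's size (a conservative,
// Briggs-style colorability test).  Hedged bitmask toy of the first test
// (disabled; the real RegMask is wider than one word):
#if 0
#include <stdint.h>
#include <stdio.h>

int main() {
  uint32_t mask1 = 0x0000FF00u;       // lrg1 may live in regs 8..15
  uint32_t mask2 = 0x00000FF0u;       // lrg2 may live in regs 4..11
  uint32_t both  = mask1 & mask2;     // intersection: regs 8..11
  unsigned rm_size = 0;
  for (uint32_t m = both; m; m &= m - 1)
    rm_size++;                        // popcount: candidate registers left
  if (rm_size == 0)
    printf("incompatible masks, no way to coalesce\n");
  else
    printf("compatible, %u candidate regs\n", rm_size);
  return 0;
}
#endif
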
//------------------------------copy_copy--------------------------------------
// See if I can coalesce a series of multiple copies together.  I need the
// final dest copy and the original src copy.  They can be the same Node.
// Compute the compatible register masks.
bool PhaseConservativeCoalesce::copy_copy( Node *dst_copy, Node *src_copy, Block *b, uint bindex ) {

  if( !dst_copy->is_SpillCopy() ) return false;
  if( !src_copy->is_SpillCopy() ) return false;
  Node *src_def = src_copy->in(src_copy->is_Copy());
  uint lr1 = _phc.Find(dst_copy);
  uint lr2 = _phc.Find(src_def );

  // Same live ranges already?
  if( lr1 == lr2 ) return false;

  // Interfere?
  if( _phc._ifg->test_edge_sq( lr1, lr2 ) ) return false;

  // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK.
  if( !lrgs(lr1)._is_oop && lrgs(lr2)._is_oop ) // not an oop->int cast
    return false;

  // Coalescing between an aligned live range and a mis-aligned live range?
  // No, no!  Alignment changes how we count degree.
  if( lrgs(lr1)._fat_proj != lrgs(lr2)._fat_proj )
    return false;

  // Sort; use smaller live-range number
  Node *lr1_node = dst_copy;
  Node *lr2_node = src_def;
  if( lr1 > lr2 ) {
    uint tmp = lr1; lr1 = lr2; lr2 = tmp;
    lr1_node = src_def; lr2_node = dst_copy;
  }

  // Check for compatibility of the 2 live ranges by
  // intersecting their allowed register sets.
  RegMask rm = lrgs(lr1).mask();
  rm.AND(lrgs(lr2).mask());
  // Number of bits free
  uint rm_size = rm.Size();

  if (UseFPUForSpilling && rm.is_AllStack() ) {
    // Don't coalesce when frequency difference is large
    Block *dst_b = _phc._cfg._bbs[dst_copy->_idx];
    Block *src_def_b = _phc._cfg._bbs[src_def->_idx];
    if (src_def_b->_freq > 10*dst_b->_freq )
      return false;
  }

  // If we can use any stack slot, then effective size is infinite
  if( rm.is_AllStack() ) rm_size += 1000000;
  // Incompatible masks, no way to coalesce
  if( rm_size == 0 ) return false;

  // Another early bail-out test is when we are double-coalescing and the
  // 2 copies are separated by some control flow.
  if( dst_copy != src_copy ) {
    Block *src_b = _phc._cfg._bbs[src_copy->_idx];
    Block *b2 = b;
    while( b2 != src_b ) {
      if( b2->num_preds() > 2 ){// Found merge-point
        _phc._lost_opp_cflow_coalesce++;
        // extra record_bias commented out because Chris believes it is not
        // productive.  Since we can record only 1 bias, we want to choose one
        // that stands a chance of working and this one probably does not.
        //record_bias( _phc._lrgs, lr1, lr2 );
        return false;           // Too hard to find all interferences
      }
      b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
    }
  }

  // Union the two interference sets together into '_ulr'
  uint reg_degree = _ulr.lrg_union( lr1, lr2, rm_size, _phc._ifg, rm );

  if( reg_degree >= rm_size ) {
    record_bias( _phc._ifg, lr1, lr2 );
    return false;
  }

  // Now I need to compute all the interferences between dst_copy and
  // src_copy.  I'm not willing to visit the entire interference graph, so
  // I limit my search to things in dst_copy's block or in a straight
  // line of previous blocks.
  // I give up at merge points or when I get more interferences than my
  // degree.  I can stop when I find src_copy.
  if( dst_copy != src_copy ) {
    reg_degree = compute_separating_interferences(dst_copy, src_copy, b, bindex, rm, rm_size, reg_degree, lr1, lr2 );
    if( reg_degree == max_juint ) {
      record_bias( _phc._ifg, lr1, lr2 );
      return false;
    }
  } // End of if dst_copy & src_copy are different


  // ---- THE COMBINED LRG IS COLORABLE ----

  // YEAH - Now coalesce this copy away
  assert( lrgs(lr1).num_regs() == lrgs(lr2).num_regs(), "" );

  IndexSet *n_lr1 = _phc._ifg->neighbors(lr1);
  IndexSet *n_lr2 = _phc._ifg->neighbors(lr2);

  // Update the interference graph
  update_ifg(lr1, lr2, n_lr1, n_lr2);

  _ulr.remove(lr1);

  // Uncomment the following code to trace Coalescing in great detail.
  //
  //if (false) {
  //  tty->cr();
  //  tty->print_cr("#######################################");
  //  tty->print_cr("union %d and %d", lr1, lr2);
  //  n_lr1->dump();
  //  n_lr2->dump();
  //  tty->print_cr("resulting set is");
  //  _ulr.dump();
  //}

  // Replace n_lr1 with the new combined live range.  _ulr will use
  // n_lr1's old memory on the next iteration.  n_lr2 is cleared to
  // send its internal memory to the free list.
  _ulr.swap(n_lr1);
  _ulr.clear();
  n_lr2->clear();

  lrgs(lr1).set_degree( _phc._ifg->effective_degree(lr1) );
  lrgs(lr2).set_degree( 0 );

  // Join live ranges.  Merge larger into smaller.  Union lr2 into lr1 in the
  // union-find tree
  union_helper( lr1_node, lr2_node, lr1, lr2, src_def, dst_copy, src_copy, b, bindex );
  // Combine register restrictions
  lrgs(lr1).set_mask(rm);
  lrgs(lr1).compute_set_mask_size();
  lrgs(lr1)._cost += lrgs(lr2)._cost;
  lrgs(lr1)._area += lrgs(lr2)._area;

  // While it's uncommon to successfully coalesce live ranges that started out
  // being not-lo-degree, it can happen.  In any case the combined coalesced
  // live range better Simplify nicely.
  lrgs(lr1)._was_lo = 1;

  // kinda expensive to do all the time
  //tty->print_cr("warning: slow verify happening");
  //_phc._ifg->verify( &_phc );
  return true;
}

//------------------------------coalesce---------------------------------------
// Conservative (but pessimistic) copy coalescing of a single block
void PhaseConservativeCoalesce::coalesce( Block *b ) {
  // Bail out on infrequent blocks
  if( b->is_uncommon(_phc._cfg._bbs) )
    return;
  // Check this block for copies.
  for( uint i = 1; i<b->end_idx(); i++ ) {
    // Check for actual copies on inputs.  Coalesce a copy into its
    // input if use and copy's input are compatible.
    Node *copy1 = b->_nodes[i];
    uint idx1 = copy1->is_Copy();
    if( !idx1 ) continue;       // Not a copy

    if( copy_copy(copy1,copy1,b,i) ) {
      i--;                      // Retry, same location in block
      PhaseChaitin::_conserv_coalesce++; // Collect stats on success
      continue;
    }

    /* do not attempt pairs.  About 1/2 of all pairs can be removed by
       post-alloc.  The other set are too few to bother.
    Node *copy2 = copy1->in(idx1);
    uint idx2 = copy2->is_Copy();
    if( !idx2 ) continue;
    if( copy_copy(copy1,copy2,b,i) ) {
      i--;                      // Retry, same location in block
      PhaseChaitin::_conserv_coalesce_pair++; // Collect stats on success
      continue;
    }
    */
  }
}