/*
 * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/runtime.hpp"
#ifdef TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_arm
# include "adfiles/ad_arm.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_ppc
# include "adfiles/ad_ppc.hpp"
#endif

// Optimization - Graph Style

//------------------------------implicit_null_check----------------------------
// Detect implicit-null-check opportunities.  Basically, find NULL checks
// with suitable memory ops nearby.  Use the memory op to do the NULL check.
// I can generate a memory op if there is not one nearby.
// The proj is the control projection for the not-null case.
// The val is the pointer being checked for nullness or
// decodeHeapOop_not_null node if it did not fold into address.
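// Illustrative sketch (not code from this file; names are made up): the
// transformation turns
//     if (p == NULL) goto uncommon_trap;   // explicit test + branch
//     x = p->field;                        // load on the not-null path
// into a single memory access whose hardware fault performs the test:
//     x = [p + #field_offset]              // SEGV on a NULL base reaches the trap handler
// For this to be safe the constant offset must stay within the protected
// page, which is what the MacroAssembler::needs_explicit_null_check()
// queries below enforce.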
void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons) {
  // Assume if null check needed for 0 offset then always needed
  // Intel solaris doesn't support any null checks yet and no
  // mechanism exists (yet) to set the switches at an os_cpu level
  if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return;

  // Make sure the ptr-is-null path appears to be uncommon!
  float f = end()->as_MachIf()->_prob;
  if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f;
  if( f > PROB_UNLIKELY_MAG(4) ) return;

  uint bidx = 0;                // Capture index of value into memop
  bool was_store;               // Memory op is a store op

  // Get the successor block for if the test ptr is non-null
  Block* not_null_block;        // this one goes with the proj
  Block* null_block;
  if (_nodes[_nodes.size()-1] == proj) {
    null_block     = _succs[0];
    not_null_block = _succs[1];
  } else {
    assert(_nodes[_nodes.size()-2] == proj, "proj is one or the other");
    not_null_block = _succs[0];
    null_block     = _succs[1];
  }
  while (null_block->is_Empty() == Block::empty_with_goto) {
    null_block = null_block->_succs[0];
  }

  // Search the exception block for an uncommon trap.
  // (See Parse::do_if and Parse::do_ifnull for the reason
  // we need an uncommon trap.  Briefly, we need a way to
  // detect failure of this optimization, as in 6366351.)
  {
    bool found_trap = false;
    for (uint i1 = 0; i1 < null_block->_nodes.size(); i1++) {
      Node* nn = null_block->_nodes[i1];
      if (nn->is_MachCall() &&
          nn->as_MachCall()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
        const Type* trtype = nn->in(TypeFunc::Parms)->bottom_type();
        if (trtype->isa_int() && trtype->is_int()->is_con()) {
          jint tr_con = trtype->is_int()->get_con();
          Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
          Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
          assert((int)reason < (int)BitsPerInt, "recode bit map");
          if (is_set_nth_bit(allowed_reasons, (int) reason)
              && action != Deoptimization::Action_none) {
            // This uncommon trap is sure to recompile, eventually.
            // When that happens, C->too_many_traps will prevent
            // this transformation from happening again.
            found_trap = true;
          }
        }
        break;
      }
    }
    if (!found_trap) {
      // We did not find an uncommon trap.
      return;
    }
  }

  // Check for decodeHeapOop_not_null node which did not fold into address
  bool is_decoden = ((intptr_t)val) & 1;
  val = (Node*)(((intptr_t)val) & ~1);

  assert(!is_decoden || (val->in(0) == NULL) && val->is_Mach() &&
         (val->as_Mach()->ideal_Opcode() == Op_DecodeN), "sanity");

  // Search the successor block for a load or store whose base value is also
  // the tested value.  There may be several.
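  // (Summary note, paraphrasing the checks below: a candidate must use the
  //  tested pointer as the base -- or as the narrow-oop index -- of its
  //  address, its constant offset must be small enough that a NULL base is
  //  guaranteed to fault in the protected page, and it must be hoistable to
  //  this block without crossing an anti-dependent load or an existing
  //  NULL-check user.)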
  Node_List *out = new Node_List(Thread::current()->resource_area());
  MachNode *best = NULL;        // Best found so far
  for (DUIterator i = val->outs(); val->has_out(i); i++) {
    Node *m = val->out(i);
    if( !m->is_Mach() ) continue;
    MachNode *mach = m->as_Mach();
    was_store = false;
    int iop = mach->ideal_Opcode();
    switch( iop ) {
    case Op_LoadB:
    case Op_LoadUB:
    case Op_LoadUS:
    case Op_LoadD:
    case Op_LoadF:
    case Op_LoadI:
    case Op_LoadL:
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadS:
    case Op_LoadKlass:
    case Op_LoadNKlass:
    case Op_LoadRange:
    case Op_LoadD_unaligned:
    case Op_LoadL_unaligned:
      assert(mach->in(2) == val, "should be address");
      break;
    case Op_StoreB:
    case Op_StoreC:
    case Op_StoreCM:
    case Op_StoreD:
    case Op_StoreF:
    case Op_StoreI:
    case Op_StoreL:
    case Op_StoreP:
    case Op_StoreN:
      was_store = true;         // Memory op is a store op
      // Stores will have their address in slot 2 (memory in slot 1).
      // If the value being null-checked is in another slot, it means we
      // are storing the checked value, which does NOT check the value!
      if( mach->in(2) != val ) continue;
      break;                    // Found a memory op?
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_AryEq:
      // Not a legit memory op for implicit null check regardless of
      // embedded loads
      continue;
    default:                    // Also check for embedded loads
      if( !mach->needs_anti_dependence_check() )
        continue;               // Not a memory op; skip it
      if( must_clone[iop] ) {
        // Do not move nodes which produce flags because
        // RA will try to clone it to place near branch and
        // it will cause recompilation, see clone_node().
        continue;
      }
      {
        // Check that value is used in memory address in
        // instructions with embedded load (CmpP val1,(val2+off)).
        Node* base;
        Node* index;
        const MachOper* oper = mach->memory_inputs(base, index);
        if (oper == NULL || oper == (MachOper*)-1) {
          continue;             // Not a memory op; skip it
        }
        if (val == base ||
            val == index && val->bottom_type()->isa_narrowoop()) {
          break;                // Found it
        } else {
          continue;             // Skip it
        }
      }
      break;
    }
    // check if the offset is not too high for implicit exception
    {
      intptr_t offset = 0;
      const TypePtr *adr_type = NULL;  // Do not need this return value here
      const Node* base = mach->get_base_and_disp(offset, adr_type);
      if (base == NULL || base == NodeSentinel) {
        // Narrow oop address doesn't have base, only index
        if( val->bottom_type()->isa_narrowoop() &&
            MacroAssembler::needs_explicit_null_check(offset) )
          continue;             // Give up if offset is beyond page size
        // cannot reason about it; is probably not implicit null exception
      } else {
        const TypePtr* tptr;
        if (UseCompressedOops && Universe::narrow_oop_shift() == 0) {
          // 32-bits narrow oop can be the base of address expressions
          tptr = base->bottom_type()->make_ptr();
        } else {
          // only regular oops are expected here
          tptr = base->bottom_type()->is_ptr();
        }
        // Give up if offset is not a compile-time constant
        if( offset == Type::OffsetBot || tptr->_offset == Type::OffsetBot )
          continue;
        offset += tptr->_offset; // correct if base is offsetted
        if( MacroAssembler::needs_explicit_null_check(offset) )
          continue;             // Give up if reference is beyond 4K page size
      }
    }

    // Check ctrl input to see if the null-check dominates the memory op
    Block *cb = cfg->_bbs[mach->_idx];
    cb = cb->_idom;             // Always hoist at least 1 block
    if( !was_store ) {          // Stores can be hoisted only one block
      while( cb->_dom_depth > (_dom_depth + 1))
        cb = cb->_idom;         // Hoist loads as far as we want
      // The non-null-block should dominate the memory op, too. Live
      // range spilling will insert a spill in the non-null-block if it
      // needs to spill the memory op for an implicit null check.
      if (cb->_dom_depth == (_dom_depth + 1)) {
        if (cb != not_null_block) continue;
        cb = cb->_idom;
      }
    }
    if( cb != this ) continue;

    // Found a memory user; see if it can be hoisted to check-block
    uint vidx = 0;              // Capture index of value into memop
    uint j;
    for( j = mach->req()-1; j > 0; j-- ) {
      if( mach->in(j) == val ) {
        vidx = j;
        // Ignore DecodeN val which could be hoisted to where needed.
        if( is_decoden ) continue;
      }
      // Block of memory-op input
      Block *inb = cfg->_bbs[mach->in(j)->_idx];
      Block *b = this;          // Start from nul check
      while( b != inb && b->_dom_depth > inb->_dom_depth )
        b = b->_idom;           // search upwards for input
      // See if input dominates null check
      if( b != inb )
        break;
    }
    if( j > 0 )
      continue;
    Block *mb = cfg->_bbs[mach->_idx];
    // Hoisting stores requires more checks for the anti-dependence case.
    // Give up hoisting if we have to move the store past any load.
    if( was_store ) {
      Block *b = mb;            // Start searching here for a local load
      // mach use (faulting) trying to hoist
      // n might be blocker to hoisting
      while( b != this ) {
        uint k;
        for( k = 1; k < b->_nodes.size(); k++ ) {
          Node *n = b->_nodes[k];
          if( n->needs_anti_dependence_check() &&
              n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) )
            break;              // Found anti-dependent load
        }
        if( k < b->_nodes.size() )
          break;                // Found anti-dependent load
        // Make sure control does not do a merge (would have to check allpaths)
        if( b->num_preds() != 2 ) break;
        b = cfg->_bbs[b->pred(1)->_idx]; // Move up to predecessor block
      }
      if( b != this ) continue;
    }

    // Make sure this memory op is not already being used for a NullCheck
    Node *e = mb->end();
    if( e->is_MachNullCheck() && e->in(1) == mach )
      continue;                 // Already being used as a NULL check

    // Found a candidate!  Pick one with least dom depth - the highest
    // in the dom tree should be closest to the null check.
    if( !best ||
        cfg->_bbs[mach->_idx]->_dom_depth < cfg->_bbs[best->_idx]->_dom_depth ) {
      best = mach;
      bidx = vidx;

    }
  }
  // No candidate!
  if( !best ) return;

  // ---- Found an implicit null check
  extern int implicit_null_checks;
  implicit_null_checks++;

  if( is_decoden ) {
    // Check if we need to hoist decodeHeapOop_not_null first.
    Block *valb = cfg->_bbs[val->_idx];
    if( this != valb && this->_dom_depth < valb->_dom_depth ) {
      // Hoist it up to the end of the test block.
      valb->find_remove(val);
      this->add_inst(val);
      cfg->_bbs.map(val->_idx,this);
      // DecodeN on x86 may kill flags. Check for flag-killing projections
      // that also need to be hoisted.
      for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
        Node* n = val->fast_out(j);
        if( n->is_MachProj() ) {
          cfg->_bbs[n->_idx]->find_remove(n);
          this->add_inst(n);
          cfg->_bbs.map(n->_idx,this);
        }
      }
    }
  }
  // Hoist the memory candidate up to the end of the test block.
  Block *old_block = cfg->_bbs[best->_idx];
  old_block->find_remove(best);
  add_inst(best);
  cfg->_bbs.map(best->_idx,this);

  // Move the control dependence
  if (best->in(0) && best->in(0) == old_block->_nodes[0])
    best->set_req(0, _nodes[0]);

  // Check for flag-killing projections that also need to be hoisted
  // Should be DU safe because no edge updates.
  for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
    Node* n = best->fast_out(j);
    if( n->is_MachProj() ) {
      cfg->_bbs[n->_idx]->find_remove(n);
      add_inst(n);
      cfg->_bbs.map(n->_idx,this);
    }
  }

  Compile *C = cfg->C;
  // proj==Op_True --> ne test; proj==Op_False --> eq test.
  // One of two graph shapes got matched:
  //   (IfTrue  (If (Bool NE (CmpP ptr NULL))))
  //   (IfFalse (If (Bool EQ (CmpP ptr NULL))))
  // NULL checks are always branch-if-eq.  If we see an IfTrue projection
  // then we are replacing a 'ne' test with an 'eq' NULL check test.
  // We need to flip the projections to keep the same semantics.
  if( proj->Opcode() == Op_IfTrue ) {
    // Swap order of projections in basic block to swap branch targets
    Node *tmp1 = _nodes[end_idx()+1];
    Node *tmp2 = _nodes[end_idx()+2];
    _nodes.map(end_idx()+1, tmp2);
    _nodes.map(end_idx()+2, tmp1);
    Node *tmp = new (C, 1) Node(C->top()); // Use not NULL input
    tmp1->replace_by(tmp);
    tmp2->replace_by(tmp1);
    tmp->replace_by(tmp2);
    tmp->destruct();
  }

  // Remove the existing null check; use a new implicit null check instead.
  // Since schedule-local needs precise def-use info, we need to correct
  // it as well.
  Node *old_tst = proj->in(0);
  MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
  _nodes.map(end_idx(),nul_chk);
  cfg->_bbs.map(nul_chk->_idx,this);
  // Redirect users of old_tst to nul_chk
  for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
    old_tst->last_out(i2)->set_req(0, nul_chk);
  // Clean-up any dead code
  for (uint i3 = 0; i3 < old_tst->req(); i3++)
    old_tst->set_req(i3, NULL);

  cfg->latency_from_uses(nul_chk);
  cfg->latency_from_uses(best);
}


//------------------------------select-----------------------------------------
// Select a nice fellow from the worklist to schedule next.  If there is only
// one choice, then use it.  Projections take top priority for correctness
// reasons - if I see a projection, then it is next.  There are a number of
// other special cases, for instructions that consume condition codes, et al.
// These are chosen immediately.  Some instructions are required to immediately
// precede the last instruction in the block, and these are taken last.  Of the
// remaining cases (most), choose the instruction with the greatest latency
// (that is, the largest number of pseudo-cycles required to the end of the
// routine).  If there is a tie, choose the instruction with the most inputs.
Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {

  // If only a single entry on the stack, use it
  uint cnt = worklist.size();
  if (cnt == 1) {
    Node *n = worklist[0];
    worklist.map(0,worklist.pop());
    return n;
  }

  uint choice  = 0; // Bigger is most important
  uint latency = 0; // Bigger is scheduled first
  uint score   = 0; // Bigger is better
  int idx = -1;     // Index in worklist

  for( uint i=0; i<cnt; i++ ) { // Inspect entire worklist
    Node *n = worklist[i];      // Get Node on worklist
    int iop = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : 0;
    if( n->is_Proj() ||         // Projections always win
        n->Opcode()== Op_Con || // So does constant 'Top'
        iop == Op_CreateEx ||   // Create-exception must start block
        iop == Op_CheckCastPP
        ) {
      worklist.map(i,worklist.pop());
      return n;
    }

    // Final call in a block must be adjacent to 'catch'
    Node *e = end();
    if( e->is_Catch() && e->in(0)->in(0) == n )
      continue;

    // Memory op for an implicit null check has to be at the end of the block
    if( e->is_MachNullCheck() && e->in(1) == n )
      continue;

    // Schedule IV increment last.
    if (e->is_Mach() && e->as_Mach()->ideal_Opcode() == Op_CountedLoopEnd &&
        e->in(1)->in(1) == n && n->is_iteratively_computed())
      continue;

    uint n_choice = 2;

    // See if this instruction is consumed by a branch. If so, then (as the
    // branch is the last instruction in the basic block) force it to the
    // end of the basic block
    if ( must_clone[iop] ) {
      // See if any use is a branch
      bool found_machif = false;

      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
        Node* use = n->fast_out(j);

        // The use is a conditional branch, make them adjacent
        if (use->is_MachIf() && cfg->_bbs[use->_idx]==this ) {
          found_machif = true;
          break;
        }

        // More than this instruction pending for successor to be ready,
        // don't choose this if other opportunities are ready
        if (ready_cnt.at(use->_idx) > 1)
          n_choice = 1;
      }

      // loop terminated, prefer not to use this instruction
      if (found_machif)
        continue;
    }
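    // (Descriptive note: at this point n_choice is 2 by default, or 1 for a
    //  flag producer whose consuming branch is not ready yet.  The checks
    //  below may raise it to 3 -- n consumes condition codes and should
    //  follow their producer immediately -- or lower it to 1 for MachTemps,
    //  which want to stay near their uses.  The winner is then the node that
    //  maximizes (choice, latency, input count), compared in that order.)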
    // See if this has a predecessor that is "must_clone", i.e. sets the
    // condition code. If so, choose this first
    for (uint j = 0; j < n->req() ; j++) {
      Node *inn = n->in(j);
      if (inn) {
        if (inn->is_Mach() && must_clone[inn->as_Mach()->ideal_Opcode()] ) {
          n_choice = 3;
          break;
        }
      }
    }

    // MachTemps should be scheduled last so they are near their uses
    if (n->is_MachTemp()) {
      n_choice = 1;
    }

    uint n_latency = cfg->_node_latency->at_grow(n->_idx);
    uint n_score   = n->req();   // Many inputs get high score to break ties

    // Keep best latency found
    if( choice < n_choice ||
        ( choice == n_choice &&
          ( latency < n_latency ||
            ( latency == n_latency &&
              ( score < n_score ))))) {
      choice  = n_choice;
      latency = n_latency;
      score   = n_score;
      idx     = i;              // Also keep index in worklist
    }
  } // End of for all ready nodes in worklist

  assert(idx >= 0, "index should be set");
  Node *n = worklist[(uint)idx];      // Get the winner

  worklist.map((uint)idx, worklist.pop());     // Compress worklist
  return n;
}


//------------------------------set_next_call----------------------------------
void Block::set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs ) {
  if( next_call.test_set(n->_idx) ) return;
  for( uint i=0; i<n->len(); i++ ) {
    Node *m = n->in(i);
    if( !m ) continue;  // must see all nodes in block that precede call
    if( bbs[m->_idx] == this )
      set_next_call( m, next_call, bbs );
  }
}

//------------------------------needed_for_next_call---------------------------
// Set the flag 'next_call' for each Node that is needed for the next call to
// be scheduled.  This flag lets me bias scheduling so Nodes needed for the
// next subroutine call get priority - basically it moves things NOT needed
// for the next call till after the call.  This prevents me from trying to
// carry lots of stuff live across a call.
void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs) {
  // Find the next control-defining Node in this block
  Node* call = NULL;
  for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
    Node* m = this_call->fast_out(i);
    if( bbs[m->_idx] == this && // Local-block user
        m != this_call &&       // Not self-start node
        m->is_MachCall() ) {
      call = m;
      break;
    }
  }
  if (call == NULL)  return;    // No next call (e.g., block end is near)
  // Set next-call for all inputs to this call
  set_next_call(call, next_call, bbs);
}

//------------------------------add_call_kills-------------------------------------
void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
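  // (Descriptive note on the policy string, derived from how it is used
  //  below: each register's entry says how it is treated across a call.
  //  'C' (save-on-call, i.e. caller-save) and 'A' (always-save) registers
  //  are added to the kill projection unconditionally; 'E' (save-on-entry,
  //  i.e. callee-saved) registers are added only when the caller passes
  //  exclude_soe, as sched_call() does for Op_CallRuntime.)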
  // Fill in the kill mask for the call
  for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
    if( !regs.Member(r) ) {     // Not already defined by the call
      // Save-on-call register?
      if ((save_policy[r] == 'C') ||
          (save_policy[r] == 'A') ||
          ((save_policy[r] == 'E') && exclude_soe)) {
        proj->_rout.Insert(r);
      }
    }
  }
}


//------------------------------sched_call-------------------------------------
uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
  RegMask regs;

  // Schedule all the users of the call right now.  All the users are
  // projection Nodes, so they must be scheduled next to the call.
  // Collect all the defined registers.
  for (DUIterator_Fast imax, i = mcall->fast_outs(imax); i < imax; i++) {
    Node* n = mcall->fast_out(i);
    assert( n->is_MachProj(), "" );
    int n_cnt = ready_cnt.at(n->_idx)-1;
    ready_cnt.at_put(n->_idx, n_cnt);
    assert( n_cnt == 0, "" );
    // Schedule next to call
    _nodes.map(node_cnt++, n);
    // Collect defined registers
    regs.OR(n->out_RegMask());
    // Check for scheduling the next control-definer
    if( n->bottom_type() == Type::CONTROL )
      // Warm up next pile of heuristic bits
      needed_for_next_call(n, next_call, bbs);

    // Children of projections are now all ready
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j); // Get user
      if( bbs[m->_idx] != this ) continue;
      if( m->is_Phi() ) continue;
      int m_cnt = ready_cnt.at(m->_idx)-1;
      ready_cnt.at_put(m->_idx, m_cnt);
      if( m_cnt == 0 )
        worklist.push(m);
    }

  }

  // Act as if the call defines the Frame Pointer.
  // Certainly the FP is alive and well after the call.
  regs.Insert(matcher.c_frame_pointer());

  // Set all registers killed and not already defined by the call.
  uint r_cnt = mcall->tf()->range()->cnt();
  int op = mcall->ideal_Opcode();
  MachProjNode *proj = new (matcher.C, 1) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
  bbs.map(proj->_idx,this);
  _nodes.insert(node_cnt++, proj);

  // Select the right register save policy.
  const char * save_policy;
  switch (op) {
    case Op_CallRuntime:
    case Op_CallLeaf:
    case Op_CallLeafNoFP:
      // Calling C code so use C calling convention
      save_policy = matcher._c_reg_save_policy;
      break;

    case Op_CallStaticJava:
    case Op_CallDynamicJava:
      // Calling Java code so use Java calling convention
      save_policy = matcher._register_save_policy;
      break;

    default:
      ShouldNotReachHere();
  }

  // When using CallRuntime mark SOE registers as killed by the call
  // so values that could show up in the RegisterMap aren't live in a
  // callee saved register since the register wouldn't know where to
  // find them.  CallLeaf and CallLeafNoFP are ok because they can't
  // have debug info on them.
  // Strictly speaking this only needs to be
  // done for oops since idealreg2debugmask takes care of debug info
  // references but there is no way to handle oops differently than other
  // pointers as far as the kill mask goes.
  bool exclude_soe = op == Op_CallRuntime;

  // If the call is a MethodHandle invoke, we need to exclude the
  // register which is used to save the SP value over MH invokes from
  // the mask.  Otherwise this register could be used for
  // deoptimization information.
  if (op == Op_CallStaticJava) {
    MachCallStaticJavaNode* mcallstaticjava = (MachCallStaticJavaNode*) mcall;
    if (mcallstaticjava->_method_handle_invoke)
      proj->_rout.OR(Matcher::method_handle_invoke_SP_save_mask());
  }

  add_call_kills(proj, regs, save_policy, exclude_soe);

  return node_cnt;
}


//------------------------------schedule_local---------------------------------
// Topological sort within a block.  Someday become a real scheduler.
bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &ready_cnt, VectorSet &next_call) {
  // Already "sorted" are the block start Node (as the first entry), and
  // the block-ending Node and any trailing control projections.  We leave
  // these alone.  PhiNodes and ParmNodes are made to follow the block start
  // Node.  Everything else gets topo-sorted.

#ifndef PRODUCT
  if (cfg->trace_opto_pipelining()) {
    tty->print_cr("# --- schedule_local B%d, before: ---", _pre_order);
    for (uint i = 0;i < _nodes.size();i++) {
      tty->print("# ");
      _nodes[i]->fast_dump();
    }
    tty->print_cr("#");
  }
#endif

  // RootNode is already sorted
  if( _nodes.size() == 1 ) return true;

  // Move PhiNodes and ParmNodes from 1 to cnt up to the start
  uint node_cnt = end_idx();
  uint phi_cnt = 1;
  uint i;
  for( i = 1; i<node_cnt; i++ ) {
    Node *n = _nodes[i];
    if( n->is_Phi() ||          // Found a PhiNode or ParmNode
        (n->is_Proj()  && n->in(0) == head()) ) {
      // Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt
      _nodes.map(i,_nodes[phi_cnt]);
      _nodes.map(phi_cnt++,n);  // swap Phi/Parm up front
    } else {                    // All others
      // Count block-local inputs to 'n'
      uint cnt = n->len();      // Input count
      uint local = 0;
      for( uint j=0; j<cnt; j++ ) {
        Node *m = n->in(j);
        if( m && cfg->_bbs[m->_idx] == this && !m->is_top() )
          local++;              // One more block-local input
      }
      ready_cnt.at_put(n->_idx, local); // Count em up

#ifdef ASSERT
      if( UseConcMarkSweepGC || UseG1GC ) {
        if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
          // Check the precedence edges
          for (uint prec = n->req(); prec < n->len(); prec++) {
            Node* oop_store = n->in(prec);
            if (oop_store != NULL) {
              assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
            }
          }
        }
      }
#endif

      // A few node types require changing a required edge to a precedence edge
      // before allocation.
      if( n->is_Mach() && n->req() > TypeFunc::Parms &&
          (n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ||
           n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
        // MemBarAcquire could be created without Precedent edge.
        // del_req() replaces the specified edge with the last input edge
        // and then removes the last edge. If the specified edge > number of
        // edges the last edge will be moved outside of the input edges array
        // and the edge will be lost. This is why this code should be
        // executed only when Precedent (== TypeFunc::Parms) edge is present.
        Node *x = n->in(TypeFunc::Parms);
        n->del_req(TypeFunc::Parms);
        n->add_prec(x);
      }
    }
  }
  for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count
    ready_cnt.at_put(_nodes[i2]->_idx, 0);

  // All the prescheduled guys do not hold back internal nodes
  uint i3;
  for(i3 = 0; i3<phi_cnt; i3++ ) {
    Node *n = _nodes[i3];
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j);
      if( cfg->_bbs[m->_idx] == this ) { // Local-block user
        int m_cnt = ready_cnt.at(m->_idx)-1;
        ready_cnt.at_put(m->_idx, m_cnt); // Fix ready count
      }
    }
  }

  Node_List delay;
  // Make a worklist
  Node_List worklist;
  for(uint i4=i3; i4<node_cnt; i4++ ) {
    Node *m = _nodes[i4];
    if( !ready_cnt.at(m->_idx) ) {   // Zero ready count?
      if (m->is_iteratively_computed()) {
        // Push induction variable increments last to allow other uses
        // of the phi to be scheduled first. The select() method breaks
        // ties in scheduling by worklist order.
        delay.push(m);
      } else if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CreateEx) {
        // Force the CreateEx to the top of the list so it's processed
        // first and ends up at the start of the block.
        worklist.insert(0, m);
      } else {
        worklist.push(m);       // Then on to worklist!
      }
    }
  }
  while (delay.size()) {
    Node* d = delay.pop();
    worklist.push(d);
  }

  // Warm up the 'next_call' heuristic bits
  needed_for_next_call(_nodes[0], next_call, cfg->_bbs);

#ifndef PRODUCT
  if (cfg->trace_opto_pipelining()) {
    for (uint j=0; j<_nodes.size(); j++) {
      Node *n = _nodes[j];
      int idx = n->_idx;
      tty->print("# ready cnt:%3d ", ready_cnt.at(idx));
      tty->print("latency:%3d ", cfg->_node_latency->at_grow(idx));
      tty->print("%4d: %s\n", idx, n->Name());
    }
  }
#endif

  uint max_idx = (uint)ready_cnt.length();
  // Pull from worklist and schedule
  while( worklist.size() ) {    // Worklist is not ready

#ifndef PRODUCT
    if (cfg->trace_opto_pipelining()) {
      tty->print("# ready list:");
      for( uint i=0; i<worklist.size(); i++ ) {
        Node *n = worklist[i];
        tty->print(" %d", n->_idx);
      }
      tty->cr();
    }
#endif

    // Select and pop a ready guy from worklist
    Node* n = select(cfg, worklist, ready_cnt, next_call, phi_cnt);
    _nodes.map(phi_cnt++,n);    // Schedule him next

#ifndef PRODUCT
    if (cfg->trace_opto_pipelining()) {
      tty->print("# select %d: %s", n->_idx, n->Name());
      tty->print(", latency:%d", cfg->_node_latency->at_grow(n->_idx));
      n->dump();
      if (Verbose) {
        tty->print("# ready list:");
        for( uint i=0; i<worklist.size(); i++ ) {
          Node *n = worklist[i];
          tty->print(" %d", n->_idx);
        }
        tty->cr();
      }
    }

#endif
    if( n->is_MachCall() ) {
      MachCallNode *mcall = n->as_MachCall();
      phi_cnt = sched_call(matcher, cfg->_bbs, phi_cnt, worklist, ready_cnt, mcall, next_call);
      continue;
    }

    if (n->is_Mach() && n->as_Mach()->has_call()) {
      RegMask regs;
      regs.Insert(matcher.c_frame_pointer());
      regs.OR(n->out_RegMask());

      MachProjNode *proj = new (matcher.C, 1) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
      cfg->_bbs.map(proj->_idx,this);
      _nodes.insert(phi_cnt++, proj);

      add_call_kills(proj, regs, matcher._c_reg_save_policy, false);
    }

    // Children are now all ready
    for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
      Node* m = n->fast_out(i5); // Get user
      if( cfg->_bbs[m->_idx] != this ) continue;
      if( m->is_Phi() ) continue;
      if (m->_idx >= max_idx) { // new node, skip it
        assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
        continue;
      }
      int m_cnt = ready_cnt.at(m->_idx)-1;
      ready_cnt.at_put(m->_idx, m_cnt);
      if( m_cnt == 0 )
        worklist.push(m);
    }
  }

  if( phi_cnt != end_idx() ) {
    // did not schedule all.  Retry, Bailout, or Die
    Compile* C = matcher.C;
    if (C->subsume_loads() == true && !C->failing()) {
      // Retry with subsume_loads == false
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
      C->record_failure(C2Compiler::retry_no_subsuming_loads());
    }
    // assert( phi_cnt == end_idx(), "did not schedule all" );
    return false;
  }

#ifndef PRODUCT
  if (cfg->trace_opto_pipelining()) {
    tty->print_cr("#");
    tty->print_cr("# after schedule_local");
    for (uint i = 0;i < _nodes.size();i++) {
      tty->print("# ");
      _nodes[i]->fast_dump();
    }
    tty->cr();
  }
#endif


  return true;
}

//--------------------------catch_cleanup_fix_all_inputs-----------------------
static void catch_cleanup_fix_all_inputs(Node *use, Node *old_def, Node *new_def) {
  for (uint l = 0; l < use->len(); l++) {
    if (use->in(l) == old_def) {
      if (l < use->req()) {
        use->set_req(l, new_def);
      } else {
        use->rm_prec(l);
        use->add_prec(new_def);
        l--;
      }
    }
  }
}

//------------------------------catch_cleanup_find_cloned_def------------------
static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
  assert( use_blk != def_blk, "Inter-block cleanup only");

  // The use is some block below the Catch.  Find and return the clone of the def
  // that dominates the use. If there is no clone in a dominating block, then
  // create a phi for the def in a dominating block.

  // Find which successor block dominates this use.  The successor
  // blocks must all be single-entry (from the Catch only; I will have
  // split blocks to make this so), hence they all dominate.
  while( use_blk->_dom_depth > def_blk->_dom_depth+1 )
    use_blk = use_blk->_idom;

  // Find the successor
  Node *fixup = NULL;

  uint j;
  for( j = 0; j < def_blk->_num_succs; j++ )
    if( use_blk == def_blk->_succs[j] )
      break;

  if( j == def_blk->_num_succs ) {
    // Block at same level in dom-tree is not a successor.  It needs a
    // PhiNode, the PhiNode uses from the def and IT's uses need fixup.
    Node_Array inputs = new Node_List(Thread::current()->resource_area());
    for(uint k = 1; k < use_blk->num_preds(); k++) {
      inputs.map(k, catch_cleanup_find_cloned_def(bbs[use_blk->pred(k)->_idx], def, def_blk, bbs, n_clone_idx));
    }

    // Check to see if the use_blk already has an identical phi inserted.
    // If it exists, it will be at the first position since all uses of a
    // def are processed together.
    Node *phi = use_blk->_nodes[1];
    if( phi->is_Phi() ) {
      fixup = phi;
      for (uint k = 1; k < use_blk->num_preds(); k++) {
        if (phi->in(k) != inputs[k]) {
          // Not a match
          fixup = NULL;
          break;
        }
      }
    }
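    // (Descriptive note: the reuse test above requires the existing phi to
    //  match the freshly computed clone for every predecessor; any mismatch
    //  clears 'fixup' so a new PhiNode is built below.)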
    // If an existing PhiNode was not found, make a new one.
    if (fixup == NULL) {
      Node *new_phi = PhiNode::make(use_blk->head(), def);
      use_blk->_nodes.insert(1, new_phi);
      bbs.map(new_phi->_idx, use_blk);
      for (uint k = 1; k < use_blk->num_preds(); k++) {
        new_phi->set_req(k, inputs[k]);
      }
      fixup = new_phi;
    }

  } else {
    // Found the use just below the Catch.  Make it use the clone.
    fixup = use_blk->_nodes[n_clone_idx];
  }

  return fixup;
}

//--------------------------catch_cleanup_intra_block--------------------------
// Fix all input edges in use that reference "def".  The use is in the same
// block as the def and both have been cloned in each successor block.
static void catch_cleanup_intra_block(Node *use, Node *def, Block *blk, int beg, int n_clone_idx) {

  // Both the use and def have been cloned. For each successor block,
  // get the clone of the use, and make its input the clone of the def
  // found in that block.

  uint use_idx = blk->find_node(use);
  uint offset_idx = use_idx - beg;
  for( uint k = 0; k < blk->_num_succs; k++ ) {
    // Get clone in each successor block
    Block *sb = blk->_succs[k];
    Node *clone = sb->_nodes[offset_idx+1];
    assert( clone->Opcode() == use->Opcode(), "" );

    // Make use-clone reference the def-clone
    catch_cleanup_fix_all_inputs(clone, def, sb->_nodes[n_clone_idx]);
  }
}

//------------------------------catch_cleanup_inter_block---------------------
// Fix all input edges in use that reference "def".  The use is in a different
// block than the def.
static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
  if( !use_blk ) return;        // Can happen if the use is a precedence edge

  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, bbs, n_clone_idx);
  catch_cleanup_fix_all_inputs(use, def, new_def);
}

//------------------------------call_catch_cleanup-----------------------------
// If we inserted any instructions between a Call and his CatchNode,
// clone the instructions on all paths below the Catch.
void Block::call_catch_cleanup(Block_Array &bbs) {

  // End of region to clone
  uint end = end_idx();
  if( !_nodes[end]->is_Catch() ) return;
  // Start of region to clone
  uint beg = end;
  while(!_nodes[beg-1]->is_MachProj() ||
        !_nodes[beg-1]->in(0)->is_MachCall() ) {
    beg--;
    assert(beg > 0,"Catch cleanup walking beyond block boundary");
  }
  // Range of inserted instructions is [beg, end)
  if( beg == end ) return;

  // Clone along all Catch output paths.  Clone area between the 'beg' and
  // 'end' indices.
  for( uint i = 0; i < _num_succs; i++ ) {
    Block *sb = _succs[i];
    // Clone the entire area; ignoring the edge fixup for now.
    for( uint j = end; j > beg; j-- ) {
      // It is safe here to clone a node with anti_dependence
      // since clones dominate on each path.
      Node *clone = _nodes[j-1]->clone();
      sb->_nodes.insert( 1, clone );
      bbs.map(clone->_idx,sb);
    }
  }


  // Fixup edges.  Check the def-use info per cloned Node
  for(uint i2 = beg; i2 < end; i2++ ) {
    uint n_clone_idx = i2-beg+1; // Index of clone of n in each successor block
    Node *n = _nodes[i2];        // Node that got cloned
    // Need DU safe iterator because of edge manipulation in calls.
    Unique_Node_List *out = new Unique_Node_List(Thread::current()->resource_area());
    for (DUIterator_Fast j1max, j1 = n->fast_outs(j1max); j1 < j1max; j1++) {
      out->push(n->fast_out(j1));
    }
    uint max = out->size();
    for (uint j = 0; j < max; j++) {// For all users
      Node *use = out->pop();
      Block *buse = bbs[use->_idx];
      if( use->is_Phi() ) {
        for( uint k = 1; k < use->req(); k++ )
          if( use->in(k) == n ) {
            Node *fixup = catch_cleanup_find_cloned_def(bbs[buse->pred(k)->_idx], n, this, bbs, n_clone_idx);
            use->set_req(k, fixup);
          }
      } else {
        if (this == buse) {
          catch_cleanup_intra_block(use, n, this, beg, n_clone_idx);
        } else {
          catch_cleanup_inter_block(use, buse, n, this, bbs, n_clone_idx);
        }
      }
    } // End for all users

  } // End of for all Nodes in cloned area

  // Remove the now-dead cloned ops
  for(uint i3 = beg; i3 < end; i3++ ) {
    _nodes[beg]->disconnect_inputs(NULL);
    _nodes.remove(beg);
  }

  // If the successor blocks have a CreateEx node, move it back to the top
  for(uint i4 = 0; i4 < _num_succs; i4++ ) {
    Block *sb = _succs[i4];
    uint new_cnt = end - beg;
    // Remove any newly created, but dead, nodes.
    for( uint j = new_cnt; j > 0; j-- ) {
      Node *n = sb->_nodes[j];
      if (n->outcnt() == 0 &&
          (!n->is_Proj() || n->as_Proj()->in(0)->outcnt() == 1) ){
        n->disconnect_inputs(NULL);
        sb->_nodes.remove(j);
        new_cnt--;
      }
    }
    // If any newly created nodes remain, move the CreateEx node to the top
    if (new_cnt > 0) {
      Node *cex = sb->_nodes[1+new_cnt];
      if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
        sb->_nodes.remove(1+new_cnt);
        sb->_nodes.insert(1,cex);
      }
    }
  }
}