src/share/vm/opto/lcm.cpp

changeset 1082:bd441136a5ce (Merge), author kvn, Thu, 19 Mar 2009 09:13:24 -0700

/*
 * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Optimization - Graph Style

#include "incls/_precompiled.incl"
#include "incls/_lcm.cpp.incl"

//------------------------------implicit_null_check----------------------------
// Detect implicit-null-check opportunities.  Basically, find NULL checks
// with suitable memory ops nearby.  Use the memory op to do the NULL check.
// I can generate a memory op if there is not one nearby.
// The proj is the control projection for the not-null case.
// The val is the pointer being checked for nullness.
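// For example (an illustrative sketch, not code from this file): a test
// such as
//   if (p == NULL) uncommon_trap(); else x = p->field;
// can be compiled as just the load of p->field; a NULL p then faults on
// the protected page at address 0, and the signal handler dispatches to
// the uncommon trap instead of an explicit compare-and-branch.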
void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons) {
  // Assume that if a null check is needed for a 0 offset then it is always needed
  // Intel Solaris doesn't support any null checks yet and no
  // mechanism exists (yet) to set the switches at an os_cpu level
  if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return;

  // Make sure the ptr-is-null path appears to be uncommon!
  float f = end()->as_MachIf()->_prob;
  if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f;
  if( f > PROB_UNLIKELY_MAG(4) ) return;
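  // (PROB_UNLIKELY_MAG(4) is a probability on the order of 1e-4, so the
  // null path must be predicted to run less than about once in 10,000
  // executions for the transformation to be profitable.)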

  uint bidx = 0;                // Capture index of value into memop
  bool was_store;               // Memory op is a store op

  // Get the successor block for if the test ptr is non-null
  Block* not_null_block;  // this one goes with the proj
  Block* null_block;
  if (_nodes[_nodes.size()-1] == proj) {
    null_block     = _succs[0];
    not_null_block = _succs[1];
  } else {
    assert(_nodes[_nodes.size()-2] == proj, "proj is one or the other");
    not_null_block = _succs[0];
    null_block     = _succs[1];
  }
  while (null_block->is_Empty() == Block::empty_with_goto) {
    null_block     = null_block->_succs[0];
  }

  // Search the exception block for an uncommon trap.
  // (See Parse::do_if and Parse::do_ifnull for the reason
  // we need an uncommon trap.  Briefly, we need a way to
  // detect failure of this optimization, as in 6366351.)
  {
    bool found_trap = false;
    for (uint i1 = 0; i1 < null_block->_nodes.size(); i1++) {
      Node* nn = null_block->_nodes[i1];
      if (nn->is_MachCall() &&
          nn->as_MachCall()->entry_point() ==
          SharedRuntime::uncommon_trap_blob()->instructions_begin()) {
        const Type* trtype = nn->in(TypeFunc::Parms)->bottom_type();
        if (trtype->isa_int() && trtype->is_int()->is_con()) {
          jint tr_con = trtype->is_int()->get_con();
          Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
          Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
          assert((int)reason < (int)BitsPerInt, "recode bit map");
          if (is_set_nth_bit(allowed_reasons, (int) reason)
              && action != Deoptimization::Action_none) {
            // This uncommon trap is sure to recompile, eventually.
            // When that happens, C->too_many_traps will prevent
            // this transformation from happening again.
            found_trap = true;
          }
        }
        break;
      }
    }
    if (!found_trap) {
      // We did not find an uncommon trap.
      return;
    }
  }

  // Search the successor block for a load or store whose base value is also
  // the tested value.  There may be several.
  Node_List *out = new Node_List(Thread::current()->resource_area());
  MachNode *best = NULL;        // Best found so far
  for (DUIterator i = val->outs(); val->has_out(i); i++) {
    Node *m = val->out(i);
    if( !m->is_Mach() ) continue;
    MachNode *mach = m->as_Mach();
    was_store = false;
    switch( mach->ideal_Opcode() ) {
    case Op_LoadB:
    case Op_LoadUS:
    case Op_LoadD:
    case Op_LoadF:
    case Op_LoadI:
    case Op_LoadL:
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadS:
    case Op_LoadKlass:
    case Op_LoadNKlass:
    case Op_LoadRange:
    case Op_LoadD_unaligned:
    case Op_LoadL_unaligned:
      break;
    case Op_StoreB:
    case Op_StoreC:
    case Op_StoreCM:
    case Op_StoreD:
    case Op_StoreF:
    case Op_StoreI:
    case Op_StoreL:
    case Op_StoreP:
    case Op_StoreN:
      was_store = true;         // Memory op is a store op
      // Stores will have their address in slot 2 (memory in slot 1).
      // If the value being null-checked is in another slot, it means we
      // are storing the checked value, which does NOT check the value!
      if( mach->in(2) != val ) continue;
      break;                    // Found a memory op?
    case Op_StrComp:
    case Op_AryEq:
      // Not a legit memory op for implicit null check regardless of
      // embedded loads
      continue;
    default:                    // Also check for embedded loads
      if( !mach->needs_anti_dependence_check() )
        continue;               // Not a memory op; skip it
      break;
    }
    // Check that the offset is not too high for an implicit exception
    {
      intptr_t offset = 0;
      const TypePtr *adr_type = NULL;  // Do not need this return value here
      const Node* base = mach->get_base_and_disp(offset, adr_type);
      if (base == NULL || base == NodeSentinel) {
        // Narrow oop address doesn't have base, only index
        if( val->bottom_type()->isa_narrowoop() &&
            MacroAssembler::needs_explicit_null_check(offset) )
          continue;             // Give up if offset is beyond page size
        // cannot reason about it; it is probably not an implicit null exception
      } else {
        const TypePtr* tptr;
        if (UseCompressedOops && Universe::narrow_oop_shift() == 0) {
          // 32-bits narrow oop can be the base of address expressions
          tptr = base->bottom_type()->make_ptr();
        } else {
          // only regular oops are expected here
          tptr = base->bottom_type()->is_ptr();
        }
        // Give up if offset is not a compile-time constant
        if( offset == Type::OffsetBot || tptr->_offset == Type::OffsetBot )
          continue;
        offset += tptr->_offset; // adjust if the base itself carries an offset
        if( MacroAssembler::needs_explicit_null_check(offset) )
          continue;             // Give up if reference is beyond 4K page size
      }
    }
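    // (A NULL base faults only because the page at address 0 is protected;
    // if the constant offset reached past that page, the access might land
    // on a mapped address and never fault, hence the checks above.)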

    // Check ctrl input to see if the null-check dominates the memory op
    Block *cb = cfg->_bbs[mach->_idx];
    cb = cb->_idom;             // Always hoist at least 1 block
    if( !was_store ) {          // Stores can be hoisted only one block
      while( cb->_dom_depth > (_dom_depth + 1))
        cb = cb->_idom;         // Hoist loads as far as we want
      // The non-null-block should dominate the memory op, too. Live
      // range spilling will insert a spill in the non-null-block if it
      // needs to spill the memory op for an implicit null check.
      if (cb->_dom_depth == (_dom_depth + 1)) {
        if (cb != not_null_block) continue;
        cb = cb->_idom;
      }
    }
    if( cb != this ) continue;

    // Found a memory user; see if it can be hoisted to check-block
    uint vidx = 0;              // Capture index of value into memop
    uint j;
    for( j = mach->req()-1; j > 0; j-- ) {
      if( mach->in(j) == val ) vidx = j;
      // Block of memory-op input
      Block *inb = cfg->_bbs[mach->in(j)->_idx];
      Block *b = this;          // Start from null check
      while( b != inb && b->_dom_depth > inb->_dom_depth )
        b = b->_idom;           // search upwards for input
      // See if input dominates null check
      if( b != inb )
        break;
    }
    if( j > 0 )
      continue;
    Block *mb = cfg->_bbs[mach->_idx];
    // Hoisting stores requires more checks for the anti-dependence case.
    // Give up hoisting if we have to move the store past any load.
    if( was_store ) {
      Block *b = mb;            // Start searching here for a local load
      // mach use (faulting) trying to hoist
      // n might be blocker to hoisting
      while( b != this ) {
        uint k;
        for( k = 1; k < b->_nodes.size(); k++ ) {
          Node *n = b->_nodes[k];
          if( n->needs_anti_dependence_check() &&
              n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) )
            break;              // Found anti-dependent load
        }
        if( k < b->_nodes.size() )
          break;                // Found anti-dependent load
        // Make sure control does not do a merge (would have to check allpaths)
        if( b->num_preds() != 2 ) break;
        b = cfg->_bbs[b->pred(1)->_idx]; // Move up to predecessor block
      }
      if( b != this ) continue;
    }

    // Make sure this memory op is not already being used for a NullCheck
    Node *e = mb->end();
    if( e->is_MachNullCheck() && e->in(1) == mach )
      continue;                 // Already being used as a NULL check

    // Found a candidate!  Pick one with least dom depth - the highest
    // in the dom tree should be closest to the null check.
    if( !best ||
        cfg->_bbs[mach->_idx]->_dom_depth < cfg->_bbs[best->_idx]->_dom_depth ) {
      best = mach;
      bidx = vidx;

    }
  }
  // No candidate!
  if( !best ) return;

  // ---- Found an implicit null check
  extern int implicit_null_checks;
  implicit_null_checks++;

  // Hoist the memory candidate up to the end of the test block.
  Block *old_block = cfg->_bbs[best->_idx];
  old_block->find_remove(best);
  add_inst(best);
  cfg->_bbs.map(best->_idx,this);

  // Move the control dependence
  if (best->in(0) && best->in(0) == old_block->_nodes[0])
    best->set_req(0, _nodes[0]);

  // Check for flag-killing projections that also need to be hoisted
  // Should be DU safe because no edge updates.
  for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
    Node* n = best->fast_out(j);
    if( n->Opcode() == Op_MachProj ) {
      cfg->_bbs[n->_idx]->find_remove(n);
      add_inst(n);
      cfg->_bbs.map(n->_idx,this);
    }
  }

  Compile *C = cfg->C;
  // proj == Op_IfTrue --> ne test; proj == Op_IfFalse --> eq test.
  // One of two graph shapes got matched:
  //   (IfTrue  (If (Bool NE (CmpP ptr NULL))))
  //   (IfFalse (If (Bool EQ (CmpP ptr NULL))))
  // NULL checks are always branch-if-eq.  If we see an IfTrue projection
  // then we are replacing a 'ne' test with an 'eq' NULL check test.
  // We need to flip the projections to keep the same semantics.
  if( proj->Opcode() == Op_IfTrue ) {
    // Swap order of projections in basic block to swap branch targets
    Node *tmp1 = _nodes[end_idx()+1];
    Node *tmp2 = _nodes[end_idx()+2];
    _nodes.map(end_idx()+1, tmp2);
    _nodes.map(end_idx()+2, tmp1);
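    // Exchange the users of the two projections through a temporary node,
    // in the style of a classic three-way swap: tmp takes tmp1's users,
    // tmp1 takes tmp2's users, and tmp2 takes the users parked on tmp.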
    Node *tmp = new (C, 1) Node(C->top()); // Use a non-NULL input
    tmp1->replace_by(tmp);
    tmp2->replace_by(tmp1);
    tmp->replace_by(tmp2);
    tmp->destruct();
  }

  // Remove the existing null check; use a new implicit null check instead.
  // Since schedule-local needs precise def-use info, we need to correct
  // it as well.
  Node *old_tst = proj->in(0);
  MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
  _nodes.map(end_idx(),nul_chk);
  cfg->_bbs.map(nul_chk->_idx,this);
  // Redirect users of old_test to nul_chk
  for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
    old_tst->last_out(i2)->set_req(0, nul_chk);
  // Clean-up any dead code
  for (uint i3 = 0; i3 < old_tst->req(); i3++)
    old_tst->set_req(i3, NULL);

  cfg->latency_from_uses(nul_chk);
  cfg->latency_from_uses(best);
}

//------------------------------select-----------------------------------------
// Select a nice fellow from the worklist to schedule next. If there is only
// one choice, then use it. Projections take top priority for correctness
// reasons - if I see a projection, then it is next.  There are a number of
// other special cases, for instructions that consume condition codes, et al.
// These are chosen immediately. Some instructions are required to immediately
// precede the last instruction in the block, and these are taken last. Of the
// remaining cases (most), choose the instruction with the greatest latency
// (that is, the greatest number of pseudo-cycles required to the end of the
// routine). If there is a tie, choose the instruction with the most inputs.
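// (In short, candidates are ranked by the tuple (choice, latency, score):
// 'choice' encodes the special cases above, 'latency' is the pseudo-cycle
// estimate, and 'score' is the input count used as the final tie-breaker.)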
Node *Block::select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSet &next_call, uint sched_slot) {

  // If only a single entry on the stack, use it
  uint cnt = worklist.size();
  if (cnt == 1) {
    Node *n = worklist[0];
    worklist.map(0,worklist.pop());
    return n;
  }

  uint choice  = 0; // Bigger is most important
  uint latency = 0; // Bigger is scheduled first
  uint score   = 0; // Bigger is better
  int idx = -1;     // Index in worklist

  for( uint i=0; i<cnt; i++ ) { // Inspect entire worklist
    // Order in worklist is used to break ties.
    // See caller for how this is used to delay scheduling
    // of induction variable increments until after the other
    // uses of the phi are scheduled.
    Node *n = worklist[i];      // Get Node on worklist

    int iop = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : 0;
    if( n->is_Proj() ||         // Projections always win
        n->Opcode()== Op_Con || // So does constant 'Top'
        iop == Op_CreateEx ||   // Create-exception must start block
        iop == Op_CheckCastPP
        ) {
      worklist.map(i,worklist.pop());
      return n;
    }

    // Final call in a block must be adjacent to 'catch'
    Node *e = end();
    if( e->is_Catch() && e->in(0)->in(0) == n )
      continue;

    // Memory op for an implicit null check has to be at the end of the block
    if( e->is_MachNullCheck() && e->in(1) == n )
      continue;

    uint n_choice  = 2;

    // See if this instruction is consumed by a branch. If so, then (as the
    // branch is the last instruction in the basic block) force it to the
    // end of the basic block
    if ( must_clone[iop] ) {
      // See if any use is a branch
      bool found_machif = false;

      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
        Node* use = n->fast_out(j);

        // The use is a conditional branch, make them adjacent
        if (use->is_MachIf() && cfg->_bbs[use->_idx]==this ) {
          found_machif = true;
          break;
        }

        // More than just this instruction is pending before the successor
        // is ready; don't choose this one if other opportunities are ready
        if (ready_cnt[use->_idx] > 1)
          n_choice = 1;
      }

      // loop terminated, prefer not to use this instruction
      if (found_machif)
        continue;
    }

    // See if this has a predecessor that is "must_clone", i.e. sets the
    // condition code. If so, choose this first
    for (uint j = 0; j < n->req() ; j++) {
      Node *inn = n->in(j);
      if (inn) {
        if (inn->is_Mach() && must_clone[inn->as_Mach()->ideal_Opcode()] ) {
          n_choice = 3;
          break;
        }
      }
    }

    // MachTemps should be scheduled last so they are near their uses
    if (n->is_MachTemp()) {
      n_choice = 1;
    }

    uint n_latency = cfg->_node_latency.at_grow(n->_idx);
    uint n_score   = n->req();   // Many inputs get high score to break ties

    // Keep best latency found
    if( choice < n_choice ||
        ( choice == n_choice &&
          ( latency < n_latency ||
            ( latency == n_latency &&
              ( score < n_score ))))) {
      choice  = n_choice;
      latency = n_latency;
      score   = n_score;
      idx     = i;               // Also keep index in worklist
    }
  } // End of for all ready nodes in worklist

  assert(idx >= 0, "index should be set");
  Node *n = worklist[(uint)idx];      // Get the winner

  worklist.map((uint)idx, worklist.pop());     // Compress worklist
  return n;
}

//------------------------------set_next_call----------------------------------
void Block::set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs ) {
  if( next_call.test_set(n->_idx) ) return;
  for( uint i=0; i<n->len(); i++ ) {
    Node *m = n->in(i);
    if( !m ) continue;  // must see all nodes in block that precede call
    if( bbs[m->_idx] == this )
      set_next_call( m, next_call, bbs );
  }
}

//------------------------------needed_for_next_call---------------------------
// Set the flag 'next_call' for each Node that is needed for the next call to
// be scheduled.  This flag lets me bias scheduling so Nodes needed for the
// next subroutine call get priority - basically it moves things NOT needed
// for the next call till after the call.  This prevents me from trying to
// carry lots of stuff live across a call.
void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs) {
  // Find the next control-defining Node in this block
  Node* call = NULL;
  for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
    Node* m = this_call->fast_out(i);
    if( bbs[m->_idx] == this && // Local-block user
        m != this_call &&       // Not self-start node
        m->is_Call() ) {
      call = m;
      break;
    }
  }
  if (call == NULL)  return;    // No next call (e.g., block end is near)
  // Set next-call for all inputs to this call
  set_next_call(call, next_call, bbs);
}

//------------------------------sched_call-------------------------------------
uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
  RegMask regs;

  // Schedule all the users of the call right now.  All the users are
  // projection Nodes, so they must be scheduled next to the call.
  // Collect all the defined registers.
  for (DUIterator_Fast imax, i = mcall->fast_outs(imax); i < imax; i++) {
    Node* n = mcall->fast_out(i);
    assert( n->Opcode()==Op_MachProj, "" );
    --ready_cnt[n->_idx];
    assert( !ready_cnt[n->_idx], "" );
    // Schedule next to call
    _nodes.map(node_cnt++, n);
    // Collect defined registers
    regs.OR(n->out_RegMask());
    // Check for scheduling the next control-definer
    if( n->bottom_type() == Type::CONTROL )
      // Warm up next pile of heuristic bits
      needed_for_next_call(n, next_call, bbs);

    // Children of projections are now all ready
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j); // Get user
      if( bbs[m->_idx] != this ) continue;
      if( m->is_Phi() ) continue;
      if( !--ready_cnt[m->_idx] )
        worklist.push(m);
    }

  }

  // Act as if the call defines the Frame Pointer.
  // Certainly the FP is alive and well after the call.
  regs.Insert(matcher.c_frame_pointer());

  // Set all registers killed and not already defined by the call.
  uint r_cnt = mcall->tf()->range()->cnt();
  int op = mcall->ideal_Opcode();
  MachProjNode *proj = new (matcher.C, 1) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
  bbs.map(proj->_idx,this);
  _nodes.insert(node_cnt++, proj);

  // Select the right register save policy.
  const char * save_policy;
  switch (op) {
    case Op_CallRuntime:
    case Op_CallLeaf:
    case Op_CallLeafNoFP:
      // Calling C code so use C calling convention
      save_policy = matcher._c_reg_save_policy;
      break;

    case Op_CallStaticJava:
    case Op_CallDynamicJava:
      // Calling Java code so use Java calling convention
      save_policy = matcher._register_save_policy;
      break;

    default:
      ShouldNotReachHere();
  }

  // When using CallRuntime mark SOE registers as killed by the call
  // so values that could show up in the RegisterMap aren't live in a
  // callee saved register since the register wouldn't know where to
  // find them.  CallLeaf and CallLeafNoFP are ok because they can't
  // have debug info on them.  Strictly speaking this only needs to be
  // done for oops since idealreg2debugmask takes care of debug info
  // references, but there is no way to handle oops differently than other
  // pointers as far as the kill mask goes.
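  // (Per the loop below: registers with save policy 'C' or 'A' are always
  // added to the kill mask; 'E' (save-on-entry) registers are added only
  // when exclude_soe is set, i.e. for Op_CallRuntime.)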
  bool exclude_soe = op == Op_CallRuntime;

  // Fill in the kill mask for the call
  for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
    if( !regs.Member(r) ) {     // Not already defined by the call
      // Save-on-call register?
      if ((save_policy[r] == 'C') ||
          (save_policy[r] == 'A') ||
          ((save_policy[r] == 'E') && exclude_soe)) {
        proj->_rout.Insert(r);
      }
    }
  }

  return node_cnt;
}

//------------------------------schedule_local---------------------------------
// Topological sort within a block.  Someday become a real scheduler.
bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, VectorSet &next_call) {
  // Already "sorted" are the block start Node (as the first entry), and
  // the block-ending Node and any trailing control projections.  We leave
  // these alone.  PhiNodes and ParmNodes are made to follow the block start
  // Node.  Everything else gets topo-sorted.
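  // (Rough shape of what follows: (1) move Phis/Parms to the front while
  // counting each node's block-local inputs into ready_cnt; (2) seed a
  // worklist with nodes whose ready count is zero; (3) repeatedly select()
  // a node and release its now-ready users, with calls handled specially
  // by sched_call().)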

#ifndef PRODUCT
    if (cfg->trace_opto_pipelining()) {
      tty->print_cr("# --- schedule_local B%d, before: ---", _pre_order);
      for (uint i = 0;i < _nodes.size();i++) {
        tty->print("# ");
        _nodes[i]->fast_dump();
      }
      tty->print_cr("#");
    }
#endif

  // RootNode is already sorted
  if( _nodes.size() == 1 ) return true;

  // Move PhiNodes and ParmNodes from 1 to cnt up to the start
  uint node_cnt = end_idx();
  uint phi_cnt = 1;
  uint i;
  for( i = 1; i<node_cnt; i++ ) { // Scan for Phi
    Node *n = _nodes[i];
    if( n->is_Phi() ||          // Found a PhiNode or ParmNode
        (n->is_Proj()  && n->in(0) == head()) ) {
      // Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt
      _nodes.map(i,_nodes[phi_cnt]);
      _nodes.map(phi_cnt++,n);  // swap Phi/Parm up front
    } else {                    // All others
      // Count block-local inputs to 'n'
      uint cnt = n->len();      // Input count
      uint local = 0;
      for( uint j=0; j<cnt; j++ ) {
        Node *m = n->in(j);
        if( m && cfg->_bbs[m->_idx] == this && !m->is_top() )
          local++;              // One more block-local input
      }
      ready_cnt[n->_idx] = local; // Count em up

      // A few node types require changing a required edge to a precedence edge
      // before allocation.
      if( UseConcMarkSweepGC || UseG1GC ) {
        if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
          // Note: Required edges with an index greater than oper_input_base
          // are not supported by the allocator.
          // Note2: Can only depend on the unmatched edge being last,
          // cannot depend on its absolute position.
          Node *oop_store = n->in(n->req() - 1);
          n->del_req(n->req() - 1);
          n->add_prec(oop_store);
          assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
        }
      }
      if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire &&
          n->req() > TypeFunc::Parms ) {
        // MemBarAcquire could be created without a Precedent edge.
        // del_req() replaces the specified edge with the last input edge
        // and then removes the last edge. If the specified edge index is
        // greater than the number of edges, the last edge will be moved
        // outside of the input edges array and lost. This is why this code
        // should be executed only when the Precedent (== TypeFunc::Parms)
        // edge is present.
        Node *x = n->in(TypeFunc::Parms);
        n->del_req(TypeFunc::Parms);
        n->add_prec(x);
      }
    }
  }
  for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count
    ready_cnt[_nodes[i2]->_idx] = 0;

  // All the prescheduled guys do not hold back internal nodes
  uint i3;
  for(i3 = 0; i3<phi_cnt; i3++ ) {  // For all pre-scheduled
    Node *n = _nodes[i3];       // Get pre-scheduled
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j);
      if( cfg->_bbs[m->_idx] ==this ) // Local-block user
        ready_cnt[m->_idx]--;   // Fix ready count
    }
  }

  Node_List delay;
  // Make a worklist
  Node_List worklist;
  for(uint i4=i3; i4<node_cnt; i4++ ) {    // Put ready guys on worklist
    Node *m = _nodes[i4];
    if( !ready_cnt[m->_idx] ) {   // Zero ready count?
      if (m->is_iteratively_computed()) {
        // Push induction variable increments last to allow other uses
        // of the phi to be scheduled first. The select() method breaks
        // ties in scheduling by worklist order.
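        // (For a loop counter, e.g. i = phi(...); ...uses of i...; i+1,
        // this delays the i+1 increment so the other uses of the phi can
        // be scheduled ahead of it.)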
        delay.push(m);
      } else if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CreateEx) {
        // Force the CreateEx to the top of the list so it's processed
        // first and ends up at the start of the block.
        worklist.insert(0, m);
      } else {
        worklist.push(m);         // Then on to worklist!
      }
    }
  }
  while (delay.size()) {
    Node* d = delay.pop();
    worklist.push(d);
  }

  // Warm up the 'next_call' heuristic bits
  needed_for_next_call(_nodes[0], next_call, cfg->_bbs);

#ifndef PRODUCT
    if (cfg->trace_opto_pipelining()) {
      for (uint j=0; j<_nodes.size(); j++) {
        Node     *n = _nodes[j];
        int     idx = n->_idx;
        tty->print("#   ready cnt:%3d  ", ready_cnt[idx]);
        tty->print("latency:%3d  ", cfg->_node_latency.at_grow(idx));
        tty->print("%4d: %s\n", idx, n->Name());
      }
    }
#endif

  // Pull from worklist and schedule
  while( worklist.size() ) {    // Worklist is not empty

#ifndef PRODUCT
    if (cfg->trace_opto_pipelining()) {
      tty->print("#   ready list:");
      for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
        Node *n = worklist[i];      // Get Node on worklist
        tty->print(" %d", n->_idx);
      }
      tty->cr();
    }
#endif

    // Select and pop a ready guy from worklist
    Node* n = select(cfg, worklist, ready_cnt, next_call, phi_cnt);
    _nodes.map(phi_cnt++,n);    // Schedule him next

#ifndef PRODUCT
    if (cfg->trace_opto_pipelining()) {
      tty->print("#    select %d: %s", n->_idx, n->Name());
      tty->print(", latency:%d", cfg->_node_latency.at_grow(n->_idx));
      n->dump();
      if (Verbose) {
        tty->print("#   ready list:");
        for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
          Node *n = worklist[i];      // Get Node on worklist
          tty->print(" %d", n->_idx);
        }
        tty->cr();
      }
    }

#endif
    if( n->is_MachCall() ) {
      MachCallNode *mcall = n->as_MachCall();
      phi_cnt = sched_call(matcher, cfg->_bbs, phi_cnt, worklist, ready_cnt, mcall, next_call);
      continue;
    }
    // Children are now all ready
    for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
      Node* m = n->fast_out(i5); // Get user
      if( cfg->_bbs[m->_idx] != this ) continue;
      if( m->is_Phi() ) continue;
      if( !--ready_cnt[m->_idx] )
        worklist.push(m);
    }
  }

  if( phi_cnt != end_idx() ) {
    // did not schedule all.  Retry, Bailout, or Die
    Compile* C = matcher.C;
    if (C->subsume_loads() == true && !C->failing()) {
      // Retry with subsume_loads == false
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
      C->record_failure(C2Compiler::retry_no_subsuming_loads());
    }
    // assert( phi_cnt == end_idx(), "did not schedule all" );
    return false;
  }

#ifndef PRODUCT
  if (cfg->trace_opto_pipelining()) {
    tty->print_cr("#");
    tty->print_cr("# after schedule_local");
    for (uint i = 0;i < _nodes.size();i++) {
      tty->print("# ");
      _nodes[i]->fast_dump();
    }
    tty->cr();
  }
#endif

  return true;
}

//--------------------------catch_cleanup_fix_all_inputs-----------------------
static void catch_cleanup_fix_all_inputs(Node *use, Node *old_def, Node *new_def) {
  for (uint l = 0; l < use->len(); l++) {
    if (use->in(l) == old_def) {
      if (l < use->req()) {
        use->set_req(l, new_def);
      } else {
        use->rm_prec(l);
        use->add_prec(new_def);
        l--;
      }
    }
  }
}

//------------------------------catch_cleanup_find_cloned_def------------------
static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
  assert( use_blk != def_blk, "Inter-block cleanup only");

  // The use is some block below the Catch.  Find and return the clone of the def
  // that dominates the use. If there is no clone in a dominating block, then
  // create a phi for the def in a dominating block.

  // Find which successor block dominates this use.  The successor
  // blocks must all be single-entry (from the Catch only; I will have
  // split blocks to make this so), hence they all dominate.
  while( use_blk->_dom_depth > def_blk->_dom_depth+1 )
    use_blk = use_blk->_idom;

  // Find the successor
  Node *fixup = NULL;

  uint j;
  for( j = 0; j < def_blk->_num_succs; j++ )
    if( use_blk == def_blk->_succs[j] )
      break;

  if( j == def_blk->_num_succs ) {
    // Block at same level in dom-tree is not a successor.  It needs a
    // PhiNode; the PhiNode uses the def, and ITS uses need fixup.
    Node_Array inputs = new Node_List(Thread::current()->resource_area());
    for(uint k = 1; k < use_blk->num_preds(); k++) {
      inputs.map(k, catch_cleanup_find_cloned_def(bbs[use_blk->pred(k)->_idx], def, def_blk, bbs, n_clone_idx));
    }

    // Check to see if the use_blk already has an identical phi inserted.
    // If it exists, it will be at the first position since all uses of a
    // def are processed together.
    Node *phi = use_blk->_nodes[1];
    if( phi->is_Phi() ) {
      fixup = phi;
      for (uint k = 1; k < use_blk->num_preds(); k++) {
        if (phi->in(k) != inputs[k]) {
          // Not a match
          fixup = NULL;
          break;
        }
      }
    }

    // If an existing PhiNode was not found, make a new one.
    if (fixup == NULL) {
      Node *new_phi = PhiNode::make(use_blk->head(), def);
      use_blk->_nodes.insert(1, new_phi);
      bbs.map(new_phi->_idx, use_blk);
      for (uint k = 1; k < use_blk->num_preds(); k++) {
        new_phi->set_req(k, inputs[k]);
      }
      fixup = new_phi;
    }

  } else {
    // Found the use just below the Catch.  Make it use the clone.
    fixup = use_blk->_nodes[n_clone_idx];
  }

  return fixup;
}

//--------------------------catch_cleanup_intra_block--------------------------
// Fix all input edges in use that reference "def".  The use is in the same
// block as the def and both have been cloned in each successor block.
static void catch_cleanup_intra_block(Node *use, Node *def, Block *blk, int beg, int n_clone_idx) {

  // Both the use and def have been cloned. For each successor block,
  // get the clone of the use, and make its input the clone of the def
  // found in that block.

  uint use_idx = blk->find_node(use);
  uint offset_idx = use_idx - beg;
  for( uint k = 0; k < blk->_num_succs; k++ ) {
    // Get clone in each successor block
    Block *sb = blk->_succs[k];
    Node *clone = sb->_nodes[offset_idx+1];
    assert( clone->Opcode() == use->Opcode(), "" );

    // Make use-clone reference the def-clone
    catch_cleanup_fix_all_inputs(clone, def, sb->_nodes[n_clone_idx]);
  }
}

//------------------------------catch_cleanup_inter_block---------------------
// Fix all input edges in use that reference "def".  The use is in a different
// block than the def.
static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
  if( !use_blk ) return;        // Can happen if the use is a precedence edge

  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, bbs, n_clone_idx);
  catch_cleanup_fix_all_inputs(use, def, new_def);
}

//------------------------------call_catch_cleanup-----------------------------
// If we inserted any instructions between a Call and its CatchNode,
// clone the instructions on all paths below the Catch.
void Block::call_catch_cleanup(Block_Array &bbs) {

  // End of region to clone
  uint end = end_idx();
  if( !_nodes[end]->is_Catch() ) return;
  // Start of region to clone
  uint beg = end;
  while( _nodes[beg-1]->Opcode() != Op_MachProj ||
        !_nodes[beg-1]->in(0)->is_Call() ) {
    beg--;
    assert(beg > 0,"Catch cleanup walking beyond block boundary");
  }
  // Range of inserted instructions is [beg, end)
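  // (These are instructions that code motion placed between the call's
  // MachProj and the Catch; since the Catch fans control out to several
  // handler paths, each successor needs its own copy of them.)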
  if( beg == end ) return;

  // Clone along all Catch output paths.  Clone area between the 'beg' and
  // 'end' indices.
  for( uint i = 0; i < _num_succs; i++ ) {
    Block *sb = _succs[i];
    // Clone the entire area; ignoring the edge fixup for now.
    for( uint j = end; j > beg; j-- ) {
      Node *clone = _nodes[j-1]->clone();
      sb->_nodes.insert( 1, clone );
      bbs.map(clone->_idx,sb);
    }
  }

  // Fixup edges.  Check the def-use info per cloned Node
  for(uint i2 = beg; i2 < end; i2++ ) {
    uint n_clone_idx = i2-beg+1; // Index of clone of n in each successor block
    Node *n = _nodes[i2];        // Node that got cloned
    // Need DU safe iterator because of edge manipulation in calls.
    Unique_Node_List *out = new Unique_Node_List(Thread::current()->resource_area());
    for (DUIterator_Fast j1max, j1 = n->fast_outs(j1max); j1 < j1max; j1++) {
      out->push(n->fast_out(j1));
    }
    uint max = out->size();
    for (uint j = 0; j < max; j++) {// For all users
      Node *use = out->pop();
      Block *buse = bbs[use->_idx];
      if( use->is_Phi() ) {
        for( uint k = 1; k < use->req(); k++ )
          if( use->in(k) == n ) {
            Node *fixup = catch_cleanup_find_cloned_def(bbs[buse->pred(k)->_idx], n, this, bbs, n_clone_idx);
            use->set_req(k, fixup);
          }
      } else {
        if (this == buse) {
          catch_cleanup_intra_block(use, n, this, beg, n_clone_idx);
        } else {
          catch_cleanup_inter_block(use, buse, n, this, bbs, n_clone_idx);
        }
      }
    } // End for all users

  } // End of for all Nodes in cloned area

  // Remove the now-dead cloned ops
  for(uint i3 = beg; i3 < end; i3++ ) {
    _nodes[beg]->disconnect_inputs(NULL);
    _nodes.remove(beg);
  }

  // If the successor blocks have a CreateEx node, move it back to the top
  for(uint i4 = 0; i4 < _num_succs; i4++ ) {
    Block *sb = _succs[i4];
    uint new_cnt = end - beg;
    // Remove any newly created, but dead, nodes.
    for( uint j = new_cnt; j > 0; j-- ) {
      Node *n = sb->_nodes[j];
      if (n->outcnt() == 0 &&
          (!n->is_Proj() || n->as_Proj()->in(0)->outcnt() == 1) ) {
        n->disconnect_inputs(NULL);
        sb->_nodes.remove(j);
        new_cnt--;
      }
    }
    // If any newly created nodes remain, move the CreateEx node to the top
    if (new_cnt > 0) {
      Node *cex = sb->_nodes[1+new_cnt];
      if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
        sb->_nodes.remove(1+new_cnt);
        sb->_nodes.insert(1,cex);
      }
    }
  }
}
