src/share/vm/opto/block.cpp

OpenJDK 8u HotSpot, tag jdk8u25-b17 (http://hg.openjdk.java.net/jdk8u/jdk8u/hotspot/)

/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/opcodes.hpp"
#include "opto/rootnode.hpp"
#include "utilities/copy.hpp"

void Block_Array::grow( uint i ) {
  assert(i >= Max(), "must be an overflow");
  debug_only(_limit = i+1);
  if( i < _size )  return;
  if( !_size ) {
    _size = 1;
    _blocks = (Block**)_arena->Amalloc( _size * sizeof(Block*) );
    _blocks[0] = NULL;
  }
  uint old = _size;
  while( i >= _size ) _size <<= 1;      // Double to fit
  _blocks = (Block**)_arena->Arealloc( _blocks, old*sizeof(Block*),_size*sizeof(Block*));
  Copy::zero_to_bytes( &_blocks[old], (_size-old)*sizeof(Block*) );
}
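
// Growth sketch (illustrative numbers, not from this code): a call such as
// grow(5) on an array whose current _size is 4 doubles _size to 8,
// reallocates the arena storage, and zeroes the new slots 4..7.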

void Block_List::remove(uint i) {
  assert(i < _cnt, "index out of bounds");
  Copy::conjoint_words_to_lower((HeapWord*)&_blocks[i+1], (HeapWord*)&_blocks[i], ((_cnt-i-1)*sizeof(Block*)));
  pop(); // shrink list by one block
}

void Block_List::insert(uint i, Block *b) {
  push(b); // grow list by one block
  Copy::conjoint_words_to_higher((HeapWord*)&_blocks[i], (HeapWord*)&_blocks[i+1], ((_cnt-i-1)*sizeof(Block*)));
  _blocks[i] = b;
}

#ifndef PRODUCT
void Block_List::print() {
  for (uint i=0; i < size(); i++) {
    tty->print("B%d ", _blocks[i]->_pre_order);
  }
  tty->print("size = %d\n", size());
}
#endif

uint Block::code_alignment() {
  // Check for Root block
  if (_pre_order == 0) return CodeEntryAlignment;
  // Check for Start block
  if (_pre_order == 1) return InteriorEntryAlignment;
  // Check for loop alignment
  if (has_loop_alignment()) return loop_alignment();

  return relocInfo::addr_unit(); // no particular alignment
}

uint Block::compute_loop_alignment() {
  Node *h = head();
  int unit_sz = relocInfo::addr_unit();
  if (h->is_Loop() && h->as_Loop()->is_inner_loop())  {
    // Pre- and post-loops have low trip count so do not bother with
    // NOPs to align the loop head.  The constants are hidden from tuning
    // but only because my "divide by 4" heuristic surely gets nearly
    // all possible gain (a "do not align at all" heuristic has a
    // chance of getting a really tiny gain).
    if (h->is_CountedLoop() && (h->as_CountedLoop()->is_pre_loop() ||
                                h->as_CountedLoop()->is_post_loop())) {
      return (OptoLoopAlignment > 4*unit_sz) ? (OptoLoopAlignment>>2) : unit_sz;
    }
    // Loops with low backedge frequency should not be aligned.
    Node *n = h->in(LoopNode::LoopBackControl)->in(0);
    if (n->is_MachIf() && n->as_MachIf()->_prob < 0.01) {
      return unit_sz; // Loop does not loop, more often than not!
    }
    return OptoLoopAlignment; // Otherwise align loop head
  }

  return unit_sz; // no particular alignment
}
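
// Illustrative numbers for the heuristic above (assumed values): with
// OptoLoopAlignment == 16 and unit_sz == 1, an inner pre- or post-loop gets
// 16 >> 2 == 4 byte alignment, while a frequently-taken inner loop head
// keeps the full 16 bytes.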

// Compute the size of first 'inst_cnt' instructions in this block.
// Return the number of instructions left to compute if the block has
// less than 'inst_cnt' instructions. Stop, and return 0 if sum_size
// exceeds OptoLoopAlignment.
uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
                                    PhaseRegAlloc* ra) {
  uint last_inst = number_of_nodes();
  for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
    uint inst_size = get_node(j)->size(ra);
    if( inst_size > 0 ) {
      inst_cnt--;
      uint sz = sum_size + inst_size;
      if( sz <= (uint)OptoLoopAlignment ) {
        // Compute size of instructions which fit into fetch buffer only
        // since all inst_cnt instructions will not fit even if we align them.
        sum_size = sz;
      } else {
        return 0;
      }
    }
  }
  return inst_cnt;
}
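
// Worked example (illustrative values, not from this code): with
// OptoLoopAlignment == 16 and a block whose first instructions encode to
// 4, 4 and 12 bytes, a call with inst_cnt == 3 accumulates sum_size = 8
// and then returns 0, because 8 + 12 overflows the 16-byte fetch buffer.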

uint Block::find_node( const Node *n ) const {
  for( uint i = 0; i < number_of_nodes(); i++ ) {
    if( get_node(i) == n )
      return i;
  }
  ShouldNotReachHere();
  return 0;
}

// Find and remove n from block list
void Block::find_remove( const Node *n ) {
  remove_node(find_node(n));
}

bool Block::contains(const Node *n) const {
  return _nodes.contains(n);
}

// Return empty status of a block.  Empty blocks contain only the head, other
// ideal nodes, and an optional trailing goto.
int Block::is_Empty() const {

  // Root or start block is not considered empty
  if (head()->is_Root() || head()->is_Start()) {
    return not_empty;
  }

  int success_result = completely_empty;
  int end_idx = number_of_nodes() - 1;

  // Check for ending goto
  if ((end_idx > 0) && (get_node(end_idx)->is_MachGoto())) {
    success_result = empty_with_goto;
    end_idx--;
  }

  // Unreachable blocks are considered empty
  if (num_preds() <= 1) {
    return success_result;
  }

  // Ideal nodes are allowable in empty blocks: skip them.  Only MachNodes
  // turn directly into code, because only MachNodes have non-trivial
  // emit() functions.
  while ((end_idx > 0) && !get_node(end_idx)->is_Mach()) {
    end_idx--;
  }

  // No room for any interesting instructions?
  if (end_idx == 0) {
    return success_result;
  }

  return not_empty;
}

// Return true if the block's code implies that it is likely to be
// executed infrequently.  Check to see if the block ends in a Halt or
// a low probability call.
bool Block::has_uncommon_code() const {
  Node* en = end();

  if (en->is_MachGoto())
    en = en->in(0);
  if (en->is_Catch())
    en = en->in(0);
  if (en->is_MachProj() && en->in(0)->is_MachCall()) {
    MachCallNode* call = en->in(0)->as_MachCall();
    if (call->cnt() != COUNT_UNKNOWN && call->cnt() <= PROB_UNLIKELY_MAG(4)) {
      // This is true for slow-path stubs like new_{instance,array},
      // slow_arraycopy, complete_monitor_locking, uncommon_trap.
      // The magic number corresponds to the probability of an uncommon_trap,
      // even though it is a count not a probability.
      return true;
    }
  }

  int op = en->is_Mach() ? en->as_Mach()->ideal_Opcode() : en->Opcode();
  return op == Op_Halt;
}

// True if block is low enough frequency or guarded by a test which
// mostly does not go here.
bool PhaseCFG::is_uncommon(const Block* block) {
  // Initial blocks must never be moved, so are never uncommon.
  if (block->head()->is_Root() || block->head()->is_Start())  return false;

  // Check for way-low freq
  if(block->_freq < BLOCK_FREQUENCY(0.00001f) ) return true;

  // Look for code shape indicating uncommon_trap or slow path
  if (block->has_uncommon_code()) return true;

  const float epsilon = 0.05f;
  const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
  uint uncommon_preds = 0;
  uint freq_preds = 0;
  uint uncommon_for_freq_preds = 0;

  for( uint i=1; i< block->num_preds(); i++ ) {
    Block* guard = get_block_for_node(block->pred(i));
    // Check to see if this block follows its guard 1 time out of 10000
    // or less.
    //
    // See list of magnitude-4 unlikely probabilities in cfgnode.hpp which
    // we intend to be "uncommon", such as slow-path TLE allocation,
    // predicted call failure, and uncommon trap triggers.
    //
    // Use an epsilon value of 5% to allow for variability in frequency
    // predictions and floating point calculations. The net effect is
    // that guard_factor is set to 9500.
    //
    // Ignore low-frequency blocks.
    // The next check is (guard->_freq < 1.e-5 * 9500.).
    if(guard->_freq*BLOCK_FREQUENCY(guard_factor) < BLOCK_FREQUENCY(0.00001f)) {
      uncommon_preds++;
    } else {
      freq_preds++;
      if(block->_freq < guard->_freq * guard_factor ) {
        uncommon_for_freq_preds++;
      }
    }
  }
  if( block->num_preds() > 1 &&
      // The block is uncommon if all preds are uncommon or
      (uncommon_preds == (block->num_preds()-1) ||
      // it is uncommon for all frequent preds.
       uncommon_for_freq_preds == freq_preds) ) {
    return true;
  }
  return false;
}
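
// Worked numbers for the guard test above (assuming PROB_UNLIKELY_MAG(4)
// is 1e-4): guard_factor = 1e-4 / 0.95 ~= 1.05e-4, so a predecessor counts
// as an uncommon guard when guard->_freq * 1.05e-4 < 1e-5, i.e. when
// guard->_freq < ~0.095.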

#ifndef PRODUCT
void Block::dump_bidx(const Block* orig, outputStream* st) const {
  if (_pre_order) st->print("B%d",_pre_order);
  else st->print("N%d", head()->_idx);

  if (Verbose && orig != this) {
    // Dump the original block's idx
    st->print(" (");
    orig->dump_bidx(orig, st);
    st->print(")");
  }
}

void Block::dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st) const {
  if (is_connector()) {
    for (uint i=1; i<num_preds(); i++) {
      Block *p = cfg->get_block_for_node(pred(i));
      p->dump_pred(cfg, orig, st);
    }
  } else {
    dump_bidx(orig, st);
    st->print(" ");
  }
}

void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const {
  // Print the basic block
  dump_bidx(this, st);
  st->print(": #\t");

  // Print the incoming CFG edges and the outgoing CFG edges
  for( uint i=0; i<_num_succs; i++ ) {
    non_connector_successor(i)->dump_bidx(_succs[i], st);
    st->print(" ");
  }
  st->print("<- ");
  if( head()->is_block_start() ) {
    for (uint i=1; i<num_preds(); i++) {
      Node *s = pred(i);
      if (cfg != NULL) {
        Block *p = cfg->get_block_for_node(s);
        p->dump_pred(cfg, p, st);
      } else {
        while (!s->is_block_start())
          s = s->in(0);
        st->print("N%d ", s->_idx );
      }
    }
  } else {
    st->print("BLOCK HEAD IS JUNK  ");
  }

  // Print loop, if any
  const Block *bhead = this;    // Head of self-loop
  Node *bh = bhead->head();

  if ((cfg != NULL) && bh->is_Loop() && !head()->is_Root()) {
    LoopNode *loop = bh->as_Loop();
    const Block *bx = cfg->get_block_for_node(loop->in(LoopNode::LoopBackControl));
    while (bx->is_connector()) {
      bx = cfg->get_block_for_node(bx->pred(1));
    }
    st->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
    // Dump any loop-specific bits, especially for CountedLoops.
    loop->dump_spec(st);
  } else if (has_loop_alignment()) {
    st->print(" top-of-loop");
  }
  st->print(" Freq: %g",_freq);
  if( Verbose || WizardMode ) {
    st->print(" IDom: %d/#%d", _idom ? _idom->_pre_order : 0, _dom_depth);
    st->print(" RegPressure: %d",_reg_pressure);
    st->print(" IHRP Index: %d",_ihrp_index);
    st->print(" FRegPressure: %d",_freg_pressure);
    st->print(" FHRP Index: %d",_fhrp_index);
  }
  st->cr();
}

void Block::dump() const {
  dump(NULL);
}

void Block::dump(const PhaseCFG* cfg) const {
  dump_head(cfg);
  for (uint i=0; i< number_of_nodes(); i++) {
    get_node(i)->dump();
  }
  tty->print("\n");
}
#endif

PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
: Phase(CFG)
, _block_arena(arena)
, _root(root)
, _matcher(matcher)
, _node_to_block_mapping(arena)
, _node_latency(NULL)
#ifndef PRODUCT
, _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
#endif
#ifdef ASSERT
, _raw_oops(arena)
#endif
{
  ResourceMark rm;
  // I'll need a few machine-specific GotoNodes.  Make an Ideal GotoNode,
  // then Match it into a machine-specific Node.  Then clone the machine
  // Node on demand.
  Node *x = new (C) GotoNode(NULL);
  x->init_req(0, x);
  _goto = matcher.match_tree(x);
  assert(_goto != NULL, "");
  _goto->set_req(0,_goto);

  // Build the CFG in Reverse Post Order
  _number_of_blocks = build_cfg();
  _root_block = get_block_for_node(_root);
}

// Build a proper looking CFG.  Make every block begin with either a StartNode
// or a RegionNode.  Make every block end with either a Goto, If or Return.
// The RootNode both starts and ends its own block.  Do this with a recursive
// backwards walk over the control edges.
uint PhaseCFG::build_cfg() {
  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);

  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(a, C->unique() >> 1);
  nstack.push(_root, 0);
  uint sum = 0;                 // Counter for blocks

  while (nstack.is_nonempty()) {
    // node and in's index from stack's top
    // 'np' is _root (see above) or RegionNode, StartNode: we push on stack
    // only nodes which point to the start of basic block (see below).
    Node *np = nstack.node();
    // idx > 0, except for the first node (_root) pushed on stack
    // at the beginning when idx == 0.
    // We will use the condition (idx == 0) later to end the build.
    uint idx = nstack.index();
    Node *proj = np->in(idx);
    const Node *x = proj->is_block_proj();
    // Does the block end with a proper block-ending Node?  One of Return,
    // If or Goto? (This check should be done for visited nodes also).
    if (x == NULL) {                    // Does not end right...
      Node *g = _goto->clone(); // Force it to end in a Goto
      g->set_req(0, proj);
      np->set_req(idx, g);
      x = proj = g;
    }
    if (!visited.test_set(x->_idx)) { // Visit this block once
      // Skip any control-pinned middle'in stuff
      Node *p = proj;
      do {
        proj = p;                   // Update pointer to last Control
        p = p->in(0);               // Move control forward
      } while( !p->is_block_proj() &&
               !p->is_block_start() );
      // Make the block begin with one of Region or StartNode.
      if( !p->is_block_start() ) {
        RegionNode *r = new (C) RegionNode( 2 );
        r->init_req(1, p);         // Insert RegionNode in the way
        proj->set_req(0, r);        // Insert RegionNode in the way
        p = r;
      }
      // 'p' now points to the start of this basic block

      // Put self in array of basic blocks
      Block *bb = new (_block_arena) Block(_block_arena, p);
      map_node_to_block(p, bb);
      map_node_to_block(x, bb);
      if( x != p ) {                // Only for root is x == p
        bb->push_node((Node*)x);
      }
      // Now handle predecessors
      ++sum;                        // Count 1 for self block
      uint cnt = bb->num_preds();
      for (int i = (cnt - 1); i > 0; i-- ) { // For all predecessors
        Node *prevproj = p->in(i);  // Get prior input
        assert( !prevproj->is_Con(), "dead input not removed" );
        // Check to see if p->in(i) is a "control-dependent" CFG edge -
        // i.e., it splits at the source (via an IF or SWITCH) and merges
        // at the destination (via a many-input Region).
        // This breaks critical edges.  The RegionNode to start the block
        // will be added when <p,i> is pulled off the node stack
        if ( cnt > 2 ) {             // Merging many things?
          assert( prevproj== bb->pred(i),"");
          if(prevproj->is_block_proj() != prevproj) { // Control-dependent edge?
            // Force a block on the control-dependent edge
            Node *g = _goto->clone();       // Force it to end in a Goto
            g->set_req(0,prevproj);
            p->set_req(i,g);
          }
        }
        nstack.push(p, i);  // 'p' is RegionNode or StartNode
      }
    } else { // Post-processing visited nodes
      nstack.pop();                 // remove node from stack
      // Check if it is the first node pushed on stack at the beginning.
      if (idx == 0) break;          // end of the build
      // Find predecessor basic block
      Block *pb = get_block_for_node(x);
      // Insert into nodes array, if not already there
      if (!has_block(proj)) {
        assert( x != proj, "" );
        // Map basic block of projection
        map_node_to_block(proj, pb);
        pb->push_node(proj);
      }
      // Insert self as a child of my predecessor block
      pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
      assert( pb->get_node(pb->number_of_nodes() - pb->_num_succs)->is_block_proj(),
              "too many control users, not a CFG?" );
    }
  }
  // Return number of basic blocks for all children and self
  return sum;
}

// Inserts a goto & corresponding basic block between
// block[block_no] and its succ_no'th successor block
void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
  // get block with block_no
  assert(block_no < number_of_blocks(), "illegal block number");
  Block* in  = get_block(block_no);
  // get successor block succ_no
  assert(succ_no < in->_num_succs, "illegal successor number");
  Block* out = in->_succs[succ_no];
  // Compute frequency of the new block. Do this before inserting
  // new block in case succ_prob() needs to infer the probability from
  // surrounding blocks.
  float freq = in->_freq * in->succ_prob(succ_no);
  // get ProjNode corresponding to the succ_no'th successor of the in block
  ProjNode* proj = in->get_node(in->number_of_nodes() - in->_num_succs + succ_no)->as_Proj();
  // create region for basic block
  RegionNode* region = new (C) RegionNode(2);
  region->init_req(1, proj);
  // setup corresponding basic block
  Block* block = new (_block_arena) Block(_block_arena, region);
  map_node_to_block(region, block);
  C->regalloc()->set_bad(region->_idx);
  // add a goto node
  Node* gto = _goto->clone(); // get a new goto node
  gto->set_req(0, region);
  // add it to the basic block
  block->push_node(gto);
  map_node_to_block(gto, block);
  C->regalloc()->set_bad(gto->_idx);
  // hook up successor block
  block->_succs.map(block->_num_succs++, out);
  // remap successor's predecessors if necessary
  for (uint i = 1; i < out->num_preds(); i++) {
    if (out->pred(i) == proj) out->head()->set_req(i, gto);
  }
  // remap predecessor's successor to new block
  in->_succs.map(succ_no, block);
  // Set the frequency of the new block
  block->_freq = freq;
  // add new basic block to basic block list
  add_block_at(block_no + 1, block);
}

// Does this block end in a multiway branch that cannot have the default case
// flipped for another case?
static bool no_flip_branch(Block *b) {
  int branch_idx = b->number_of_nodes() - b->_num_succs-1;
  if (branch_idx < 1) {
    return false;
  }
  Node *branch = b->get_node(branch_idx);
  if (branch->is_Catch()) {
    return true;
  }
  if (branch->is_Mach()) {
    if (branch->is_MachNullCheck()) {
      return true;
    }
    int iop = branch->as_Mach()->ideal_Opcode();
    if (iop == Op_FastLock || iop == Op_FastUnlock) {
      return true;
    }
    // Don't flip if branch has an implicit check.
    if (branch->as_Mach()->is_TrapBasedCheckNode()) {
      return true;
    }
  }
  return false;
}

// Check for NeverBranch at block end.  This needs to become a GOTO to the
// true target.  NeverBranch is treated as a conditional branch that always
// goes the same direction for most of the optimizer and is used to give a
// fake exit path to infinite loops.  At this late stage it needs to turn
// into a Goto so that when you enter the infinite loop you indeed hang.
void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
  // Find true target
  int end_idx = b->end_idx();
  int idx = b->get_node(end_idx+1)->as_Proj()->_con;
  Block *succ = b->_succs[idx];
  Node* gto = _goto->clone(); // get a new goto node
  gto->set_req(0, b->head());
  Node *bp = b->get_node(end_idx);
  b->map_node(gto, end_idx); // Slam over NeverBranch
  map_node_to_block(gto, b);
  C->regalloc()->set_bad(gto->_idx);
  b->pop_node();              // Yank projections
  b->pop_node();              // Yank projections
  b->_succs.map(0,succ);        // Map only successor
  b->_num_succs = 1;
  // remap successor's predecessors if necessary
  uint j;
  for( j = 1; j < succ->num_preds(); j++)
    if( succ->pred(j)->in(0) == bp )
      succ->head()->set_req(j, gto);
  // Kill alternate exit path
  Block *dead = b->_succs[1-idx];
  for( j = 1; j < dead->num_preds(); j++)
    if( dead->pred(j)->in(0) == bp )
      break;
  // Scan through block, yanking dead path from
  // all regions and phis.
  dead->head()->del_req(j);
  for( int k = 1; dead->get_node(k)->is_Phi(); k++ )
    dead->get_node(k)->del_req(j);
}

// Helper function to move block bx to the slot following b_index. Return
// true if the move is successful, otherwise false
bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
  if (bx == NULL) return false;

  // Return false if bx is already scheduled.
  uint bx_index = bx->_pre_order;
  if ((bx_index <= b_index) && (get_block(bx_index) == bx)) {
    return false;
  }

  // Find the current index of block bx on the block list
  bx_index = b_index + 1;
  while (bx_index < number_of_blocks() && get_block(bx_index) != bx) {
    bx_index++;
  }
  assert(get_block(bx_index) == bx, "block not found");

  // If the previous block conditionally falls into bx, return false,
  // because moving bx will create an extra jump.
  for(uint k = 1; k < bx->num_preds(); k++ ) {
    Block* pred = get_block_for_node(bx->pred(k));
    if (pred == get_block(bx_index - 1)) {
      if (pred->_num_succs != 1) {
        return false;
      }
    }
  }

  // Reinsert bx just past block 'b'
  _blocks.remove(bx_index);
  _blocks.insert(b_index + 1, bx);
  return true;
}

// Move empty and uncommon blocks to the end.
void PhaseCFG::move_to_end(Block *b, uint i) {
  int e = b->is_Empty();
  if (e != Block::not_empty) {
    if (e == Block::empty_with_goto) {
      // Remove the goto, but leave the block.
      b->pop_node();
    }
    // Mark this block as a connector block, which will cause it to be
    // ignored in certain functions such as non_connector_successor().
    b->set_connector();
  }
  // Move the empty block to the end, and don't recheck.
  _blocks.remove(i);
  _blocks.push(b);
}

// Set loop alignment for every block
void PhaseCFG::set_loop_alignment() {
  uint last = number_of_blocks();
  assert(get_block(0) == get_root_block(), "");

  for (uint i = 1; i < last; i++) {
    Block* block = get_block(i);
    if (block->head()->is_Loop()) {
      block->set_loop_alignment(block);
    }
  }
}

// Make empty basic blocks into "connector" blocks and move uncommon blocks
// to the end.
void PhaseCFG::remove_empty_blocks() {
  // Move uncommon blocks to the end
  uint last = number_of_blocks();
  assert(get_block(0) == get_root_block(), "");

  for (uint i = 1; i < last; i++) {
    Block* block = get_block(i);
    if (block->is_connector()) {
      break;
    }

    // Check for NeverBranch at block end.  This needs to become a GOTO to the
    // true target.  NeverBranch is treated as a conditional branch that
    // always goes the same direction for most of the optimizer and is used
    // to give a fake exit path to infinite loops.  At this late stage it
    // needs to turn into a Goto so that when you enter the infinite loop you
    // indeed hang.
    if (block->get_node(block->end_idx())->Opcode() == Op_NeverBranch) {
      convert_NeverBranch_to_Goto(block);
    }

    // Look for uncommon blocks and move to end.
    if (!C->do_freq_based_layout()) {
      if (is_uncommon(block)) {
        move_to_end(block, i);
        last--;                   // No longer check for being uncommon!
        if (no_flip_branch(block)) { // Fall-thru case must follow?
          // Find the fall-thru block
          block = get_block(i);
          move_to_end(block, i);
          last--;
        }
        // backup block counter post-increment
        i--;
      }
    }
  }

  // Move empty blocks to the end
  last = number_of_blocks();
  for (uint i = 1; i < last; i++) {
    Block* block = get_block(i);
    if (block->is_Empty() != Block::not_empty) {
      move_to_end(block, i);
      last--;
      i--;
    }
  } // End of for all blocks
}

Block *PhaseCFG::fixup_trap_based_check(Node *branch, Block *block, int block_pos, Block *bnext) {
  // Trap based checks must fall through to the successor with
  // PROB_ALWAYS.
  // They should be an If with 2 successors.
  assert(branch->is_MachIf(),   "must be If");
  assert(block->_num_succs == 2, "must have 2 successors");

  // Get the If node and the projection for the first successor.
  MachIfNode *iff   = block->get_node(block->number_of_nodes()-3)->as_MachIf();
  ProjNode   *proj0 = block->get_node(block->number_of_nodes()-2)->as_Proj();
  ProjNode   *proj1 = block->get_node(block->number_of_nodes()-1)->as_Proj();
  ProjNode   *projt = (proj0->Opcode() == Op_IfTrue)  ? proj0 : proj1;
  ProjNode   *projf = (proj0->Opcode() == Op_IfFalse) ? proj0 : proj1;

  // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
  assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
  assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1");

  ProjNode *proj_always;
  ProjNode *proj_never;
  // We must negate the branch if the implicit check doesn't follow
  // the branch's TRUE path. Then, the new TRUE branch target will
  // be the old FALSE branch target.
  if (iff->_prob <= 2*PROB_NEVER) {   // There are small rounding errors.
    proj_never  = projt;
    proj_always = projf;
  } else {
    // We must negate the branch if the trap doesn't follow the
    // branch's TRUE path. Then, the new TRUE branch target will
    // be the old FALSE branch target.
    proj_never  = projf;
    proj_always = projt;
    iff->negate();
  }
  assert(iff->_prob <= 2*PROB_NEVER, "Trap based checks are expected to trap never!");
  // Map the successors properly
  block->_succs.map(0, get_block_for_node(proj_never ->raw_out(0)));   // The target of the trap.
  block->_succs.map(1, get_block_for_node(proj_always->raw_out(0)));   // The fall through target.

  if (block->get_node(block->number_of_nodes() - block->_num_succs + 1) != proj_always) {
    block->map_node(proj_never,  block->number_of_nodes() - block->_num_succs + 0);
    block->map_node(proj_always, block->number_of_nodes() - block->_num_succs + 1);
  }

  // Place the fall through block after this block.
  Block *bs1 = block->non_connector_successor(1);
  if (bs1 != bnext && move_to_next(bs1, block_pos)) {
    bnext = bs1;
  }
  // If the fall through block still is not the next block, insert a goto.
  if (bs1 != bnext) {
    insert_goto_at(block_pos, 1);
  }
  return bnext;
}

// Fix up the final control flow for basic blocks.
void PhaseCFG::fixup_flow() {
  // Fixup final control flow for the blocks.  Remove jump-to-next
  // block. If neither arm of an IF follows the conditional branch, we
  // have to add a second jump after the conditional.  We place the
  // TRUE branch target in succs[0] for both GOTOs and IFs.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    block->_pre_order = i;          // turn pre-order into block-index

    // Connector blocks need no further processing.
    if (block->is_connector()) {
      assert((i+1) == number_of_blocks() || get_block(i + 1)->is_connector(), "All connector blocks should sink to the end");
      continue;
    }
    assert(block->is_Empty() != Block::completely_empty, "Empty blocks should be connectors");

    Block* bnext = (i < number_of_blocks() - 1) ? get_block(i + 1) : NULL;
    Block* bs0 = block->non_connector_successor(0);

    // Check for multi-way branches where I cannot negate the test to
    // exchange the true and false targets.
    if (no_flip_branch(block)) {
      // Find fall through case - if must fall into its target.
      // Get the index of the branch's first successor.
      int branch_idx = block->number_of_nodes() - block->_num_succs;

      // The branch is 1 before the branch's first successor.
      Node *branch = block->get_node(branch_idx-1);

      // Handle no-flip branches which have implicit checks and which require
      // special block ordering and individual semantics of the 'fall through
      // case'.
      if ((TrapBasedNullChecks || TrapBasedRangeChecks) &&
          branch->is_Mach() && branch->as_Mach()->is_TrapBasedCheckNode()) {
        bnext = fixup_trap_based_check(branch, block, i, bnext);
      } else {
        // Else, default handling for no-flip branches
        for (uint j2 = 0; j2 < block->_num_succs; j2++) {
          const ProjNode* p = block->get_node(branch_idx + j2)->as_Proj();
          if (p->_con == 0) {
            // successor j2 is fall through case
            if (block->non_connector_successor(j2) != bnext) {
              // but it is not the next block => insert a goto
              insert_goto_at(i, j2);
            }
            // Put taken branch in slot 0
            if (j2 == 0 && block->_num_succs == 2) {
              // Flip targets in succs map
              Block *tbs0 = block->_succs[0];
              Block *tbs1 = block->_succs[1];
              block->_succs.map(0, tbs1);
              block->_succs.map(1, tbs0);
            }
            break;
          }
        }
      }

      // Remove all CatchProjs
      for (uint j = 0; j < block->_num_succs; j++) {
        block->pop_node();
      }

    } else if (block->_num_succs == 1) {
      // Block ends in a Goto?
      if (bnext == bs0) {
        // We fall into next block; remove the Goto
        block->pop_node();
      }

    } else if(block->_num_succs == 2) { // Block ends in an If?
      // Get opcode of 1st projection (matches _succs[0])
      // Note: Since this basic block has 2 exits, the last 2 nodes must
      //       be projections (in any order), the 3rd last node must be
      //       the IfNode (we have excluded other 2-way exits such as
      //       CatchNodes already).
      MachNode* iff   = block->get_node(block->number_of_nodes() - 3)->as_Mach();
      ProjNode* proj0 = block->get_node(block->number_of_nodes() - 2)->as_Proj();
      ProjNode* proj1 = block->get_node(block->number_of_nodes() - 1)->as_Proj();

      // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
      assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
      assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1");

      Block* bs1 = block->non_connector_successor(1);

      // Check for neither successor block following the current
      // block ending in a conditional. If so, move one of the
      // successors after the current one, provided that the
      // successor was previously unscheduled, but moveable
      // (i.e., all paths to it involve a branch).
      if (!C->do_freq_based_layout() && bnext != bs0 && bnext != bs1) {
        // Choose the more common successor based on the probability
        // of the conditional branch.
        Block* bx = bs0;
        Block* by = bs1;

        // _prob is the probability of taking the true path. Make
        // p the probability of taking successor #1.
        float p = iff->as_MachIf()->_prob;
        if (proj0->Opcode() == Op_IfTrue) {
          p = 1.0 - p;
        }

        // Prefer successor #1 if p > 0.5
        if (p > PROB_FAIR) {
          bx = bs1;
          by = bs0;
        }

        // Attempt the more common successor first
        if (move_to_next(bx, i)) {
          bnext = bx;
        } else if (move_to_next(by, i)) {
          bnext = by;
        }
      }

      // Check for conditional branching the wrong way.  Negate
      // conditional, if needed, so it falls into the following block
      // and branches to the not-following block.

      // Check for the next block being in succs[0].  We are going to branch
      // to succs[0], so we want the fall-thru case as the next block in
      // succs[1].
      if (bnext == bs0) {
        // Fall-thru case in succs[0], so flip targets in succs map
        Block* tbs0 = block->_succs[0];
        Block* tbs1 = block->_succs[1];
        block->_succs.map(0, tbs1);
        block->_succs.map(1, tbs0);
        // Flip projection for each target
        ProjNode* tmp = proj0;
        proj0 = proj1;
        proj1 = tmp;

      } else if(bnext != bs1) {
        // Need a double-branch
        // The existing conditional branch need not change.
        // Add an unconditional branch to the false target.
        // Alas, it must appear in its own block and adding a
        // block this late in the game is complicated.  Sigh.
        insert_goto_at(i, 1);
      }

      // Make sure we TRUE branch to the target
      if (proj0->Opcode() == Op_IfFalse) {
        iff->as_MachIf()->negate();
      }

      block->pop_node();          // Remove IfFalse & IfTrue projections
      block->pop_node();

    } else {
      // Multi-exit block, e.g. a switch statement
      // But we don't need to do anything here
    }
  } // End of for all blocks
}

// postalloc_expand: Expand nodes after register allocation.
//
// postalloc_expand has to be called after register allocation, just
// before output (i.e. scheduling). It only gets called if
// Matcher::require_postalloc_expand is true.
//
// Background:
//
// Nodes that are expanded (one compound node requiring several
// assembler instructions to be implemented split into two or more
// non-compound nodes) after register allocation are not as nice as
// the ones expanded before register allocation - they don't
// participate in optimizations such as global code motion. But after
// register allocation we can expand nodes that use registers which
// are not spillable or registers that are not allocated, because the
// old compound node is simply replaced (in its location in the basic
// block) by a new subgraph which does not contain compound nodes any
// more. The scheduler called during output can later on process these
// non-compound nodes.
//
// Implementation:
//
// Nodes requiring postalloc expand are specified in the ad file by using
// a postalloc_expand statement instead of ins_encode. A postalloc_expand
// contains a single call to an encoding, as does an ins_encode
// statement. Instead of an emit() function a postalloc_expand() function
// is generated that doesn't emit assembler but creates a new
// subgraph. The code below calls this postalloc_expand function for each
// node with the appropriate attribute. This function returns the new
// nodes generated in an array passed in the call. The old node,
// potential MachTemps before and potential Projs after it then get
// disconnected and replaced by the new nodes. The instruction
// generating the result has to be the last one in the array. In
// general it is assumed that Projs after the node expanded are
// kills. These kills are not required any more after expanding as
// there are now explicitly visible def-use chains and the Projs are
// removed. This does not hold for calls: They do not only have
// kill-Projs but also Projs defining values. Therefore Projs after
// the node expanded are removed for all but for calls. If a node is
// to be reused, it must be added to the nodes list returned, and it
// will be added again.
//
// Implementing the postalloc_expand function for a node in an enc_class
// is rather tedious. It requires knowledge about many node details, as
// the nodes and the subgraph must be hand crafted. To simplify this,
// adlc generates some utility variables into the postalloc_expand function,
// e.g., holding the operands as specified by the postalloc_expand encoding
// specification, e.g.:
//  * unsigned idx_<par_name>  holding the index of the node in the ins
//  * Node *n_<par_name>       holding the node loaded from the ins
//  * MachOpnd *op_<par_name>  holding the corresponding operand
//
// The ordering of operands cannot be determined by looking at a
// rule. Especially if a match rule matches several different trees,
// several nodes are generated from one instruct specification with
// different operand orderings. In this case the adlc generated
// variables are the only way to access the ins and operands
// deterministically.
//
// If assigning a register to a node that contains an oop, don't
// forget to call ra_->set_oop() for the node.
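//
// For illustration only, a hypothetical .ad instruct entry of roughly this
// shape (the names are made up, not taken from this repository) would get
// a generated postalloc_expand() method instead of an emit() method:
//
//   instruct loadConL_Ex(iRegLdst dst, immL src) %{
//     match(Set dst src);
//     postalloc_expand( postalloc_expand_load_long(dst, src) );
//   %}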
void PhaseCFG::postalloc_expand(PhaseRegAlloc* _ra) {
  GrowableArray <Node *> new_nodes(32); // Array with new nodes filled by postalloc_expand function of node.
  GrowableArray <Node *> remove(32);
  GrowableArray <Node *> succs(32);
  unsigned int max_idx = C->unique();   // Remember to distinguish new from old nodes.
  DEBUG_ONLY(bool foundNode = false);

  // for all blocks
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block *b = _blocks[i];
    // For all instructions in the current block.
    for (uint j = 0; j < b->number_of_nodes(); j++) {
      Node *n = b->get_node(j);
      if (n->is_Mach() && n->as_Mach()->requires_postalloc_expand()) {
#ifdef ASSERT
        if (TracePostallocExpand) {
          if (!foundNode) {
            foundNode = true;
            tty->print("POSTALLOC EXPANDING %d %s\n", C->compile_id(),
                       C->method() ? C->method()->name()->as_utf8() : C->stub_name());
          }
          tty->print("  postalloc expanding "); n->dump();
          if (Verbose) {
            tty->print("    with ins:\n");
            for (uint k = 0; k < n->len(); ++k) {
              if (n->in(k)) { tty->print("        "); n->in(k)->dump(); }
            }
          }
        }
#endif
        new_nodes.clear();
        // Collect nodes that have to be removed from the block later on.
        uint req = n->req();
        remove.clear();
        for (uint k = 0; k < req; ++k) {
          if (n->in(k) && n->in(k)->is_MachTemp()) {
            remove.push(n->in(k)); // MachTemps which are inputs to the old node have to be removed.
            n->in(k)->del_req(0);
            j--;
          }
        }

        // Check whether we can allocate enough nodes. We set a fixed limit
        // for the size of postalloc expands with this.
        uint unique_limit = C->unique() + 40;
        if (unique_limit >= _ra->node_regs_max_index()) {
          Compile::current()->record_failure("out of nodes in postalloc expand");
          return;
        }

        // Emit (i.e. generate new nodes).
        n->as_Mach()->postalloc_expand(&new_nodes, _ra);

        assert(C->unique() < unique_limit, "You allocated too many nodes in your postalloc expand.");

        // Disconnect the inputs of the old node.
        //
        // We reuse MachSpillCopy nodes. If we need to expand them, there
        // are many, so reusing pays off. If reused, the node already
        // has the new ins. n must be the last node on new_nodes list.
        if (!n->is_MachSpillCopy()) {
          for (int k = req - 1; k >= 0; --k) {
            n->del_req(k);
          }
        }

#ifdef ASSERT
        // Check that all nodes have proper operands.
        for (int k = 0; k < new_nodes.length(); ++k) {
          if (new_nodes.at(k)->_idx < max_idx || !new_nodes.at(k)->is_Mach()) continue; // old node, Proj ...
          MachNode *m = new_nodes.at(k)->as_Mach();
          for (unsigned int l = 0; l < m->num_opnds(); ++l) {
            if (MachOper::notAnOper(m->_opnds[l])) {
              outputStream *os = tty;
              os->print("Node %s ", m->Name());
              os->print("has invalid opnd %d: %p\n", l, m->_opnds[l]);
              assert(0, "Invalid operands, see inline trace in hs_err_pid file.");
            }
          }
        }
#endif

        // Collect succs of old node in remove (for projections) and in succs (for
        // all other nodes) do _not_ collect projections in remove (but in succs)
        // in case the node is a call. We need the projections for calls as they are
        // associated with registers (i.e. they are defs).
        succs.clear();
        for (DUIterator k = n->outs(); n->has_out(k); k++) {
          if (n->out(k)->is_Proj() && !n->is_MachCall() && !n->is_MachBranch()) {
            remove.push(n->out(k));
          } else {
            succs.push(n->out(k));
          }
        }
        // Replace old node n as input of its succs by last of the new nodes.
        for (int k = 0; k < succs.length(); ++k) {
          Node *succ = succs.at(k);
          for (uint l = 0; l < succ->req(); ++l) {
            if (succ->in(l) == n) {
              succ->set_req(l, new_nodes.at(new_nodes.length() - 1));
            }
          }
          for (uint l = succ->req(); l < succ->len(); ++l) {
            if (succ->in(l) == n) {
              succ->set_prec(l, new_nodes.at(new_nodes.length() - 1));
            }
          }
        }

        // Index of old node in block.
        uint index = b->find_node(n);
        // Insert new nodes into block and map them in nodes->blocks array
        // and remember last node in n2.
        Node *n2 = NULL;
        for (int k = 0; k < new_nodes.length(); ++k) {
          n2 = new_nodes.at(k);
          b->insert_node(n2, ++index);
          map_node_to_block(n2, b);
        }

        // Add old node n to remove and remove them all from block.
        remove.push(n);
        j--;
#ifdef ASSERT
        if (TracePostallocExpand && Verbose) {
          tty->print("    removing:\n");
          for (int k = 0; k < remove.length(); ++k) {
            tty->print("        "); remove.at(k)->dump();
          }
          tty->print("    inserting:\n");
          for (int k = 0; k < new_nodes.length(); ++k) {
            tty->print("        "); new_nodes.at(k)->dump();
          }
        }
#endif
        for (int k = 0; k < remove.length(); ++k) {
          if (b->contains(remove.at(k))) {
            b->find_remove(remove.at(k));
          } else {
            assert(remove.at(k)->is_Proj() && (remove.at(k)->in(0)->is_MachBranch()), "");
          }
        }
        // If anything has been inserted (n2 != NULL), continue after last node inserted.
        // This does not always work. Some postalloc expands don't insert any nodes, if they
        // do optimizations (e.g., max(x,x)). In this case we decrement j accordingly.
        j = n2 ? b->find_node(n2) : j;
      }
    }
  }

#ifdef ASSERT
  if (foundNode) {
    tty->print("FINISHED %d %s\n", C->compile_id(),
               C->method() ? C->method()->name()->as_utf8() : C->stub_name());
    tty->flush();
  }
#endif
}

//------------------------------dump-------------------------------------------
#ifndef PRODUCT
void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited  ) const {
  const Node *x = end->is_block_proj();
  assert( x, "not a CFG" );

  // Do not visit this block again
  if( visited.test_set(x->_idx) ) return;

  // Skip through this block
  const Node *p = x;
  do {
    p = p->in(0);               // Move control forward
    assert( !p->is_block_proj() || p->is_Root(), "not a CFG" );
  } while( !p->is_block_start() );

  // Recursively visit
  for (uint i = 1; i < p->req(); i++) {
    _dump_cfg(p->in(i), visited);
  }

  // Dump the block
  get_block_for_node(p)->dump(this);
}

void PhaseCFG::dump( ) const {
  tty->print("\n--- CFG --- %d BBs\n", number_of_blocks());
  if (_blocks.size()) {        // Did we do basic-block layout?
    for (uint i = 0; i < number_of_blocks(); i++) {
      const Block* block = get_block(i);
      block->dump(this);
    }
  } else {                      // Else do it with a DFS
    VectorSet visited(_block_arena);
    _dump_cfg(_root,visited);
  }
}

void PhaseCFG::dump_headers() {
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    if (block != NULL) {
      block->dump_head(this);
    }
  }
}

void PhaseCFG::verify() const {
#ifdef ASSERT
  // Verify sane CFG
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    uint cnt = block->number_of_nodes();
    uint j;
    for (j = 0; j < cnt; j++)  {
      Node *n = block->get_node(j);
      assert(get_block_for_node(n) == block, "");
      if (j >= 1 && n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
        assert(j == 1 || block->get_node(j-1)->is_Phi(), "CreateEx must be first instruction in block");
      }
      for (uint k = 0; k < n->req(); k++) {
        Node *def = n->in(k);
        if (def && def != n) {
          assert(get_block_for_node(def) || def->is_Con(), "must have block; constants for debug info ok");
          // Verify that instructions in the block are in correct order.
          // Uses must follow their definition if they are in the same block.
          // Mostly done to check that MachSpillCopy nodes are placed correctly
          // when CreateEx node is moved in build_ifg_physical().
          if (get_block_for_node(def) == block && !(block->head()->is_Loop() && n->is_Phi()) &&
              // See (+++) comment in reg_split.cpp
              !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
            bool is_loop = false;
            if (n->is_Phi()) {
              for (uint l = 1; l < def->req(); l++) {
                if (n == def->in(l)) {
                  is_loop = true;
                  break; // Some kind of loop
                }
              }
            }
            assert(is_loop || block->find_node(def) < j, "uses must follow definitions");
          }
        }
      }
    }

    j = block->end_idx();
    Node* bp = (Node*)block->get_node(block->number_of_nodes() - 1)->is_block_proj();
    assert(bp, "last instruction must be a block proj");
    assert(bp == block->get_node(j), "wrong number of successors for this block");
    if (bp->is_Catch()) {
      while (block->get_node(--j)->is_MachProj()) {
      }
      assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
    } else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) {
      assert(block->_num_succs == 2, "Conditional branch must have two targets");
    }
  }
#endif
}
#endif

UnionFind::UnionFind( uint max ) : _cnt(max), _max(max), _indices(NEW_RESOURCE_ARRAY(uint,max)) {
  Copy::zero_to_bytes( _indices, sizeof(uint)*max );
}

void UnionFind::extend( uint from_idx, uint to_idx ) {
  _nesting.check();
  if( from_idx >= _max ) {
    uint size = 16;
    while( size <= from_idx ) size <<=1;
    _indices = REALLOC_RESOURCE_ARRAY( uint, _indices, _max, size );
    _max = size;
  }
  while( _cnt <= from_idx ) _indices[_cnt++] = 0;
  _indices[from_idx] = to_idx;
}

void UnionFind::reset( uint max ) {
  assert( max <= max_uint, "Must fit within uint" );
  // Force the Union-Find mapping to be at least this large
  extend(max,0);
  // Initialize to be the ID mapping.
  for( uint i=0; i<max; i++ ) map(i,i);
}

// Straight out of Tarjan's union-find algorithm
uint UnionFind::Find_compress( uint idx ) {
  uint cur  = idx;
  uint next = lookup(cur);
  while( next != cur ) {        // Scan chain of equivalences
    assert( next < cur, "always union smaller" );
    cur = next;                 // until find a fixed-point
    next = lookup(cur);
  }

  // Core of union-find algorithm: update chain of
  // equivalences to be equal to the root.
  while( idx != next ) {
    uint tmp = lookup(idx);
    map(idx, next);
    idx = tmp;
  }
  return idx;
}
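
// Path-compression example (illustrative): with the mapping 4->3, 3->1,
// 1->1, Find_compress(4) walks 4->3->1 to find root 1, then remaps both
// 4 and 3 directly to 1, so later lookups finish in a single step.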

// Like Find above, but no path compress, so bad asymptotic behavior
uint UnionFind::Find_const( uint idx ) const {
  if( idx == 0 ) return idx;    // Ignore the zero idx
  // Off the end?  This can happen during debugging dumps
  // when data structures have not finished being updated.
  if( idx >= _max ) return idx;
  uint next = lookup(idx);
  while( next != idx ) {        // Scan chain of equivalences
    idx = next;                 // until find a fixed-point
    next = lookup(idx);
  }
  return next;
}

// union 2 sets together.
void UnionFind::Union( uint idx1, uint idx2 ) {
  uint src = Find(idx1);
  uint dst = Find(idx2);
  assert( src, "" );
  assert( dst, "" );
  assert( src < _max, "oob" );
  assert( dst < _max, "oob" );
  assert( src < dst, "always union smaller" );
  map(dst,src);
}

#ifndef PRODUCT
void Trace::dump( ) const {
  tty->print_cr("Trace (freq %f)", first_block()->_freq);
  for (Block *b = first_block(); b != NULL; b = next(b)) {
    tty->print("  B%d", b->_pre_order);
    if (b->head()->is_Loop()) {
      tty->print(" (L%d)", b->compute_loop_alignment());
    }
    if (b->has_loop_alignment()) {
      tty->print(" (T%d)", b->code_alignment());
    }
  }
  tty->cr();
}

void CFGEdge::dump( ) const {
  tty->print(" B%d  -->  B%d  Freq: %f  out:%3d%%  in:%3d%%  State: ",
             from()->_pre_order, to()->_pre_order, freq(), _from_pct, _to_pct);
  switch(state()) {
  case connected:
    tty->print("connected");
    break;
  case open:
    tty->print("open");
    break;
  case interior:
    tty->print("interior");
    break;
  }
  if (infrequent()) {
    tty->print("  infrequent");
  }
  tty->cr();
}
#endif

// Comparison function for edges
static int edge_order(CFGEdge **e0, CFGEdge **e1) {
  float freq0 = (*e0)->freq();
  float freq1 = (*e1)->freq();
  if (freq0 != freq1) {
    return freq0 > freq1 ? -1 : 1;
  }

  int dist0 = (*e0)->to()->_rpo - (*e0)->from()->_rpo;
  int dist1 = (*e1)->to()->_rpo - (*e1)->from()->_rpo;

  return dist1 - dist0;
}
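
// The resulting sort order is: higher frequency first; among edges of equal
// frequency, the one spanning the greater forward RPO distance sorts first
// (dist1 - dist0 is negative exactly when e0 spans farther than e1). For
// example, two edges of frequency 0.5 that jump 10 and 2 blocks forward
// respectively sort with the 10-block edge ahead of the 2-block edge.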

// Comparison function for traces
extern "C" int trace_frequency_order(const void *p0, const void *p1) {
  Trace *tr0 = *(Trace **) p0;
  Trace *tr1 = *(Trace **) p1;
  Block *b0 = tr0->first_block();
  Block *b1 = tr1->first_block();

  // The trace of connector blocks goes at the end;
  // we expect only one such trace.
  if (b0->is_connector() != b1->is_connector()) {
    return b1->is_connector() ? -1 : 1;
  }

  // Pull more frequently executed blocks toward the beginning.
  float freq0 = b0->_freq;
  float freq1 = b1->_freq;
  if (freq0 != freq1) {
    return freq0 > freq1 ? -1 : 1;
  }

  int diff = tr0->first_block()->_rpo - tr1->first_block()->_rpo;

  return diff;
}
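
// Net effect: the connector trace sinks to the end, hotter traces float to
// the front, and remaining ties are broken by reverse-post-order number so
// the resulting layout is deterministic.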

// Find edges of interest, i.e., those which can fall through. Presumes that
// edges which don't fall through are of low frequency and can be generally
// ignored.  Initialize the list of traces.
void PhaseBlockLayout::find_edges() {
  // Walk the blocks, creating edges and Traces
  uint i;
  Trace *tr = NULL;
  for (i = 0; i < _cfg.number_of_blocks(); i++) {
    Block* b = _cfg.get_block(i);
    tr = new Trace(b, next, prev);
    traces[tr->id()] = tr;

    // All connector blocks should be at the end of the list
    if (b->is_connector()) break;

    // If this block and the next one have a one-to-one successor/
    // predecessor relationship, simply append the next block.
    int nfallthru = b->num_fall_throughs();
    while (nfallthru == 1 &&
           b->succ_fall_through(0)) {
      Block *n = b->_succs[0];

      // Skip over single-entry connector blocks; we don't want to
      // add them to the trace.
      while (n->is_connector() && n->num_preds() == 1) {
        n = n->_succs[0];
      }

      // We see a merge point, so stop the search for the next block.
      if (n->num_preds() != 1) break;

      i++;
      assert(n == _cfg.get_block(i), "expecting next block");
      tr->append(n);
      uf->map(n->_pre_order, tr->id());
      traces[n->_pre_order] = NULL;
      nfallthru = b->num_fall_throughs();
      b = n;
    }

    if (nfallthru > 0) {
      // Create a CFGEdge for each outgoing
      // edge that could be a fall-through.
      for (uint j = 0; j < b->_num_succs; j++ ) {
        if (b->succ_fall_through(j)) {
          Block *target = b->non_connector_successor(j);
          float freq = b->_freq * b->succ_prob(j);
          int from_pct = (int) ((100 * freq) / b->_freq);
          int to_pct = (int) ((100 * freq) / target->_freq);
          edges->append(new CFGEdge(b, target, freq, from_pct, to_pct));
        }
      }
    }
  }

  // Group connector blocks into one trace.
  for (i++; i < _cfg.number_of_blocks(); i++) {
    Block *b = _cfg.get_block(i);
    assert(b->is_connector(), "connector blocks at the end");
    tr->append(b);
    uf->map(b->_pre_order, tr->id());
    traces[b->_pre_order] = NULL;
  }
}
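
// For illustration, suppose blocks B1 -> B2 -> B3 form a straight-line chain
// in which each block is the sole predecessor of the next: the loop above
// folds all three into one trace. If B3 then ends in a two-way branch, a
// CFGEdge is recorded for each potential fall-through target, carrying the
// edge frequency plus the percentage of the source's and target's execution
// counts that flow along that edge.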

// Union two traces together in uf, and null out the trace in the list.
void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace) {
  uint old_id = old_trace->id();
  uint updated_id = updated_trace->id();

  uint lo_id = updated_id;
  uint hi_id = old_id;

  // If updated_id is greater than old_id, swap the values to meet
  // the UnionFind guarantee that we always union toward the smaller id.
  if (updated_id > old_id) {
    lo_id = old_id;
    hi_id = updated_id;

    // Fix up the trace ids.
    traces[lo_id] = traces[updated_id];
    updated_trace->set_id(lo_id);
  }

  // Union the lower with the higher and remove the pointer
  // to the higher.
  uf->Union(lo_id, hi_id);
  traces[hi_id] = NULL;
}
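
// Note that the surviving trace always ends up with the smaller of the two
// ids: merging trace 9 into trace 4, for example, leaves the combined trace
// in traces[4] and clears traces[9], while uf records that id 9 now resolves
// to id 4.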

// Append traces together via the most frequently executed edges.
void PhaseBlockLayout::grow_traces() {
  // Order the edges, and drive the growth of Traces via the most
  // frequently executed edges.
  edges->sort(edge_order);
  for (int i = 0; i < edges->length(); i++) {
    CFGEdge *e = edges->at(i);

    if (e->state() != CFGEdge::open) continue;

    Block *src_block = e->from();
    Block *targ_block = e->to();

    // Don't grow traces along backedges when loop rotation is disabled.
    if (!BlockLayoutRotateLoops) {
      if (targ_block->_rpo <= src_block->_rpo) {
        targ_block->set_loop_alignment(targ_block);
        continue;
      }
    }

    Trace *src_trace = trace(src_block);
    Trace *targ_trace = trace(targ_block);

    // If the edge in question can join two traces at their ends,
    // append one trace to the other.
    if (src_trace->last_block() == src_block) {
      if (src_trace == targ_trace) {
        e->set_state(CFGEdge::interior);
        if (targ_trace->backedge(e)) {
          // Reset i to catch any newly eligible edge
          // (or we could remember the first "open" edge and restart there).
          i = 0;
        }
      } else if (targ_trace->first_block() == targ_block) {
        e->set_state(CFGEdge::connected);
        src_trace->append(targ_trace);
        union_traces(src_trace, targ_trace);
      }
    }
  }
}
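
// For example, if the hottest open edge runs from the last block of trace T1
// to the first block of trace T2, T2 is appended to T1 and their ids are
// unioned. If instead the edge closes a cycle within a single trace, it is
// marked interior and backedge() may rotate the loop; after a rotation, the
// scan restarts near the top of the sorted edge list to pick up any edges
// the rotation made eligible.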

// Embed one trace into another, if the fork or join points are sufficiently
// balanced.
void PhaseBlockLayout::merge_traces(bool fall_thru_only) {
  // Walk the edge list another time, looking at unprocessed edges.
  // Fold in diamonds.
  for (int i = 0; i < edges->length(); i++) {
    CFGEdge *e = edges->at(i);

    if (e->state() != CFGEdge::open) continue;
    if (fall_thru_only) {
      if (e->infrequent()) continue;
    }

    Block *src_block = e->from();
    Trace *src_trace = trace(src_block);
    bool src_at_tail = src_trace->last_block() == src_block;

    Block *targ_block = e->to();
    Trace *targ_trace = trace(targ_block);
    bool targ_at_start = targ_trace->first_block() == targ_block;

    if (src_trace == targ_trace) {
      // This may be a loop, but we can't do much about it.
      e->set_state(CFGEdge::interior);
      continue;
    }

    if (fall_thru_only) {
      // If the edge links the middle of two traces, we can't do anything
      // with it; leave it open and continue.
      if (!src_at_tail && !targ_at_start) {
        continue;
      }

      // Don't grow traces along backedges when loop rotation is disabled.
      if (!BlockLayoutRotateLoops && (targ_block->_rpo <= src_block->_rpo)) {
        continue;
      }

      // If both ends of the edge are available, why didn't we handle it earlier?
      assert(src_at_tail ^ targ_at_start, "Should have caught this edge earlier.");

      if (targ_at_start) {
        // Insert the "targ" trace in the "src" trace if the insertion point
        // is a two-way branch.
        // A better profitability check is possible, but may not be worth it.
        // Someday, see if this "fork" has an associated "join";
        // then make a policy on merging this trace at the fork or join.
        // For example, other things being equal, it may be better to place this
        // trace at the join point if the "src" trace ends in a two-way, but
        // the insertion point is one-way.
        assert(src_block->num_fall_throughs() == 2, "unexpected diamond");
        e->set_state(CFGEdge::connected);
        src_trace->insert_after(src_block, targ_trace);
        union_traces(src_trace, targ_trace);
      } else if (src_at_tail) {
        if (src_trace != trace(_cfg.get_root_block())) {
          e->set_state(CFGEdge::connected);
          targ_trace->insert_before(targ_block, src_trace);
          union_traces(targ_trace, src_trace);
        }
      }
    } else if (e->state() == CFGEdge::open) {
      // Append traces, even without a fall-thru connection.
      // But leave the root entry at the beginning of the block list.
      if (targ_trace != trace(_cfg.get_root_block())) {
        e->set_state(CFGEdge::connected);
        src_trace->append(targ_trace);
        union_traces(src_trace, targ_trace);
      }
    }
  }
}
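
// Illustration: in a diamond where A branches to B and C and both rejoin at
// D, grow_traces() may have built the trace A-B-D, leaving C in a trace of
// its own. The edge A->C then has its target at the start of a trace, so the
// fall_thru_only pass embeds C's trace immediately after A, yielding
// A-C-B-D with A falling through to C. The second pass simply concatenates
// whatever open traces remain so that loosely related code stays nearby.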

// Order the sequence of the traces in some desirable way, and fixup the
// jumps at the end of each block.
void PhaseBlockLayout::reorder_traces(int count) {
  ResourceArea *area = Thread::current()->resource_area();
  Trace **new_traces = NEW_ARENA_ARRAY(area, Trace *, count);
  Block_List worklist;
  int new_count = 0;

  // Compact the traces.
  for (int i = 0; i < count; i++) {
    Trace *tr = traces[i];
    if (tr != NULL) {
      new_traces[new_count++] = tr;
    }
  }

  // The entry block should be first on the new trace list.
  Trace *tr = trace(_cfg.get_root_block());
  assert(tr == new_traces[0], "entry trace misplaced");

  // Sort the new trace list by frequency.
  qsort(new_traces + 1, new_count - 1, sizeof(new_traces[0]), trace_frequency_order);

  // Patch up the successor blocks.
  _cfg.clear_blocks();
  for (int i = 0; i < new_count; i++) {
    Trace *tr = new_traces[i];
    if (tr != NULL) {
      tr->fixup_blocks(_cfg);
    }
  }
}
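
// The final block order is therefore: the entry trace first, the remaining
// traces sorted hottest-first, and the trace of connector blocks last (see
// trace_frequency_order above); fixup_blocks() then re-registers each block
// with the CFG in that order.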

// Order basic blocks based on frequency.
PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg)
: Phase(BlockLayout)
, _cfg(cfg) {
  ResourceMark rm;
  ResourceArea *area = Thread::current()->resource_area();

  // List of traces
  int size = _cfg.number_of_blocks() + 1;
  traces = NEW_ARENA_ARRAY(area, Trace *, size);
  memset(traces, 0, size * sizeof(Trace *));
  next = NEW_ARENA_ARRAY(area, Block *, size);
  memset(next, 0, size * sizeof(Block *));
  prev = NEW_ARENA_ARRAY(area, Block *, size);
  memset(prev, 0, size * sizeof(Block *));

  // List of edges
  edges = new GrowableArray<CFGEdge*>;

  // Mapping: block index --> trace containing the block
  uf = new UnionFind(size);
  uf->reset(size);

  // Find edges and create traces.
  find_edges();

  // Grow traces at their ends via the most frequent edges.
  grow_traces();

  // Merge one trace into another, but only at fall-through points.
  // This may make diamonds and other related shapes in a trace.
  merge_traces(true);

  // Run merge again, allowing two traces to be catenated even if
  // one does not fall through into the other. This places loosely
  // related traces near each other.
  merge_traces(false);

  // Re-order all the remaining traces by frequency.
  reorder_traces(size);

  assert(_cfg.number_of_blocks() >= (uint) (size - 1), "number of blocks cannot shrink");
}
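
// Taken together, this constructor implements a trace-based code layout in
// the spirit of bottom-up positioning schemes such as Pettis-Hansen: seed
// one trace per block, grow the traces greedily along the hottest
// fall-through edges, fold in or concatenate the leftovers, and finally
// emit the traces hottest-first.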

// Edge e completes a loop in a trace. If the target block is the head of the
// loop, rotate the loop blocks so that the loop ends in a conditional branch.
bool Trace::backedge(CFGEdge *e) {
  bool loop_rotated = false;
  Block *src_block  = e->from();
  Block *targ_block = e->to();

  assert(last_block() == src_block, "loop discovery at back branch");
  if (first_block() == targ_block) {
    if (BlockLayoutRotateLoops && last_block()->num_fall_throughs() < 2) {
      // Find the last block in the trace that has a conditional
      // branch.
      Block *b;
      for (b = last_block(); b != NULL; b = prev(b)) {
        if (b->num_fall_throughs() == 2) {
          break;
        }
      }

      if (b != last_block() && b != NULL) {
        loop_rotated = true;

        // Rotate the loop by doing two-part linked-list surgery.
        append(first_block());
        break_loop_after(b);
      }
    }

    // Backbranch to the top of a trace.
    // Scroll forward through the trace from targ_block. If we find
    // a loop head before another loop top, use the loop head alignment.
    for (Block *b = targ_block; b != NULL; b = next(b)) {
      if (b->has_loop_alignment()) {
        break;
      }
      if (b->head()->is_Loop()) {
        targ_block = b;
        break;
      }
    }

    first_block()->set_loop_alignment(targ_block);

  } else {
    // Backbranch into the middle of a trace.
    targ_block->set_loop_alignment(targ_block);
  }

  return loop_rotated;
}
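
// In short: when the backedge targets the head of this trace and a suitable
// bottom block exists, the linked list is re-threaded so that the loop's
// trace ends in that block's two-way branch (a bottom-tested loop);
// otherwise the code only records a loop-alignment hint on the loop head so
// that later passes can pad it to its preferred code alignment boundary.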

// Push blocks onto the CFG list, and
// ensure that blocks have the correct two-way branch sense.
void Trace::fixup_blocks(PhaseCFG &cfg) {
  Block *last = last_block();
  for (Block *b = first_block(); b != NULL; b = next(b)) {
    cfg.add_block(b);
    if (!b->is_connector()) {
      int nfallthru = b->num_fall_throughs();
      if (b != last) {
        if (nfallthru == 2) {
          // Ensure that the sense of the branch is correct.
          Block *bnext = next(b);
          Block *bs0 = b->non_connector_successor(0);

          MachNode *iff = b->get_node(b->number_of_nodes() - 3)->as_Mach();
          ProjNode *proj0 = b->get_node(b->number_of_nodes() - 2)->as_Proj();
          ProjNode *proj1 = b->get_node(b->number_of_nodes() - 1)->as_Proj();

          if (bnext == bs0) {
            // The fall-thru case is in succs[0]; it should be in succs[1].

            // Flip targets in the _succs map.
            Block *tbs0 = b->_succs[0];
            Block *tbs1 = b->_succs[1];
            b->_succs.map( 0, tbs1 );
            b->_succs.map( 1, tbs0 );

            // Flip projections to match targets.
            b->map_node(proj1, b->number_of_nodes() - 2);
            b->map_node(proj0, b->number_of_nodes() - 1);
