src/share/vm/opto/gcm.cpp

changeset:   6441:d2907f74462e
parent:      4691:571076d3c79d
child:       6462:e2722a66aba7
author:      goetz
date:        Thu, 20 Jun 2013 16:30:44 -0700
permissions: -rw-r--r--

8016586: PPC64 (part 3): basic changes for PPC64
Summary: added #includes needed for ppc64 port. Renamed _MODEL_ppc to _MODEL_ppc_32 and renamed corresponding old _ppc files to _ppc_32.
Reviewed-by: dholmes, kvn

     1 /*
     2  * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "libadt/vectset.hpp"
    27 #include "memory/allocation.inline.hpp"
    28 #include "opto/block.hpp"
    29 #include "opto/c2compiler.hpp"
    30 #include "opto/callnode.hpp"
    31 #include "opto/cfgnode.hpp"
    32 #include "opto/machnode.hpp"
    33 #include "opto/opcodes.hpp"
    34 #include "opto/phaseX.hpp"
    35 #include "opto/rootnode.hpp"
    36 #include "opto/runtime.hpp"
    37 #include "runtime/deoptimization.hpp"
    38 #ifdef TARGET_ARCH_MODEL_x86_32
    39 # include "adfiles/ad_x86_32.hpp"
    40 #endif
    41 #ifdef TARGET_ARCH_MODEL_x86_64
    42 # include "adfiles/ad_x86_64.hpp"
    43 #endif
    44 #ifdef TARGET_ARCH_MODEL_sparc
    45 # include "adfiles/ad_sparc.hpp"
    46 #endif
    47 #ifdef TARGET_ARCH_MODEL_zero
    48 # include "adfiles/ad_zero.hpp"
    49 #endif
    50 #ifdef TARGET_ARCH_MODEL_arm
    51 # include "adfiles/ad_arm.hpp"
    52 #endif
    53 #ifdef TARGET_ARCH_MODEL_ppc_32
    54 # include "adfiles/ad_ppc_32.hpp"
    55 #endif
    56 #ifdef TARGET_ARCH_MODEL_ppc_64
    57 # include "adfiles/ad_ppc_64.hpp"
    58 #endif
    61 // Portions of code courtesy of Clifford Click
    63 // Optimization - Graph Style
    65 // To avoid float value underflow
    66 #define MIN_BLOCK_FREQUENCY 1.e-35f
    68 //----------------------------schedule_node_into_block-------------------------
    69 // Insert node n into block b. Look for projections of n and make sure they
    70 // are in b also.
    71 void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
     72   // Set basic block of n, add n to b.
    73   _bbs.map(n->_idx, b);
    74   b->add_inst(n);
    76   // After Matching, nearly any old Node may have projections trailing it.
    77   // These are usually machine-dependent flags.  In any case, they might
    78   // float to another block below this one.  Move them up.
    79   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    80     Node*  use  = n->fast_out(i);
    81     if (use->is_Proj()) {
    82       Block* buse = _bbs[use->_idx];
    83       if (buse != b) {              // In wrong block?
    84         if (buse != NULL)
    85           buse->find_remove(use);   // Remove from wrong block
    86         _bbs.map(use->_idx, b);     // Re-insert in this block
    87         b->add_inst(use);
    88       }
    89     }
    90   }
    91 }
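        // Example (illustrative): after matching, a compare typically has a
        // flags MachProj hanging off it.  If the compare is scheduled into
        // block b, the loop above pulls that projection out of whatever block
        // it currently occupies and re-inserts it into b as well.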
    93 //----------------------------replace_block_proj_ctrl-------------------------
    94 // Nodes that have is_block_proj() nodes as their control need to use
    95 // the appropriate Region for their actual block as their control since
    96 // the projection will be in a predecessor block.
    97 void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
    98   const Node *in0 = n->in(0);
    99   assert(in0 != NULL, "Only control-dependent");
   100   const Node *p = in0->is_block_proj();
   101   if (p != NULL && p != n) {    // Control from a block projection?
   102     assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
   103     // Find trailing Region
   104     Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block
   105     uint j = 0;
    106     if (pb->_num_succs != 1) {  // More than 1 successor?
   107       // Search for successor
   108       uint max = pb->_nodes.size();
   109       assert( max > 1, "" );
   110       uint start = max - pb->_num_succs;
   111       // Find which output path belongs to projection
   112       for (j = start; j < max; j++) {
   113         if( pb->_nodes[j] == in0 )
   114           break;
   115       }
   116       assert( j < max, "must find" );
   117       // Change control to match head of successor basic block
   118       j -= start;
   119     }
   120     n->set_req(0, pb->_succs[j]->head());
   121   }
   122 }
   125 //------------------------------schedule_pinned_nodes--------------------------
   126 // Set the basic block for Nodes pinned into blocks
   127 void PhaseCFG::schedule_pinned_nodes( VectorSet &visited ) {
   128   // Allocate node stack of size C->unique()+8 to avoid frequent realloc
   129   GrowableArray <Node *> spstack(C->unique()+8);
   130   spstack.push(_root);
   131   while ( spstack.is_nonempty() ) {
   132     Node *n = spstack.pop();
   133     if( !visited.test_set(n->_idx) ) { // Test node and flag it as visited
   134       if( n->pinned() && !_bbs.lookup(n->_idx) ) {  // Pinned?  Nail it down!
   135         assert( n->in(0), "pinned Node must have Control" );
   136         // Before setting block replace block_proj control edge
   137         replace_block_proj_ctrl(n);
   138         Node *input = n->in(0);
   139         while( !input->is_block_start() )
   140           input = input->in(0);
   141         Block *b = _bbs[input->_idx];  // Basic block of controlling input
   142         schedule_node_into_block(n, b);
   143       }
   144       for( int i = n->req() - 1; i >= 0; --i ) {  // For all inputs
   145         if( n->in(i) != NULL )
   146           spstack.push(n->in(i));
   147       }
   148     }
   149   }
   150 }
   152 #ifdef ASSERT
   153 // Assert that new input b2 is dominated by all previous inputs.
    154 // Check this by seeing that it is dominated by b1, the deepest
   155 // input observed until b2.
   156 static void assert_dom(Block* b1, Block* b2, Node* n, Block_Array &bbs) {
   157   if (b1 == NULL)  return;
   158   assert(b1->_dom_depth < b2->_dom_depth, "sanity");
   159   Block* tmp = b2;
   160   while (tmp != b1 && tmp != NULL) {
   161     tmp = tmp->_idom;
   162   }
   163   if (tmp != b1) {
   164     // Detected an unschedulable graph.  Print some nice stuff and die.
   165     tty->print_cr("!!! Unschedulable graph !!!");
   166     for (uint j=0; j<n->len(); j++) { // For all inputs
   167       Node* inn = n->in(j); // Get input
   168       if (inn == NULL)  continue;  // Ignore NULL, missing inputs
   169       Block* inb = bbs[inn->_idx];
   170       tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
   171                  inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
   172       inn->dump();
   173     }
   174     tty->print("Failing node: ");
   175     n->dump();
   176     assert(false, "unscheduable graph");
   177   }
   178 }
   179 #endif
   181 static Block* find_deepest_input(Node* n, Block_Array &bbs) {
   182   // Find the last input dominated by all other inputs.
   183   Block* deepb           = NULL;        // Deepest block so far
   184   int    deepb_dom_depth = 0;
   185   for (uint k = 0; k < n->len(); k++) { // For all inputs
   186     Node* inn = n->in(k);               // Get input
   187     if (inn == NULL)  continue;         // Ignore NULL, missing inputs
   188     Block* inb = bbs[inn->_idx];
   189     assert(inb != NULL, "must already have scheduled this input");
   190     if (deepb_dom_depth < (int) inb->_dom_depth) {
   191       // The new inb must be dominated by the previous deepb.
   192       // The various inputs must be linearly ordered in the dom
   193       // tree, or else there will not be a unique deepest block.
   194       DEBUG_ONLY(assert_dom(deepb, inb, n, bbs));
   195       deepb = inb;                      // Save deepest block
   196       deepb_dom_depth = deepb->_dom_depth;
   197     }
   198   }
   199   assert(deepb != NULL, "must be at least one input to n");
   200   return deepb;
   201 }
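        // Example (illustrative, hypothetical blocks): if a node's inputs are
        // defined in B1 (dom_depth 1) and B4 (dom_depth 3), and B1 dominates
        // B4, then B4 is the unique deepest input block and hence the node's
        // earliest legal placement.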
   204 //------------------------------schedule_early---------------------------------
   205 // Find the earliest Block any instruction can be placed in.  Some instructions
    206 // are pinned into Blocks.  Unpinned instructions can appear in the last block in
   207 // which all their inputs occur.
   208 bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
   209   // Allocate stack with enough space to avoid frequent realloc
   210   Node_Stack nstack(roots.Size() + 8); // (unique >> 1) + 24 from Java2D stats
   211   // roots.push(_root); _root will be processed among C->top() inputs
   212   roots.push(C->top());
   213   visited.set(C->top()->_idx);
   215   while (roots.size() != 0) {
   216     // Use local variables nstack_top_n & nstack_top_i to cache values
   217     // on stack's top.
   218     Node *nstack_top_n = roots.pop();
   219     uint  nstack_top_i = 0;
   220 //while_nstack_nonempty:
   221     while (true) {
   222       // Get parent node and next input's index from stack's top.
   223       Node *n = nstack_top_n;
   224       uint  i = nstack_top_i;
   226       if (i == 0) {
   227         // Fixup some control.  Constants without control get attached
   228         // to root and nodes that use is_block_proj() nodes should be attached
   229         // to the region that starts their block.
   230         const Node *in0 = n->in(0);
   231         if (in0 != NULL) {              // Control-dependent?
   232           replace_block_proj_ctrl(n);
   233         } else {               // n->in(0) == NULL
   234           if (n->req() == 1) { // This guy is a constant with NO inputs?
   235             n->set_req(0, _root);
   236           }
   237         }
   238       }
   240       // First, visit all inputs and force them to get a block.  If an
   241       // input is already in a block we quit following inputs (to avoid
   242       // cycles). Instead we put that Node on a worklist to be handled
    243 // later (since ITS inputs may not have a block yet).
   244       bool done = true;              // Assume all n's inputs will be processed
   245       while (i < n->len()) {         // For all inputs
   246         Node *in = n->in(i);         // Get input
   247         ++i;
   248         if (in == NULL) continue;    // Ignore NULL, missing inputs
   249         int is_visited = visited.test_set(in->_idx);
   250         if (!_bbs.lookup(in->_idx)) { // Missing block selection?
   251           if (is_visited) {
   252             // assert( !visited.test(in->_idx), "did not schedule early" );
   253             return false;
   254           }
   255           nstack.push(n, i);         // Save parent node and next input's index.
   256           nstack_top_n = in;         // Process current input now.
   257           nstack_top_i = 0;
   258           done = false;              // Not all n's inputs processed.
   259           break; // continue while_nstack_nonempty;
   260         } else if (!is_visited) {    // Input not yet visited?
   261           roots.push(in);            // Visit this guy later, using worklist
   262         }
   263       }
   264       if (done) {
   265         // All of n's inputs have been processed, complete post-processing.
   267         // Some instructions are pinned into a block.  These include Region,
   268         // Phi, Start, Return, and other control-dependent instructions and
   269         // any projections which depend on them.
   270         if (!n->pinned()) {
   271           // Set earliest legal block.
   272           _bbs.map(n->_idx, find_deepest_input(n, _bbs));
   273         } else {
   274           assert(_bbs[n->_idx] == _bbs[n->in(0)->_idx], "Pinned Node should be at the same block as its control edge");
   275         }
   277         if (nstack.is_empty()) {
   278           // Finished all nodes on stack.
   279           // Process next node on the worklist 'roots'.
   280           break;
   281         }
   282         // Get saved parent node and next input's index.
   283         nstack_top_n = nstack.node();
   284         nstack_top_i = nstack.index();
   285         nstack.pop();
   286       } //    if (done)
   287     }   // while (true)
   288   }     // while (roots.size() != 0)
   289   return true;
   290 }
   292 //------------------------------dom_lca----------------------------------------
   293 // Find least common ancestor in dominator tree
   294 // LCA is a current notion of LCA, to be raised above 'this'.
   295 // As a convenient boundary condition, return 'this' if LCA is NULL.
   296 // Find the LCA of those two nodes.
   297 Block* Block::dom_lca(Block* LCA) {
   298   if (LCA == NULL || LCA == this)  return this;
   300   Block* anc = this;
   301   while (anc->_dom_depth > LCA->_dom_depth)
   302     anc = anc->_idom;           // Walk up till anc is as high as LCA
   304   while (LCA->_dom_depth > anc->_dom_depth)
   305     LCA = LCA->_idom;           // Walk up till LCA is as high as anc
   307   while (LCA != anc) {          // Walk both up till they are the same
   308     LCA = LCA->_idom;
   309     anc = anc->_idom;
   310   }
   312   return LCA;
   313 }
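        // Example (illustrative, hypothetical blocks): with idom chains
        //   B1 <- B2 <- B3   and   B1 <- B4
        // B3->dom_lca(B4) first walks B3 up to B4's depth (B3 -> B2), then
        // walks both up in lock step (B2 -> B1, B4 -> B1) and returns B1.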
   315 //--------------------------raise_LCA_above_use--------------------------------
   316 // We are placing a definition, and have been given a def->use edge.
   317 // The definition must dominate the use, so move the LCA upward in the
   318 // dominator tree to dominate the use.  If the use is a phi, adjust
   319 // the LCA only with the phi input paths which actually use this def.
   320 static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, Block_Array &bbs) {
   321   Block* buse = bbs[use->_idx];
   322   if (buse == NULL)    return LCA;   // Unused killing Projs have no use block
   323   if (!use->is_Phi())  return buse->dom_lca(LCA);
   324   uint pmax = use->req();       // Number of Phi inputs
    325   // Why doesn't this loop just break after finding the matching input to
   326   // the Phi?  Well...it's like this.  I do not have true def-use/use-def
   327   // chains.  Means I cannot distinguish, from the def-use direction, which
   328   // of many use-defs lead from the same use to the same def.  That is, this
   329   // Phi might have several uses of the same def.  Each use appears in a
   330   // different predecessor block.  But when I enter here, I cannot distinguish
   331   // which use-def edge I should find the predecessor block for.  So I find
   332   // them all.  Means I do a little extra work if a Phi uses the same value
   333   // more than once.
   334   for (uint j=1; j<pmax; j++) { // For all inputs
   335     if (use->in(j) == def) {    // Found matching input?
   336       Block* pred = bbs[buse->pred(j)->_idx];
   337       LCA = pred->dom_lca(LCA);
   338     }
   339   }
   340   return LCA;
   341 }
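        // Example (illustrative, hypothetical blocks): if 'def' feeds input 2
        // of a Phi that merges predecessors B5 and B8, only the predecessor
        // block for input 2 -- say B5 -- is folded into the LCA; the value
        // only has to reach the end of B5, it does not have to dominate the
        // Phi's own block.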
   343 //----------------------------raise_LCA_above_marks----------------------------
   344 // Return a new LCA that dominates LCA and any of its marked predecessors.
   345 // Search all my parents up to 'early' (exclusive), looking for predecessors
   346 // which are marked with the given index.  Return the LCA (in the dom tree)
   347 // of all marked blocks.  If there are none marked, return the original
   348 // LCA.
   349 static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark,
   350                                     Block* early, Block_Array &bbs) {
   351   Block_List worklist;
   352   worklist.push(LCA);
   353   while (worklist.size() > 0) {
   354     Block* mid = worklist.pop();
   355     if (mid == early)  continue;  // stop searching here
   357     // Test and set the visited bit.
   358     if (mid->raise_LCA_visited() == mark)  continue;  // already visited
   360     // Don't process the current LCA, otherwise the search may terminate early
   361     if (mid != LCA && mid->raise_LCA_mark() == mark) {
   362       // Raise the LCA.
   363       LCA = mid->dom_lca(LCA);
   364       if (LCA == early)  break;   // stop searching everywhere
   365       assert(early->dominates(LCA), "early is high enough");
   366       // Resume searching at that point, skipping intermediate levels.
   367       worklist.push(LCA);
   368       if (LCA == mid)
   369         continue; // Don't mark as visited to avoid early termination.
   370     } else {
   371       // Keep searching through this block's predecessors.
   372       for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
   373         Block* mid_parent = bbs[ mid->pred(j)->_idx ];
   374         worklist.push(mid_parent);
   375       }
   376     }
   377     mid->set_raise_LCA_visited(mark);
   378   }
   379   return LCA;
   380 }
   382 //--------------------------memory_early_block--------------------------------
   383 // This is a variation of find_deepest_input, the heart of schedule_early.
   384 // Find the "early" block for a load, if we considered only memory and
   385 // address inputs, that is, if other data inputs were ignored.
   386 //
   387 // Because a subset of edges are considered, the resulting block will
   388 // be earlier (at a shallower dom_depth) than the true schedule_early
   389 // point of the node. We compute this earlier block as a more permissive
   390 // site for anti-dependency insertion, but only if subsume_loads is enabled.
   391 static Block* memory_early_block(Node* load, Block* early, Block_Array &bbs) {
   392   Node* base;
   393   Node* index;
   394   Node* store = load->in(MemNode::Memory);
   395   load->as_Mach()->memory_inputs(base, index);
   397   assert(base != NodeSentinel && index != NodeSentinel,
   398          "unexpected base/index inputs");
   400   Node* mem_inputs[4];
   401   int mem_inputs_length = 0;
   402   if (base != NULL)  mem_inputs[mem_inputs_length++] = base;
   403   if (index != NULL) mem_inputs[mem_inputs_length++] = index;
   404   if (store != NULL) mem_inputs[mem_inputs_length++] = store;
    406   // In the comparison below, add one to account for the control input,
   407   // which may be null, but always takes up a spot in the in array.
   408   if (mem_inputs_length + 1 < (int) load->req()) {
   409     // This "load" has more inputs than just the memory, base and index inputs.
   410     // For purposes of checking anti-dependences, we need to start
   411     // from the early block of only the address portion of the instruction,
   412     // and ignore other blocks that may have factored into the wider
   413     // schedule_early calculation.
   414     if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);
   416     Block* deepb           = NULL;        // Deepest block so far
   417     int    deepb_dom_depth = 0;
   418     for (int i = 0; i < mem_inputs_length; i++) {
   419       Block* inb = bbs[mem_inputs[i]->_idx];
   420       if (deepb_dom_depth < (int) inb->_dom_depth) {
   421         // The new inb must be dominated by the previous deepb.
   422         // The various inputs must be linearly ordered in the dom
   423         // tree, or else there will not be a unique deepest block.
   424         DEBUG_ONLY(assert_dom(deepb, inb, load, bbs));
   425         deepb = inb;                      // Save deepest block
   426         deepb_dom_depth = deepb->_dom_depth;
   427       }
   428     }
   429     early = deepb;
   430   }
   432   return early;
   433 }
   435 //--------------------------insert_anti_dependences---------------------------
   436 // A load may need to witness memory that nearby stores can overwrite.
   437 // For each nearby store, either insert an "anti-dependence" edge
   438 // from the load to the store, or else move LCA upward to force the
   439 // load to (eventually) be scheduled in a block above the store.
   440 //
   441 // Do not add edges to stores on distinct control-flow paths;
   442 // only add edges to stores which might interfere.
   443 //
   444 // Return the (updated) LCA.  There will not be any possibly interfering
   445 // store between the load's "early block" and the updated LCA.
   446 // Any stores in the updated LCA will have new precedence edges
   447 // back to the load.  The caller is expected to schedule the load
   448 // in the LCA, in which case the precedence edges will make LCM
   449 // preserve anti-dependences.  The caller may also hoist the load
   450 // above the LCA, if it is not the early block.
   451 Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
   452   assert(load->needs_anti_dependence_check(), "must be a load of some sort");
   453   assert(LCA != NULL, "");
   454   DEBUG_ONLY(Block* LCA_orig = LCA);
   456   // Compute the alias index.  Loads and stores with different alias indices
   457   // do not need anti-dependence edges.
   458   uint load_alias_idx = C->get_alias_index(load->adr_type());
   459 #ifdef ASSERT
   460   if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
   461       (PrintOpto || VerifyAliases ||
   462        PrintMiscellaneous && (WizardMode || Verbose))) {
   463     // Load nodes should not consume all of memory.
   464     // Reporting a bottom type indicates a bug in adlc.
   465     // If some particular type of node validly consumes all of memory,
   466     // sharpen the preceding "if" to exclude it, so we can catch bugs here.
   467     tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
   468     load->dump(2);
   469     if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
   470   }
   471 #endif
   472   assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
   473          "String compare is only known 'load' that does not conflict with any stores");
   474   assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals),
   475          "String equals is a 'load' that does not conflict with any stores");
   476   assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf),
   477          "String indexOf is a 'load' that does not conflict with any stores");
   478   assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq),
   479          "Arrays equals is a 'load' that do not conflict with any stores");
   481   if (!C->alias_type(load_alias_idx)->is_rewritable()) {
   482     // It is impossible to spoil this load by putting stores before it,
   483     // because we know that the stores will never update the value
   484     // which 'load' must witness.
   485     return LCA;
   486   }
   488   node_idx_t load_index = load->_idx;
   490   // Note the earliest legal placement of 'load', as determined by
    491   // the unique point in the dom tree where all memory effects
   492   // and other inputs are first available.  (Computed by schedule_early.)
   493   // For normal loads, 'early' is the shallowest place (dom graph wise)
   494   // to look for anti-deps between this load and any store.
   495   Block* early = _bbs[load_index];
   497   // If we are subsuming loads, compute an "early" block that only considers
   498   // memory or address inputs. This block may be different than the
   499   // schedule_early block in that it could be at an even shallower depth in the
   500   // dominator tree, and allow for a broader discovery of anti-dependences.
   501   if (C->subsume_loads()) {
   502     early = memory_early_block(load, early, _bbs);
   503   }
   505   ResourceArea *area = Thread::current()->resource_area();
   506   Node_List worklist_mem(area);     // prior memory state to store
   507   Node_List worklist_store(area);   // possible-def to explore
   508   Node_List worklist_visited(area); // visited mergemem nodes
   509   Node_List non_early_stores(area); // all relevant stores outside of early
   510   bool must_raise_LCA = false;
   512 #ifdef TRACK_PHI_INPUTS
   513   // %%% This extra checking fails because MergeMem nodes are not GVNed.
   514   // Provide "phi_inputs" to check if every input to a PhiNode is from the
    515   // original memory state.  This indicates a PhiNode which should not
   516   // prevent the load from sinking.  For such a block, set_raise_LCA_mark
   517   // may be overly conservative.
   518   // Mechanism: count inputs seen for each Phi encountered in worklist_store.
   519   DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
   520 #endif
   522   // 'load' uses some memory state; look for users of the same state.
   523   // Recurse through MergeMem nodes to the stores that use them.
   525   // Each of these stores is a possible definition of memory
   526   // that 'load' needs to use.  We need to force 'load'
   527   // to occur before each such store.  When the store is in
   528   // the same block as 'load', we insert an anti-dependence
   529   // edge load->store.
   531   // The relevant stores "nearby" the load consist of a tree rooted
   532   // at initial_mem, with internal nodes of type MergeMem.
   533   // Therefore, the branches visited by the worklist are of this form:
   534   //    initial_mem -> (MergeMem ->)* store
   535   // The anti-dependence constraints apply only to the fringe of this tree.
   537   Node* initial_mem = load->in(MemNode::Memory);
   538   worklist_store.push(initial_mem);
   539   worklist_visited.push(initial_mem);
   540   worklist_mem.push(NULL);
   541   while (worklist_store.size() > 0) {
   542     // Examine a nearby store to see if it might interfere with our load.
   543     Node* mem   = worklist_mem.pop();
   544     Node* store = worklist_store.pop();
   545     uint op = store->Opcode();
   547     // MergeMems do not directly have anti-deps.
   548     // Treat them as internal nodes in a forward tree of memory states,
   549     // the leaves of which are each a 'possible-def'.
   550     if (store == initial_mem    // root (exclusive) of tree we are searching
   551         || op == Op_MergeMem    // internal node of tree we are searching
   552         ) {
   553       mem = store;   // It's not a possibly interfering store.
   554       if (store == initial_mem)
   555         initial_mem = NULL;  // only process initial memory once
   557       for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
   558         store = mem->fast_out(i);
   559         if (store->is_MergeMem()) {
   560           // Be sure we don't get into combinatorial problems.
   561           // (Allow phis to be repeated; they can merge two relevant states.)
   562           uint j = worklist_visited.size();
   563           for (; j > 0; j--) {
   564             if (worklist_visited.at(j-1) == store)  break;
   565           }
   566           if (j > 0)  continue; // already on work list; do not repeat
   567           worklist_visited.push(store);
   568         }
   569         worklist_mem.push(mem);
   570         worklist_store.push(store);
   571       }
   572       continue;
   573     }
   575     if (op == Op_MachProj || op == Op_Catch)   continue;
   576     if (store->needs_anti_dependence_check())  continue;  // not really a store
   578     // Compute the alias index.  Loads and stores with different alias
   579     // indices do not need anti-dependence edges.  Wide MemBar's are
   580     // anti-dependent on everything (except immutable memories).
   581     const TypePtr* adr_type = store->adr_type();
   582     if (!C->can_alias(adr_type, load_alias_idx))  continue;
   584     // Most slow-path runtime calls do NOT modify Java memory, but
   585     // they can block and so write Raw memory.
   586     if (store->is_Mach()) {
   587       MachNode* mstore = store->as_Mach();
   588       if (load_alias_idx != Compile::AliasIdxRaw) {
   589         // Check for call into the runtime using the Java calling
   590         // convention (and from there into a wrapper); it has no
   591         // _method.  Can't do this optimization for Native calls because
   592         // they CAN write to Java memory.
   593         if (mstore->ideal_Opcode() == Op_CallStaticJava) {
   594           assert(mstore->is_MachSafePoint(), "");
   595           MachSafePointNode* ms = (MachSafePointNode*) mstore;
   596           assert(ms->is_MachCallJava(), "");
   597           MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
   598           if (mcj->_method == NULL) {
   599             // These runtime calls do not write to Java visible memory
   600             // (other than Raw) and so do not require anti-dependence edges.
   601             continue;
   602           }
   603         }
   604         // Same for SafePoints: they read/write Raw but only read otherwise.
   605         // This is basically a workaround for SafePoints only defining control
   606         // instead of control + memory.
   607         if (mstore->ideal_Opcode() == Op_SafePoint)
   608           continue;
   609       } else {
   610         // Some raw memory, such as the load of "top" at an allocation,
   611         // can be control dependent on the previous safepoint. See
   612         // comments in GraphKit::allocate_heap() about control input.
   613         // Inserting an anti-dep between such a safepoint and a use
   614         // creates a cycle, and will cause a subsequent failure in
   615         // local scheduling.  (BugId 4919904)
   616         // (%%% How can a control input be a safepoint and not a projection??)
   617         if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
   618           continue;
   619       }
   620     }
   622     // Identify a block that the current load must be above,
   623     // or else observe that 'store' is all the way up in the
   624     // earliest legal block for 'load'.  In the latter case,
   625     // immediately insert an anti-dependence edge.
   626     Block* store_block = _bbs[store->_idx];
   627     assert(store_block != NULL, "unused killing projections skipped above");
   629     if (store->is_Phi()) {
   630       // 'load' uses memory which is one (or more) of the Phi's inputs.
   631       // It must be scheduled not before the Phi, but rather before
   632       // each of the relevant Phi inputs.
   633       //
   634       // Instead of finding the LCA of all inputs to a Phi that match 'mem',
   635       // we mark each corresponding predecessor block and do a combined
   636       // hoisting operation later (raise_LCA_above_marks).
   637       //
   638       // Do not assert(store_block != early, "Phi merging memory after access")
   639       // PhiNode may be at start of block 'early' with backedge to 'early'
   640       DEBUG_ONLY(bool found_match = false);
   641       for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
   642         if (store->in(j) == mem) {   // Found matching input?
   643           DEBUG_ONLY(found_match = true);
   644           Block* pred_block = _bbs[store_block->pred(j)->_idx];
   645           if (pred_block != early) {
   646             // If any predecessor of the Phi matches the load's "early block",
   647             // we do not need a precedence edge between the Phi and 'load'
   648             // since the load will be forced into a block preceding the Phi.
   649             pred_block->set_raise_LCA_mark(load_index);
   650             assert(!LCA_orig->dominates(pred_block) ||
   651                    early->dominates(pred_block), "early is high enough");
   652             must_raise_LCA = true;
   653           } else {
   654             // anti-dependent upon PHI pinned below 'early', no edge needed
   655             LCA = early;             // but can not schedule below 'early'
   656           }
   657         }
   658       }
   659       assert(found_match, "no worklist bug");
   660 #ifdef TRACK_PHI_INPUTS
   661 #ifdef ASSERT
   662       // This assert asks about correct handling of PhiNodes, which may not
   663       // have all input edges directly from 'mem'. See BugId 4621264
   664       int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
   665       // Increment by exactly one even if there are multiple copies of 'mem'
   666       // coming into the phi, because we will run this block several times
   667       // if there are several copies of 'mem'.  (That's how DU iterators work.)
   668       phi_inputs.at_put(store->_idx, num_mem_inputs);
   669       assert(PhiNode::Input + num_mem_inputs < store->req(),
   670              "Expect at least one phi input will not be from original memory state");
   671 #endif //ASSERT
   672 #endif //TRACK_PHI_INPUTS
   673     } else if (store_block != early) {
   674       // 'store' is between the current LCA and earliest possible block.
   675       // Label its block, and decide later on how to raise the LCA
   676       // to include the effect on LCA of this store.
   677       // If this store's block gets chosen as the raised LCA, we
   678       // will find him on the non_early_stores list and stick him
   679       // with a precedence edge.
   680       // (But, don't bother if LCA is already raised all the way.)
   681       if (LCA != early) {
   682         store_block->set_raise_LCA_mark(load_index);
   683         must_raise_LCA = true;
   684         non_early_stores.push(store);
   685       }
   686     } else {
   687       // Found a possibly-interfering store in the load's 'early' block.
   688       // This means 'load' cannot sink at all in the dominator tree.
   689       // Add an anti-dep edge, and squeeze 'load' into the highest block.
   690       assert(store != load->in(0), "dependence cycle found");
   691       if (verify) {
   692         assert(store->find_edge(load) != -1, "missing precedence edge");
   693       } else {
   694         store->add_prec(load);
   695       }
   696       LCA = early;
   697       // This turns off the process of gathering non_early_stores.
   698     }
   699   }
   700   // (Worklist is now empty; all nearby stores have been visited.)
   702   // Finished if 'load' must be scheduled in its 'early' block.
   703   // If we found any stores there, they have already been given
   704   // precedence edges.
   705   if (LCA == early)  return LCA;
   707   // We get here only if there are no possibly-interfering stores
   708   // in the load's 'early' block.  Move LCA up above all predecessors
   709   // which contain stores we have noted.
   710   //
   711   // The raised LCA block can be a home to such interfering stores,
   712   // but its predecessors must not contain any such stores.
   713   //
   714   // The raised LCA will be a lower bound for placing the load,
   715   // preventing the load from sinking past any block containing
   716   // a store that may invalidate the memory state required by 'load'.
   717   if (must_raise_LCA)
   718     LCA = raise_LCA_above_marks(LCA, load->_idx, early, _bbs);
   719   if (LCA == early)  return LCA;
   721   // Insert anti-dependence edges from 'load' to each store
   722   // in the non-early LCA block.
   723   // Mine the non_early_stores list for such stores.
   724   if (LCA->raise_LCA_mark() == load_index) {
   725     while (non_early_stores.size() > 0) {
   726       Node* store = non_early_stores.pop();
   727       Block* store_block = _bbs[store->_idx];
   728       if (store_block == LCA) {
   729         // add anti_dependence from store to load in its own block
   730         assert(store != load->in(0), "dependence cycle found");
   731         if (verify) {
   732           assert(store->find_edge(load) != -1, "missing precedence edge");
   733         } else {
   734           store->add_prec(load);
   735         }
   736       } else {
   737         assert(store_block->raise_LCA_mark() == load_index, "block was marked");
   738         // Any other stores we found must be either inside the new LCA
   739         // or else outside the original LCA.  In the latter case, they
   740         // did not interfere with any use of 'load'.
   741         assert(LCA->dominates(store_block)
   742                || !LCA_orig->dominates(store_block), "no stray stores");
   743       }
   744     }
   745   }
   747   // Return the highest block containing stores; any stores
   748   // within that block have been given anti-dependence edges.
   749   return LCA;
   750 }
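        // Example (illustrative, hypothetical blocks): for a load whose early
        // block is B2 and a possibly-aliasing store also in B2, the store gets
        // a precedence edge back to the load and the LCA collapses to B2.  If
        // the store instead sits in a deeper block B7, B7 is only marked here;
        // raise_LCA_above_marks() then raises the LCA so that the load either
        // lands above B7, or lands in B7 itself, in which case the store gets
        // a precedence edge back to the load.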
   752 // This class is used to iterate backwards over the nodes in the graph.
   754 class Node_Backward_Iterator {
   756 private:
   757   Node_Backward_Iterator();
   759 public:
   760   // Constructor for the iterator
   761   Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs);
   763   // Postincrement operator to iterate over the nodes
   764   Node *next();
   766 private:
   767   VectorSet   &_visited;
   768   Node_List   &_stack;
   769   Block_Array &_bbs;
   770 };
   772 // Constructor for the Node_Backward_Iterator
   773 Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs )
   774   : _visited(visited), _stack(stack), _bbs(bbs) {
   775   // The stack should contain exactly the root
   776   stack.clear();
   777   stack.push(root);
   779   // Clear the visited bits
   780   visited.Clear();
   781 }
   783 // Iterator for the Node_Backward_Iterator
   784 Node *Node_Backward_Iterator::next() {
   786   // If the _stack is empty, then just return NULL: finished.
   787   if ( !_stack.size() )
   788     return NULL;
   790   // '_stack' is emulating a real _stack.  The 'visit-all-users' loop has been
   791   // made stateless, so I do not need to record the index 'i' on my _stack.
   792   // Instead I visit all users each time, scanning for unvisited users.
   793   // I visit unvisited not-anti-dependence users first, then anti-dependent
   794   // children next.
   795   Node *self = _stack.pop();
   797   // I cycle here when I am entering a deeper level of recursion.
   798   // The key variable 'self' was set prior to jumping here.
   799   while( 1 ) {
   801     _visited.set(self->_idx);
   803     // Now schedule all uses as late as possible.
   804     uint src     = self->is_Proj() ? self->in(0)->_idx : self->_idx;
   805     uint src_rpo = _bbs[src]->_rpo;
   807     // Schedule all nodes in a post-order visit
   808     Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any
   810     // Scan for unvisited nodes
   811     for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
   812       // For all uses, schedule late
   813       Node* n = self->fast_out(i); // Use
   815       // Skip already visited children
   816       if ( _visited.test(n->_idx) )
   817         continue;
   819       // do not traverse backward control edges
   820       Node *use = n->is_Proj() ? n->in(0) : n;
   821       uint use_rpo = _bbs[use->_idx]->_rpo;
   823       if ( use_rpo < src_rpo )
   824         continue;
   826       // Phi nodes always precede uses in a basic block
   827       if ( use_rpo == src_rpo && use->is_Phi() )
   828         continue;
   830       unvisited = n;      // Found unvisited
   832       // Check for possible-anti-dependent
   833       if( !n->needs_anti_dependence_check() )
   834         break;            // Not visited, not anti-dep; schedule it NOW
   835     }
   837     // Did I find an unvisited not-anti-dependent Node?
   838     if ( !unvisited )
   839       break;                  // All done with children; post-visit 'self'
   841     // Visit the unvisited Node.  Contains the obvious push to
   842     // indicate I'm entering a deeper level of recursion.  I push the
   843     // old state onto the _stack and set a new state and loop (recurse).
   844     _stack.push(self);
   845     self = unvisited;
   846   } // End recursion loop
   848   return self;
   849 }
   851 //------------------------------ComputeLatenciesBackwards----------------------
   852 // Compute the latency of all the instructions.
   853 void PhaseCFG::ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack) {
   854 #ifndef PRODUCT
   855   if (trace_opto_pipelining())
   856     tty->print("\n#---- ComputeLatenciesBackwards ----\n");
   857 #endif
   859   Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
   860   Node *n;
   862   // Walk over all the nodes from last to first
   863   while (n = iter.next()) {
   864     // Set the latency for the definitions of this instruction
   865     partial_latency_of_defs(n);
   866   }
   867 } // end ComputeLatenciesBackwards
   869 //------------------------------partial_latency_of_defs------------------------
   870 // Compute the latency impact of this node on all defs.  This computes
   871 // a number that increases as we approach the beginning of the routine.
   872 void PhaseCFG::partial_latency_of_defs(Node *n) {
   873   // Set the latency for this instruction
   874 #ifndef PRODUCT
   875   if (trace_opto_pipelining()) {
   876     tty->print("# latency_to_inputs: node_latency[%d] = %d for node",
   877                n->_idx, _node_latency->at_grow(n->_idx));
   878     dump();
   879   }
   880 #endif
   882   if (n->is_Proj())
   883     n = n->in(0);
   885   if (n->is_Root())
   886     return;
   888   uint nlen = n->len();
   889   uint use_latency = _node_latency->at_grow(n->_idx);
   890   uint use_pre_order = _bbs[n->_idx]->_pre_order;
   892   for ( uint j=0; j<nlen; j++ ) {
   893     Node *def = n->in(j);
   895     if (!def || def == n)
   896       continue;
   898     // Walk backwards thru projections
   899     if (def->is_Proj())
   900       def = def->in(0);
   902 #ifndef PRODUCT
   903     if (trace_opto_pipelining()) {
   904       tty->print("#    in(%2d): ", j);
   905       def->dump();
   906     }
   907 #endif
   909     // If the defining block is not known, assume it is ok
   910     Block *def_block = _bbs[def->_idx];
   911     uint def_pre_order = def_block ? def_block->_pre_order : 0;
   913     if ( (use_pre_order <  def_pre_order) ||
   914          (use_pre_order == def_pre_order && n->is_Phi()) )
   915       continue;
   917     uint delta_latency = n->latency(j);
   918     uint current_latency = delta_latency + use_latency;
   920     if (_node_latency->at_grow(def->_idx) < current_latency) {
   921       _node_latency->at_put_grow(def->_idx, current_latency);
   922     }
   924 #ifndef PRODUCT
   925     if (trace_opto_pipelining()) {
   926       tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d",
   927                     use_latency, j, delta_latency, current_latency, def->_idx,
   928                     _node_latency->at_grow(def->_idx));
   929     }
   930 #endif
   931   }
   932 }
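        // Example (illustrative): if this use has node_latency 5 and the edge
        // to input j contributes n->latency(j) == 2, the def on that edge is
        // raised to a latency of at least 7; a def feeding several uses keeps
        // the maximum over all of them.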
   934 //------------------------------latency_from_use-------------------------------
   935 // Compute the latency of a specific use
   936 int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
   937   // If self-reference, return no latency
   938   if (use == n || use->is_Root())
   939     return 0;
   941   uint def_pre_order = _bbs[def->_idx]->_pre_order;
   942   uint latency = 0;
   944   // If the use is not a projection, then it is simple...
   945   if (!use->is_Proj()) {
   946 #ifndef PRODUCT
   947     if (trace_opto_pipelining()) {
   948       tty->print("#    out(): ");
   949       use->dump();
   950     }
   951 #endif
   953     uint use_pre_order = _bbs[use->_idx]->_pre_order;
   955     if (use_pre_order < def_pre_order)
   956       return 0;
   958     if (use_pre_order == def_pre_order && use->is_Phi())
   959       return 0;
   961     uint nlen = use->len();
   962     uint nl = _node_latency->at_grow(use->_idx);
   964     for ( uint j=0; j<nlen; j++ ) {
   965       if (use->in(j) == n) {
   966         // Change this if we want local latencies
   967         uint ul = use->latency(j);
   968         uint  l = ul + nl;
   969         if (latency < l) latency = l;
   970 #ifndef PRODUCT
   971         if (trace_opto_pipelining()) {
   972           tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, latency = %d",
   973                         nl, j, ul, l, latency);
   974         }
   975 #endif
   976       }
   977     }
   978   } else {
   979     // This is a projection, just grab the latency of the use(s)
   980     for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
   981       uint l = latency_from_use(use, def, use->fast_out(j));
   982       if (latency < l) latency = l;
   983     }
   984   }
   986   return latency;
   987 }
   989 //------------------------------latency_from_uses------------------------------
    990 // Compute the latency of this instruction relative to all of its uses.
   991 // This computes a number that increases as we approach the beginning of the
   992 // routine.
   993 void PhaseCFG::latency_from_uses(Node *n) {
   994   // Set the latency for this instruction
   995 #ifndef PRODUCT
   996   if (trace_opto_pipelining()) {
   997     tty->print("# latency_from_outputs: node_latency[%d] = %d for node",
   998                n->_idx, _node_latency->at_grow(n->_idx));
   999     dump();
   1000   }
   1001 #endif
  1002   uint latency=0;
  1003   const Node *def = n->is_Proj() ? n->in(0): n;
  1005   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  1006     uint l = latency_from_use(n, def, n->fast_out(i));
  1008     if (latency < l) latency = l;
   1009   }
   1011   _node_latency->at_put_grow(n->_idx, latency);
   1012 }
  1014 //------------------------------hoist_to_cheaper_block-------------------------
  1015 // Pick a block for node self, between early and LCA, that is a cheaper
  1016 // alternative to LCA.
  1017 Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
  1018   const double delta = 1+PROB_UNLIKELY_MAG(4);
  1019   Block* least       = LCA;
  1020   double least_freq  = least->_freq;
  1021   uint target        = _node_latency->at_grow(self->_idx);
  1022   uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx);
  1023   uint end_latency   = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
  1024   bool in_latency    = (target <= start_latency);
  1025   const Block* root_block = _bbs[_root->_idx];
  1027   // Turn off latency scheduling if scheduling is just plain off
  1028   if (!C->do_scheduling())
  1029     in_latency = true;
  1031   // Do not hoist (to cover latency) instructions which target a
  1032   // single register.  Hoisting stretches the live range of the
  1033   // single register and may force spilling.
  1034   MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
  1035   if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
  1036     in_latency = true;
  1038 #ifndef PRODUCT
  1039   if (trace_opto_pipelining()) {
  1040     tty->print("# Find cheaper block for latency %d: ",
  1041       _node_latency->at_grow(self->_idx));
  1042     self->dump();
  1043     tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
  1044       LCA->_pre_order,
  1045       LCA->_nodes[0]->_idx,
  1046       start_latency,
  1047       LCA->_nodes[LCA->end_idx()]->_idx,
  1048       end_latency,
  1049       least_freq);
   1050   }
   1051 #endif
  1053   int cand_cnt = 0;  // number of candidates tried
  1055   // Walk up the dominator tree from LCA (Lowest common ancestor) to
  1056   // the earliest legal location.  Capture the least execution frequency.
  1057   while (LCA != early) {
  1058     LCA = LCA->_idom;         // Follow up the dominator tree
  1060     if (LCA == NULL) {
  1061       // Bailout without retry
  1062       C->record_method_not_compilable("late schedule failed: LCA == NULL");
  1063       return least;
   1064     }
   1066     // Don't hoist machine instructions to the root basic block
  1067     if (mach && LCA == root_block)
  1068       break;
  1070     uint start_lat = _node_latency->at_grow(LCA->_nodes[0]->_idx);
  1071     uint end_idx   = LCA->end_idx();
  1072     uint end_lat   = _node_latency->at_grow(LCA->_nodes[end_idx]->_idx);
  1073     double LCA_freq = LCA->_freq;
  1074 #ifndef PRODUCT
  1075     if (trace_opto_pipelining()) {
  1076       tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
  1077         LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
   1078     }
   1079 #endif
  1080     cand_cnt++;
  1081     if (LCA_freq < least_freq              || // Better Frequency
  1082         (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
  1083          (!StressGCM                    &&    // Otherwise, choose with latency
  1084           !in_latency                   &&    // No block containing latency
  1085           LCA_freq < least_freq * delta &&    // No worse frequency
  1086           target >= end_lat             &&    // within latency range
  1087           !self->is_iteratively_computed() )  // But don't hoist IV increments
  1088              // because they may end up above other uses of their phi forcing
  1089              // their result register to be different from their input.
  1090        ) {
  1091       least = LCA;            // Found cheaper block
  1092       least_freq = LCA_freq;
  1093       start_latency = start_lat;
  1094       end_latency = end_lat;
  1095       if (target <= start_lat)
  1096         in_latency = true;
   1097     }
   1098   }
   1100 #ifndef PRODUCT
  1101   if (trace_opto_pipelining()) {
  1102     tty->print_cr("#  Choose block B%d with start latency=%d and freq=%g",
  1103       least->_pre_order, start_latency, least_freq);
   1104   }
   1105 #endif
  1107   // See if the latency needs to be updated
  1108   if (target < end_latency) {
  1109 #ifndef PRODUCT
  1110     if (trace_opto_pipelining()) {
  1111       tty->print_cr("#  Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
   1112     }
   1113 #endif
  1114     _node_latency->at_put_grow(self->_idx, end_latency);
  1115     partial_latency_of_defs(self);
   1116   }
   1118   return least;
   1119 }
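        // Example (illustrative): if the LCA sits in a loop body with freq 100
        // and its dominator just outside the loop has freq 1, the walk above
        // moves 'least' to that dominator, since a strictly lower frequency is
        // always accepted.  A block of roughly equal frequency (within delta)
        // is only chosen when it still covers the node's latency
        // (target >= end_lat) and the node is not an IV increment.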
  1122 //------------------------------schedule_late-----------------------------------
  1123 // Now schedule all codes as LATE as possible.  This is the LCA in the
  1124 // dominator tree of all USES of a value.  Pick the block with the least
  1125 // loop nesting depth that is lowest in the dominator tree.
  1126 extern const char must_clone[];
  1127 void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
  1128 #ifndef PRODUCT
  1129   if (trace_opto_pipelining())
  1130     tty->print("\n#---- schedule_late ----\n");
  1131 #endif
  1133   Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
  1134   Node *self;
  1136   // Walk over all the nodes from last to first
  1137   while (self = iter.next()) {
  1138     Block* early = _bbs[self->_idx];   // Earliest legal placement
  1140     if (self->is_top()) {
  1141       // Top node goes in bb #2 with other constants.
  1142       // It must be special-cased, because it has no out edges.
  1143       early->add_inst(self);
  1144       continue;
   1145     }
   1147     // No uses, just terminate
  1148     if (self->outcnt() == 0) {
  1149       assert(self->is_MachProj(), "sanity");
  1150       continue;                   // Must be a dead machine projection
   1151     }
   1153     // If node is pinned in the block, then no scheduling can be done.
  1154     if( self->pinned() )          // Pinned in block?
  1155       continue;
  1157     MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
  1158     if (mach) {
  1159       switch (mach->ideal_Opcode()) {
  1160       case Op_CreateEx:
  1161         // Don't move exception creation
  1162         early->add_inst(self);
  1163         continue;
  1164         break;
  1165       case Op_CheckCastPP:
  1166         // Don't move CheckCastPP nodes away from their input, if the input
  1167         // is a rawptr (5071820).
  1168         Node *def = self->in(1);
  1169         if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
  1170           early->add_inst(self);
  1171 #ifdef ASSERT
  1172           _raw_oops.push(def);
  1173 #endif
  1174           continue;
   1175         }
   1176         break;
   1177       }
   1178     }
   1180     // Gather LCA of all uses
   1181     Block *LCA = NULL;
   1182     {
  1183       for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
  1184         // For all uses, find LCA
  1185         Node* use = self->fast_out(i);
  1186         LCA = raise_LCA_above_use(LCA, use, self, _bbs);
   1187       }
   1188     }  // (Hide defs of imax, i from rest of block.)
  1190     // Place temps in the block of their use.  This isn't a
  1191     // requirement for correctness but it reduces useless
  1192     // interference between temps and other nodes.
  1193     if (mach != NULL && mach->is_MachTemp()) {
  1194       _bbs.map(self->_idx, LCA);
  1195       LCA->add_inst(self);
  1196       continue;
   1197     }
   1199     // Check if 'self' could be anti-dependent on memory
  1200     if (self->needs_anti_dependence_check()) {
  1201       // Hoist LCA above possible-defs and insert anti-dependences to
  1202       // defs in new LCA block.
  1203       LCA = insert_anti_dependences(LCA, self);
   1204     }
   1206     if (early->_dom_depth > LCA->_dom_depth) {
  1207       // Somehow the LCA has moved above the earliest legal point.
  1208       // (One way this can happen is via memory_early_block.)
  1209       if (C->subsume_loads() == true && !C->failing()) {
  1210         // Retry with subsume_loads == false
  1211         // If this is the first failure, the sentinel string will "stick"
  1212         // to the Compile object, and the C2Compiler will see it and retry.
  1213         C->record_failure(C2Compiler::retry_no_subsuming_loads());
  1214       } else {
  1215         // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
  1216         C->record_method_not_compilable("late schedule failed: incorrect graph");
   1217       }
   1218       return;
   1219     }
  1221     // If there is no opportunity to hoist, then we're done.
  1222     // In stress mode, try to hoist even the single operations.
  1223     bool try_to_hoist = StressGCM || (LCA != early);
  1225     // Must clone guys stay next to use; no hoisting allowed.
  1226     // Also cannot hoist guys that alter memory or are otherwise not
  1227     // allocatable (hoisting can make a value live longer, leading to
  1228     // anti and output dependency problems which are normally resolved
  1229     // by the register allocator giving everyone a different register).
  1230     if (mach != NULL && must_clone[mach->ideal_Opcode()])
  1231       try_to_hoist = false;
  1233     Block* late = NULL;
  1234     if (try_to_hoist) {
  1235       // Now find the block with the least execution frequency.
  1236       // Start at the latest schedule and work up to the earliest schedule
  1237       // in the dominator tree.  Thus the Node will dominate all its uses.
  1238       late = hoist_to_cheaper_block(LCA, early, self);
  1239     } else {
  1240       // Just use the LCA of the uses.
  1241       late = LCA;
   1242     }
   1244     // Put the node into target block
  1245     schedule_node_into_block(self, late);
  1247 #ifdef ASSERT
  1248     if (self->needs_anti_dependence_check()) {
  1249       // since precedence edges are only inserted when we're sure they
  1250       // are needed make sure that after placement in a block we don't
  1251       // need any new precedence edges.
  1252       verify_anti_dependences(late, self);
  1254 #endif
  1255   } // Loop until all nodes have been visited
  1257 } // end ScheduleLate
  1259 //------------------------------GlobalCodeMotion-------------------------------
  1260 void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_list ) {
  1261   ResourceMark rm;
  1263 #ifndef PRODUCT
  1264   if (trace_opto_pipelining()) {
  1265     tty->print("\n---- Start GlobalCodeMotion ----\n");
  1267 #endif
  1269   // Initialize the bbs.map for things on the proj_list
  1270   uint i;
  1271   for( i=0; i < proj_list.size(); i++ )
  1272     _bbs.map(proj_list[i]->_idx, NULL);
  1274   // Set the basic block for Nodes pinned into blocks
  1275   Arena *a = Thread::current()->resource_area();
  1276   VectorSet visited(a);
  1277   schedule_pinned_nodes( visited );
  1279   // Find the earliest Block any instruction can be placed in.  Some
  1280   // instructions are pinned into Blocks.  Unpinned instructions can
  1281   // appear in the last block in which all their inputs occur.
  1282   visited.Clear();
  1283   Node_List stack(a);
  1284   stack.map( (unique >> 1) + 16, NULL); // Pre-grow the list
  1285   if (!schedule_early(visited, stack)) {
  1286     // Bailout without retry
  1287     C->record_method_not_compilable("early schedule failed");
  1288     return;
  1291   // Build Def-Use edges.
  1292   proj_list.push(_root);        // Add real root as another root
  1293   proj_list.pop();
  1295   // Compute the latency information (via backwards walk) for all the
  1296   // instructions in the graph
  1297   _node_latency = new GrowableArray<uint>(); // resource_area allocation
  1299   if( C->do_scheduling() )
  1300     ComputeLatenciesBackwards(visited, stack);
  1302   // Now schedule all code as LATE as possible.  This is the LCA in the
  1303   // dominator tree of all USES of a value.  Pick the block with the least
  1304   // loop nesting depth that is lowest in the dominator tree.
  1305   // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
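         // (Illustrative example, not from the original comment: a value used
         // only in the two arms of an if has the block ending in that if as
         // the dominator-tree LCA of its uses, so it is scheduled no later
         // than that block.)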
  1306   schedule_late(visited, stack);
  1307   if( C->failing() ) {
  1308     // schedule_late fails only when graph is incorrect.
  1309     assert(!VerifyGraphEdges, "verification should have failed");
  1310     return;
  1313   unique = C->unique();
  1315 #ifndef PRODUCT
  1316   if (trace_opto_pipelining()) {
  1317     tty->print("\n---- Detect implicit null checks ----\n");
  1319 #endif
  1321   // Detect implicit-null-check opportunities.  Basically, find NULL checks
  1322   // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  1323   // I can generate a memory op if there is not one nearby.
  1324   if (C->is_method_compilation()) {
  1325     // Don't do it for natives, adapters, or runtime stubs
  1326     int allowed_reasons = 0;
  1327     // ...and don't do it when there have been too many traps, globally.
  1328     for (int reason = (int)Deoptimization::Reason_none+1;
  1329          reason < Compile::trapHistLength; reason++) {
  1330       assert(reason < BitsPerInt, "recode bit map");
  1331       if (!C->too_many_traps((Deoptimization::DeoptReason) reason))
  1332         allowed_reasons |= nth_bit(reason);
  1334     // By reversing the loop direction we get a very minor gain on mpegaudio.
  1335     // Feel free to revert to a forward loop for clarity.
  1336     // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
  1337     for( int i= matcher._null_check_tests.size()-2; i>=0; i-=2 ) {
  1338       Node *proj = matcher._null_check_tests[i  ];
  1339       Node *val  = matcher._null_check_tests[i+1];
  1340       _bbs[proj->_idx]->implicit_null_check(this, proj, val, allowed_reasons);
  1341       // The implicit_null_check will only perform the transformation
  1342       // if the null branch is truly uncommon, *and* it leads to an
  1343       // uncommon trap.  Combined with the too_many_traps guards
  1344       // above, this prevents SEGV storms reported in 6366351,
  1345       // by recompiling offending methods without this optimization.
  1349 #ifndef PRODUCT
  1350   if (trace_opto_pipelining()) {
  1351     tty->print("\n---- Start Local Scheduling ----\n");
  1353 #endif
  1355   // Schedule locally.  Right now a simple topological sort.
  1356   // Later, do a real latency aware scheduler.
  1357   uint max_idx = C->unique();
  1358   GrowableArray<int> ready_cnt(max_idx, max_idx, -1);
  1359   visited.Clear();
  1360   for (i = 0; i < _num_blocks; i++) {
  1361     if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {
  1362       if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
  1363         C->record_method_not_compilable("local schedule failed");
  1365       return;
  1369   // If we inserted any instructions between a Call and its CatchNode,
  1370   // clone the instructions on all paths below the Catch.
  1371   for( i=0; i < _num_blocks; i++ )
  1372     _blocks[i]->call_catch_cleanup(_bbs, C);
  1374 #ifndef PRODUCT
  1375   if (trace_opto_pipelining()) {
  1376     tty->print("\n---- After GlobalCodeMotion ----\n");
  1377     for (uint i = 0; i < _num_blocks; i++) {
  1378       _blocks[i]->dump();
  1381 #endif
  1382   // Dead.
  1383   _node_latency = (GrowableArray<uint> *)0xdeadbeef;
  1387 //------------------------------Estimate_Block_Frequency-----------------------
  1388 // Estimate block frequencies based on IfNode probabilities.
  1389 void PhaseCFG::Estimate_Block_Frequency() {
  1391   // Force conditional branches leading to uncommon traps to be unlikely,
  1392   // not because we get to the uncommon_trap with less relative frequency,
  1393   // but because an uncommon_trap typically causes a deopt, so we only get
  1394   // there once.
  1395   if (C->do_freq_based_layout()) {
  1396     Block_List worklist;
  1397     Block* root_blk = _blocks[0];
  1398     for (uint i = 1; i < root_blk->num_preds(); i++) {
  1399       Block *pb = _bbs[root_blk->pred(i)->_idx];
  1400       if (pb->has_uncommon_code()) {
  1401         worklist.push(pb);
  1404     while (worklist.size() > 0) {
  1405       Block* uct = worklist.pop();
  1406       if (uct == _broot) continue;
  1407       for (uint i = 1; i < uct->num_preds(); i++) {
  1408         Block *pb = _bbs[uct->pred(i)->_idx];
  1409         if (pb->_num_succs == 1) {
  1410           worklist.push(pb);
  1411         } else if (pb->num_fall_throughs() == 2) {
  1412           pb->update_uncommon_branch(uct);
  1418   // Create the loop tree and calculate loop depth.
  1419   _root_loop = create_loop_tree();
  1420   _root_loop->compute_loop_depth(0);
  1422   // Compute block frequency of each block, relative to a single loop entry.
  1423   _root_loop->compute_freq();
  1425   // Adjust all frequencies to be relative to a single method entry
  1426   _root_loop->_freq = 1.0;
  1427   _root_loop->scale_freq();
  1429   // Save the outermost loop frequency for the LRG frequency threshold
  1430   _outer_loop_freq = _root_loop->outer_loop_freq();
  1432   // Force paths ending at uncommon traps to be infrequent
  1433   if (!C->do_freq_based_layout()) {
  1434     Block_List worklist;
  1435     Block* root_blk = _blocks[0];
  1436     for (uint i = 1; i < root_blk->num_preds(); i++) {
  1437       Block *pb = _bbs[root_blk->pred(i)->_idx];
  1438       if (pb->has_uncommon_code()) {
  1439         worklist.push(pb);
  1442     while (worklist.size() > 0) {
  1443       Block* uct = worklist.pop();
  1444       uct->_freq = PROB_MIN;
  1445       for (uint i = 1; i < uct->num_preds(); i++) {
  1446         Block *pb = _bbs[uct->pred(i)->_idx];
  1447         if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
  1448           worklist.push(pb);
  1454 #ifdef ASSERT
  1455   for (uint i = 0; i < _num_blocks; i++ ) {
  1456     Block *b = _blocks[i];
  1457     assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
  1459 #endif
  1461 #ifndef PRODUCT
  1462   if (PrintCFGBlockFreq) {
  1463     tty->print_cr("CFG Block Frequencies");
  1464     _root_loop->dump_tree();
  1465     if (Verbose) {
  1466       tty->print_cr("PhaseCFG dump");
  1467       dump();
  1468       tty->print_cr("Node dump");
  1469       _root->dump(99999);
  1472 #endif
  1475 //----------------------------create_loop_tree--------------------------------
  1476 // Create a loop tree from the CFG
  1477 CFGLoop* PhaseCFG::create_loop_tree() {
  1479 #ifdef ASSERT
  1480   assert( _blocks[0] == _broot, "" );
  1481   for (uint i = 0; i < _num_blocks; i++ ) {
  1482     Block *b = _blocks[i];
  1483     // Check that the _loop fields are clear... we could clear them if not.
  1484     assert(b->_loop == NULL, "clear _loop expected");
  1485     // Sanity check that the RPO numbering is reflected in the _blocks array.
  1486     // It doesn't have to be for the loop tree to be built, but if it is not,
  1487     // then the blocks have been reordered since dom graph building... which
  1488     // calls the RPO numbering into question.
  1489     assert(b->_rpo == i, "unexpected reverse post order number");
  1491 #endif
  1493   int idct = 0;
  1494   CFGLoop* root_loop = new CFGLoop(idct++);
  1496   Block_List worklist;
  1498   // Assign blocks to loops
  1499   for(uint i = _num_blocks - 1; i > 0; i-- ) { // skip Root block
  1500     Block *b = _blocks[i];
  1502     if (b->head()->is_Loop()) {
  1503       Block* loop_head = b;
  1504       assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
  1505       Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
  1506       Block* tail = _bbs[tail_n->_idx];
  1508       // Defensively filter out Loop nodes for non-single-entry loops.
  1509       // For all reasonable loops, the head occurs before the tail in RPO.
  1510       if (i <= tail->_rpo) {
  1512         // The tail and (recursive) predecessors of the tail
  1513         // are made members of a new loop.
  1515         assert(worklist.size() == 0, "nonempty worklist");
  1516         CFGLoop* nloop = new CFGLoop(idct++);
  1517         assert(loop_head->_loop == NULL, "just checking");
  1518         loop_head->_loop = nloop;
  1519         // Add to nloop so push_pred() will skip over inner loops
  1520         nloop->add_member(loop_head);
  1521         nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, _bbs);
  1523         while (worklist.size() > 0) {
  1524           Block* member = worklist.pop();
  1525           if (member != loop_head) {
  1526             for (uint j = 1; j < member->num_preds(); j++) {
  1527               nloop->push_pred(member, j, worklist, _bbs);
  1535   // Create a member list for each loop consisting
  1536   // of both blocks and (immediate child) loops.
  1537   for (uint i = 0; i < _num_blocks; i++) {
  1538     Block *b = _blocks[i];
  1539     CFGLoop* lp = b->_loop;
  1540     if (lp == NULL) {
  1541       // Not assigned to a loop. Add it to the method's pseudo loop.
  1542       b->_loop = root_loop;
  1543       lp = root_loop;
  1545     if (lp == root_loop || b != lp->head()) { // loop heads are already members
  1546       lp->add_member(b);
  1548     if (lp != root_loop) {
  1549       if (lp->parent() == NULL) {
  1550         // Not a nested loop. Make it a child of the method's pseudo loop.
  1551         root_loop->add_nested_loop(lp);
  1553       if (b == lp->head()) {
  1554         // Add nested loop to member list of parent loop.
  1555         lp->parent()->add_member(lp);
  1560   return root_loop;
  1563 //------------------------------push_pred--------------------------------------
  1564 void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk) {
  1565   Node* pred_n = blk->pred(i);
  1566   Block* pred = node_to_blk[pred_n->_idx];
  1567   CFGLoop *pred_loop = pred->_loop;
  1568   if (pred_loop == NULL) {
  1569     // Filter out blocks for non-single-entry loops.
  1570     // For all reasonable loops, the head occurs before the tail in RPO.
  1571     if (pred->_rpo > head()->_rpo) {
  1572       pred->_loop = this;
  1573       worklist.push(pred);
  1575   } else if (pred_loop != this) {
  1576     // Nested loop.
  1577     while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
  1578       pred_loop = pred_loop->_parent;
  1580     // Make pred's loop be a child
  1581     if (pred_loop->_parent == NULL) {
  1582       add_nested_loop(pred_loop);
  1583       // Continue with loop entry predecessor.
  1584       Block* pred_head = pred_loop->head();
  1585       assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
  1586       assert(pred_head != head(), "loop head in only one loop");
  1587       push_pred(pred_head, LoopNode::EntryControl, worklist, node_to_blk);
  1588     } else {
  1589       assert(pred_loop->_parent == this && _parent == NULL, "just checking");
  1594 //------------------------------add_nested_loop--------------------------------
  1595 // Make cl a child of the current loop in the loop tree.
  1596 void CFGLoop::add_nested_loop(CFGLoop* cl) {
  1597   assert(_parent == NULL, "no parent yet");
  1598   assert(cl != this, "not my own parent");
  1599   cl->_parent = this;
  1600   CFGLoop* ch = _child;
  1601   if (ch == NULL) {
  1602     _child = cl;
  1603   } else {
  1604     while (ch->_sibling != NULL) { ch = ch->_sibling; }
  1605     ch->_sibling = cl;
  1609 //------------------------------compute_loop_depth-----------------------------
  1610 // Store the loop depth in each CFGLoop object.
  1611 // Recursively walk the children to do the same for them.
  1612 void CFGLoop::compute_loop_depth(int depth) {
  1613   _depth = depth;
  1614   CFGLoop* ch = _child;
  1615   while (ch != NULL) {
  1616     ch->compute_loop_depth(depth + 1);
  1617     ch = ch->_sibling;
  1621 //------------------------------compute_freq-----------------------------------
  1622 // Compute the frequency of each block and loop, relative to a single entry
  1623 // into the dominating loop head.
  1624 void CFGLoop::compute_freq() {
  1625   // Bottom up traversal of loop tree (visit inner loops first.)
  1626   // Set loop head frequency to 1.0, then transitively
  1627   // compute frequency for all successors in the loop,
  1628   // as well as for each exit edge.  Inner loops are
  1629   // treated as single blocks with loop exit targets
  1630   // as the successor blocks.
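         // Illustration with made-up numbers: with the head at 1.0, a body
         // block reached from the head with probability 0.9 accumulates
         // frequency 0.9, while an edge leaving the loop with probability 0.1
         // is recorded in _exits rather than propagated into the loop.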
  1632   // Nested loops first
  1633   CFGLoop* ch = _child;
  1634   while (ch != NULL) {
  1635     ch->compute_freq();
  1636     ch = ch->_sibling;
  1638   assert (_members.length() > 0, "no empty loops");
  1639   Block* hd = head();
  1640   hd->_freq = 1.0f;
  1641   for (int i = 0; i < _members.length(); i++) {
  1642     CFGElement* s = _members.at(i);
  1643     float freq = s->_freq;
  1644     if (s->is_block()) {
  1645       Block* b = s->as_Block();
  1646       for (uint j = 0; j < b->_num_succs; j++) {
  1647         Block* sb = b->_succs[j];
  1648         update_succ_freq(sb, freq * b->succ_prob(j));
  1650     } else {
  1651       CFGLoop* lp = s->as_CFGLoop();
  1652       assert(lp->_parent == this, "immediate child");
  1653       for (int k = 0; k < lp->_exits.length(); k++) {
  1654         Block* eb = lp->_exits.at(k).get_target();
  1655         float prob = lp->_exits.at(k).get_prob();
  1656         update_succ_freq(eb, freq * prob);
  1661   // For all loops other than the outer, "method" loop,
  1662   // sum and normalize the exit probability. The "method" loop
  1663   // should keep the initial exit probability of 1, so that
  1664   // inner blocks do not get erroneously scaled.
  1665   if (_depth != 0) {
  1666     // Total the exit probabilities for this loop.
  1667     float exits_sum = 0.0f;
  1668     for (int i = 0; i < _exits.length(); i++) {
  1669       exits_sum += _exits.at(i).get_prob();
  1672     // Normalize the exit probabilities. Until now, the
  1673     // probabilities estimate the chance of exit in
  1674     // a single loop iteration; afterward, they estimate
  1675     // the probability of exit per loop entry.
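           // Worked example with made-up numbers: per-iteration exit
           // probabilities of 0.05 and 0.15 sum to 0.2, so they normalize to
           // 0.25 and 0.75; the saved _exit_prob of 0.2 corresponds to an
           // estimated trip count on the order of 1/0.2 = 5.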
  1676     for (int i = 0; i < _exits.length(); i++) {
  1677       Block* et = _exits.at(i).get_target();
  1678       float new_prob = 0.0f;
  1679       if (_exits.at(i).get_prob() > 0.0f) {
  1680         new_prob = _exits.at(i).get_prob() / exits_sum;
  1682       BlockProbPair bpp(et, new_prob);
  1683       _exits.at_put(i, bpp);
  1686     // Save the total, but guard against unreasonable probability,
  1687     // as the value is used to estimate the loop trip count.
  1688     // An infinite trip count would blur relative block
  1689     // frequencies.
  1690     if (exits_sum > 1.0f) exits_sum = 1.0f;
  1691     if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
  1692     _exit_prob = exits_sum;
  1696 //------------------------------succ_prob-------------------------------------
  1697 // Determine the probability of reaching successor 'i' from the receiver block.
  1698 float Block::succ_prob(uint i) {
  1699   int eidx = end_idx();
  1700   Node *n = _nodes[eidx];  // Get ending Node
  1702   int op = n->Opcode();
  1703   if (n->is_Mach()) {
  1704     if (n->is_MachNullCheck()) {
  1705       // Can only reach here if called after lcm. The original Op_If is gone,
  1706       // so we attempt to infer the probability from one or both of the
  1707       // successor blocks.
  1708       assert(_num_succs == 2, "expecting 2 successors of a null check");
  1709       // If either successor has only one predecessor, then the
  1710       // probability estimate can be derived using the
  1711       // relative frequency of the successor and this block.
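             // (Illustrative numbers, not from the original comment: if this
             // block's frequency is 1.0 and the single-predecessor successor's
             // frequency is 0.01, the estimated probability is 0.01/1.0 = 1%.)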
  1712       if (_succs[i]->num_preds() == 2) {
  1713         return _succs[i]->_freq / _freq;
  1714       } else if (_succs[1-i]->num_preds() == 2) {
  1715         return 1 - (_succs[1-i]->_freq / _freq);
  1716       } else {
  1717         // Estimate using both successor frequencies
  1718         float freq = _succs[i]->_freq;
  1719         return freq / (freq + _succs[1-i]->_freq);
  1722     op = n->as_Mach()->ideal_Opcode();
  1726   // Switch on branch type
  1727   switch( op ) {
  1728   case Op_CountedLoopEnd:
  1729   case Op_If: {
  1730     assert (i < 2, "just checking");
  1731     // Conditionals pass on only part of their frequency
  1732     float prob  = n->as_MachIf()->_prob;
  1733     assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
  1734     // If succ[i] is the FALSE branch, invert path info
  1735     if( _nodes[i + eidx + 1]->Opcode() == Op_IfFalse ) {
  1736       return 1.0f - prob; // not taken
  1737     } else {
  1738       return prob; // taken
  1742   case Op_Jump:
  1743     // Divide the frequency between all successors evenly
  1744     return 1.0f/_num_succs;
  1746   case Op_Catch: {
  1747     const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
  1748     if (ci->_con == CatchProjNode::fall_through_index) {
  1749       // Fall-thru path gets the lion's share.
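             // (For illustration: with three successors and PROB_UNLIKELY_MAG(5)
             // on the order of 1e-5, the fall-through path keeps roughly
             // 1 - 3e-5 of the frequency.)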
  1750       return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
  1751     } else {
  1752       // Presume exceptional paths are equally unlikely
  1753       return PROB_UNLIKELY_MAG(5);
  1757   case Op_Root:
  1758   case Op_Goto:
  1759     // Pass frequency straight thru to target
  1760     return 1.0f;
  1762   case Op_NeverBranch:
  1763     return 0.0f;
  1765   case Op_TailCall:
  1766   case Op_TailJump:
  1767   case Op_Return:
  1768   case Op_Halt:
  1769   case Op_Rethrow:
  1770     // Do not push out freq to root block
  1771     return 0.0f;
  1773   default:
  1774     ShouldNotReachHere();
  1777   return 0.0f;
  1780 //------------------------------num_fall_throughs-----------------------------
  1781 // Return the number of fall-through candidates for a block
  1782 int Block::num_fall_throughs() {
  1783   int eidx = end_idx();
  1784   Node *n = _nodes[eidx];  // Get ending Node
  1786   int op = n->Opcode();
  1787   if (n->is_Mach()) {
  1788     if (n->is_MachNullCheck()) {
  1789       // In theory, either side can fall through; for simplicity's sake,
  1790       // let's say only the false branch can for now.
  1791       return 1;
  1793     op = n->as_Mach()->ideal_Opcode();
  1796   // Switch on branch type
  1797   switch( op ) {
  1798   case Op_CountedLoopEnd:
  1799   case Op_If:
  1800     return 2;
  1802   case Op_Root:
  1803   case Op_Goto:
  1804     return 1;
  1806   case Op_Catch: {
  1807     for (uint i = 0; i < _num_succs; i++) {
  1808       const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
  1809       if (ci->_con == CatchProjNode::fall_through_index) {
  1810         return 1;
  1813     return 0;
  1816   case Op_Jump:
  1817   case Op_NeverBranch:
  1818   case Op_TailCall:
  1819   case Op_TailJump:
  1820   case Op_Return:
  1821   case Op_Halt:
  1822   case Op_Rethrow:
  1823     return 0;
  1825   default:
  1826     ShouldNotReachHere();
  1829   return 0;
  1832 //------------------------------succ_fall_through-----------------------------
  1833 // Return true if a specific successor could be a fall-through target.
  1834 bool Block::succ_fall_through(uint i) {
  1835   int eidx = end_idx();
  1836   Node *n = _nodes[eidx];  // Get ending Node
  1838   int op = n->Opcode();
  1839   if (n->is_Mach()) {
  1840     if (n->is_MachNullCheck()) {
  1841       // In theory, either side can fall through; for simplicity's sake,
  1842       // let's say only the false branch can for now.
  1843       return _nodes[i + eidx + 1]->Opcode() == Op_IfFalse;
  1845     op = n->as_Mach()->ideal_Opcode();
  1848   // Switch on branch type
  1849   switch( op ) {
  1850   case Op_CountedLoopEnd:
  1851   case Op_If:
  1852   case Op_Root:
  1853   case Op_Goto:
  1854     return true;
  1856   case Op_Catch: {
  1857     const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
  1858     return ci->_con == CatchProjNode::fall_through_index;
  1861   case Op_Jump:
  1862   case Op_NeverBranch:
  1863   case Op_TailCall:
  1864   case Op_TailJump:
  1865   case Op_Return:
  1866   case Op_Halt:
  1867   case Op_Rethrow:
  1868     return false;
  1870   default:
  1871     ShouldNotReachHere();
  1874   return false;
  1877 //------------------------------update_uncommon_branch------------------------
  1878 // Update the probability of a two-way branch to be uncommon
  1879 void Block::update_uncommon_branch(Block* ub) {
  1880   int eidx = end_idx();
  1881   Node *n = _nodes[eidx];  // Get ending Node
  1883   int op = n->as_Mach()->ideal_Opcode();
  1885   assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
  1886   assert(num_fall_throughs() == 2, "must be a two way branch block");
  1888   // Which successor is ub?
  1889   uint s;
  1890   for (s = 0; s < _num_succs; s++) {
  1891     if (_succs[s] == ub) break;
  1893   assert(s < 2, "uncommon successor must be found");
  1895   // If ub is the true path, make the probability small; otherwise
  1896   // ub is the false path, so make the probability large.
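         // Worked example with a made-up probability: if _prob is 0.5 and ub
         // hangs off the IfFalse projection, p is inverted to 0.5, clamped to
         // PROB_MIN, and inverted back, leaving _prob just below 1.0 so the
         // uncommon (false) path is predicted essentially never taken.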
  1897   bool invert = (_nodes[s + eidx + 1]->Opcode() == Op_IfFalse);
  1899   // Get existing probability
  1900   float p = n->as_MachIf()->_prob;
  1902   if (invert) p = 1.0 - p;
  1903   if (p > PROB_MIN) {
  1904     p = PROB_MIN;
  1906   if (invert) p = 1.0 - p;
  1908   n->as_MachIf()->_prob = p;
  1911 //------------------------------update_succ_freq-------------------------------
  1912 // Update the appropriate frequency associated with block 'b', a successor of
  1913 // a block in this loop.
  1914 void CFGLoop::update_succ_freq(Block* b, float freq) {
  1915   if (b->_loop == this) {
  1916     if (b == head()) {
  1917       // back branch within the loop
  1918       // Do nothing now; the loop-carried frequency will be
  1919       // adjusted later in scale_freq().
  1920     } else {
  1921       // simple branch within the loop
  1922       b->_freq += freq;
  1924   } else if (!in_loop_nest(b)) {
  1925     // branch is exit from this loop
  1926     BlockProbPair bpp(b, freq);
  1927     _exits.append(bpp);
  1928   } else {
  1929     // branch into nested loop
  1930     CFGLoop* ch = b->_loop;
  1931     ch->_freq += freq;
  1935 //------------------------------in_loop_nest-----------------------------------
  1936 // Determine if block b is in the receiver's loop nest.
  1937 bool CFGLoop::in_loop_nest(Block* b) {
  1938   int depth = _depth;
  1939   CFGLoop* b_loop = b->_loop;
  1940   int b_depth = b_loop->_depth;
  1941   if (depth == b_depth) {
  1942     return true;
  1944   while (b_depth > depth) {
  1945     b_loop = b_loop->_parent;
  1946     b_depth = b_loop->_depth;
  1948   return b_loop == this;
  1951 //------------------------------scale_freq-------------------------------------
  1952 // Scale frequency of loops and blocks by trip counts from outer loops
  1953 // Do a top down traversal of loop tree (visit outer loops first.)
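       // Illustration with made-up numbers: a loop entered with frequency 1.0
       // and an estimated trip count of 5 gets loop_freq 5.0; a member block
       // whose relative frequency is 0.8 is then scaled to 4.0, and nested
       // loops repeat the scaling with their own trip counts.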
  1954 void CFGLoop::scale_freq() {
  1955   float loop_freq = _freq * trip_count();
  1956   _freq = loop_freq;
  1957   for (int i = 0; i < _members.length(); i++) {
  1958     CFGElement* s = _members.at(i);
  1959     float block_freq = s->_freq * loop_freq;
  1960     if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
  1961       block_freq = MIN_BLOCK_FREQUENCY;
  1962     s->_freq = block_freq;
  1964   CFGLoop* ch = _child;
  1965   while (ch != NULL) {
  1966     ch->scale_freq();
  1967     ch = ch->_sibling;
  1971 // Frequency of outer loop
  1972 float CFGLoop::outer_loop_freq() const {
  1973   if (_child != NULL) {
  1974     return _child->_freq;
  1976   return _freq;
  1979 #ifndef PRODUCT
  1980 //------------------------------dump_tree--------------------------------------
  1981 void CFGLoop::dump_tree() const {
  1982   dump();
  1983   if (_child != NULL)   _child->dump_tree();
  1984   if (_sibling != NULL) _sibling->dump_tree();
  1987 //------------------------------dump-------------------------------------------
  1988 void CFGLoop::dump() const {
  1989   for (int i = 0; i < _depth; i++) tty->print("   ");
  1990   tty->print("%s: %d  trip_count: %6.0f freq: %6.0f\n",
  1991              _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
  1992   for (int i = 0; i < _depth; i++) tty->print("   ");
  1993   tty->print("         members:");
  1994   int k = 0;
  1995   for (int i = 0; i < _members.length(); i++) {
  1996     if (k++ >= 6) {
  1997       tty->print("\n              ");
  1998       for (int j = 0; j < _depth+1; j++) tty->print("   ");
  1999       k = 0;
  2001     CFGElement *s = _members.at(i);
  2002     if (s->is_block()) {
  2003       Block *b = s->as_Block();
  2004       tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
  2005     } else {
  2006       CFGLoop* lp = s->as_CFGLoop();
  2007       tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
  2010   tty->print("\n");
  2011   for (int i = 0; i < _depth; i++) tty->print("   ");
  2012   tty->print("         exits:  ");
  2013   k = 0;
  2014   for (int i = 0; i < _exits.length(); i++) {
  2015     if (k++ >= 7) {
  2016       tty->print("\n              ");
  2017       for (int j = 0; j < _depth+1; j++) tty->print("   ");
  2018       k = 0;
  2020     Block *blk = _exits.at(i).get_target();
  2021     float prob = _exits.at(i).get_prob();
  2022     tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
  2024   tty->print("\n");
  2026 #endif
