src/share/vm/opto/output.cpp

author:      kvn
date:        Wed, 16 Nov 2011 09:13:57 -0800
changeset:   3311:1bd45abaa507
parent:      3055:739a9abbbd4b
child:       3406:e9a5e0a812c8
permissions: -rw-r--r--

6890673: Eliminate allocations immediately after EA
Summary: Try to eliminate allocations and related locks immediately after escape analysis.
Reviewed-by: never

     1 /*
     2  * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "asm/assembler.inline.hpp"
    27 #include "code/debugInfo.hpp"
    28 #include "code/debugInfoRec.hpp"
    29 #include "compiler/compileBroker.hpp"
    30 #include "compiler/oopMap.hpp"
    31 #include "memory/allocation.inline.hpp"
    32 #include "opto/callnode.hpp"
    33 #include "opto/cfgnode.hpp"
    34 #include "opto/locknode.hpp"
    35 #include "opto/machnode.hpp"
    36 #include "opto/output.hpp"
    37 #include "opto/regalloc.hpp"
    38 #include "opto/runtime.hpp"
    39 #include "opto/subnode.hpp"
    40 #include "opto/type.hpp"
    41 #include "runtime/handles.inline.hpp"
    42 #include "utilities/xmlstream.hpp"
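        // These helpers are defined elsewhere (in the platform-specific architecture
        // sources); this file only needs their size/relocation estimates and emitters.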
    44 extern uint size_java_to_interp();
    45 extern uint reloc_java_to_interp();
    46 extern uint size_exception_handler();
    47 extern uint size_deopt_handler();
    49 #ifndef PRODUCT
    50 #define DEBUG_ARG(x) , x
    51 #else
    52 #define DEBUG_ARG(x)
    53 #endif
    55 extern int emit_exception_handler(CodeBuffer &cbuf);
    56 extern int emit_deopt_handler(CodeBuffer &cbuf);
    58 //------------------------------Output-----------------------------------------
    59 // Convert Nodes to instruction bits and pass off to the VM
    60 void Compile::Output() {
     61   // The RootNode's block (broot) should be empty at this point.
    62   assert( _cfg->_broot->_nodes.size() == 0, "" );
    64   // The number of new nodes (mostly MachNop) is proportional to
    65   // the number of java calls and inner loops which are aligned.
    66   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
    67                             C->inner_loops()*(OptoLoopAlignment-1)),
    68                            "out of nodes before code generation" ) ) {
    69     return;
    70   }
    71   // Make sure I can find the Start Node
    72   Block_Array& bbs = _cfg->_bbs;
    73   Block *entry = _cfg->_blocks[1];
    74   Block *broot = _cfg->_broot;
    76   const StartNode *start = entry->_nodes[0]->as_Start();
    78   // Replace StartNode with prolog
    79   MachPrologNode *prolog = new (this) MachPrologNode();
    80   entry->_nodes.map( 0, prolog );
    81   bbs.map( prolog->_idx, entry );
    82   bbs.map( start->_idx, NULL ); // start is no longer in any block
    84   // Virtual methods need an unverified entry point
    86   if( is_osr_compilation() ) {
    87     if( PoisonOSREntry ) {
    88       // TODO: Should use a ShouldNotReachHereNode...
    89       _cfg->insert( broot, 0, new (this) MachBreakpointNode() );
    90     }
    91   } else {
    92     if( _method && !_method->flags().is_static() ) {
    93       // Insert unvalidated entry point
    94       _cfg->insert( broot, 0, new (this) MachUEPNode() );
    95     }
    97   }
   100   // Break before main entry point
   101   if( (_method && _method->break_at_execute())
   102 #ifndef PRODUCT
   103     ||(OptoBreakpoint && is_method_compilation())
   104     ||(OptoBreakpointOSR && is_osr_compilation())
   105     ||(OptoBreakpointC2R && !_method)
   106 #endif
   107     ) {
   108     // checking for _method means that OptoBreakpoint does not apply to
   109     // runtime stubs or frame converters
   110     _cfg->insert( entry, 1, new (this) MachBreakpointNode() );
   111   }
   113   // Insert epilogs before every return
   114   for( uint i=0; i<_cfg->_num_blocks; i++ ) {
   115     Block *b = _cfg->_blocks[i];
   116     if( !b->is_connector() && b->non_connector_successor(0) == _cfg->_broot ) { // Found a program exit point?
   117       Node *m = b->end();
   118       if( m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt ) {
   119         MachEpilogNode *epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
   120         b->add_inst( epilog );
   121         bbs.map(epilog->_idx, b);
   122         //_regalloc->set_bad(epilog->_idx); // Already initialized this way.
   123       }
   124     }
   125   }
   127 # ifdef ENABLE_ZAP_DEAD_LOCALS
   128   if ( ZapDeadCompiledLocals )  Insert_zap_nodes();
   129 # endif
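          // blk_starts[i] will hold the code offset at which block i starts; the
          // extra slot records the end offset of the last block (the total code size).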
   131   uint* blk_starts = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks+1);
   132   blk_starts[0]    = 0;
   134   // Initialize code buffer and process short branches.
   135   CodeBuffer* cb = init_buffer(blk_starts);
   137   if (cb == NULL || failing())  return;
   139   ScheduleAndBundle();
   141 #ifndef PRODUCT
   142   if (trace_opto_output()) {
   143     tty->print("\n---- After ScheduleAndBundle ----\n");
   144     for (uint i = 0; i < _cfg->_num_blocks; i++) {
   145       tty->print("\nBB#%03d:\n", i);
   146       Block *bb = _cfg->_blocks[i];
   147       for (uint j = 0; j < bb->_nodes.size(); j++) {
   148         Node *n = bb->_nodes[j];
   149         OptoReg::Name reg = _regalloc->get_reg_first(n);
   150         tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
   151         n->dump();
   152       }
   153     }
   154   }
   155 #endif
   157   if (failing())  return;
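          // Build the OopMaps for all safepoints, now that scheduling has fixed the
          // final instruction order.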
   159   BuildOopMaps();
   161   if (failing())  return;
   163   fill_buffer(cb, blk_starts);
   164 }
   166 bool Compile::need_stack_bang(int frame_size_in_bytes) const {
   167   // Determine if we need to generate a stack overflow check.
   168   // Do it if the method is not a stub function and
   169   // has java calls or has frame size > vm_page_size/8.
   170   return (stub_function() == NULL &&
   171           (has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3));
   172 }
   174 bool Compile::need_register_stack_bang() const {
   175   // Determine if we need to generate a register stack overflow check.
   176   // This is only used on architectures which have split register
    177 // and memory stacks (i.e., IA64).
   178   // Bang if the method is not a stub function and has java calls
   179   return (stub_function() == NULL && has_java_calls());
   180 }
   182 # ifdef ENABLE_ZAP_DEAD_LOCALS
   185 // In order to catch compiler oop-map bugs, we have implemented
    186 // a debugging mode called ZapDeadCompiledLocals.
   187 // This mode causes the compiler to insert a call to a runtime routine,
   188 // "zap_dead_locals", right before each place in compiled code
   189 // that could potentially be a gc-point (i.e., a safepoint or oop map point).
   190 // The runtime routine checks that locations mapped as oops are really
   191 // oops, that locations mapped as values do not look like oops,
   192 // and that locations mapped as dead are not used later
   193 // (by zapping them to an invalid address).
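        // Counts every compilation that reaches Insert_zap_nodes (including skipped
        // ones); CompileZapFirst/CompileZapLast window which compilations get zapped.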
   195 int Compile::_CompiledZap_count = 0;
   197 void Compile::Insert_zap_nodes() {
   198   bool skip = false;
    201   // Dink with static counts because code without the extra
    202   // runtime calls is MUCH faster for debugging purposes
   204        if ( CompileZapFirst  ==  0  ) ; // nothing special
   205   else if ( CompileZapFirst  >  CompiledZap_count() )  skip = true;
   206   else if ( CompileZapFirst  == CompiledZap_count() )
   207     warning("starting zap compilation after skipping");
   209        if ( CompileZapLast  ==  -1  ) ; // nothing special
   210   else if ( CompileZapLast  <   CompiledZap_count() )  skip = true;
   211   else if ( CompileZapLast  ==  CompiledZap_count() )
   212     warning("about to compile last zap");
   214   ++_CompiledZap_count; // counts skipped zaps, too
   216   if ( skip )  return;
   219   if ( _method == NULL )
    220     return; // no safepoints/oopmaps emitted for calls in stubs, so we don't care
   222   // Insert call to zap runtime stub before every node with an oop map
   223   for( uint i=0; i<_cfg->_num_blocks; i++ ) {
   224     Block *b = _cfg->_blocks[i];
   225     for ( uint j = 0;  j < b->_nodes.size();  ++j ) {
   226       Node *n = b->_nodes[j];
    228       // Determine whether we should insert a zap-a-lot node in output.
    229       // We do that for all nodes that have oopmap info, except for calls
    230       // to allocation.  Calls to allocation pass in the old top-of-eden pointer
    231       // and expect the C code to reset it.  Hence, there can be no safepoints between
   232       // the inlined-allocation and the call to new_Java, etc.
   233       // We also cannot zap monitor calls, as they must hold the microlock
   234       // during the call to Zap, which also wants to grab the microlock.
   235       bool insert = n->is_MachSafePoint() && (n->as_MachSafePoint()->oop_map() != NULL);
   236       if ( insert ) { // it is MachSafePoint
   237         if ( !n->is_MachCall() ) {
   238           insert = false;
   239         } else if ( n->is_MachCall() ) {
   240           MachCallNode* call = n->as_MachCall();
   241           if (call->entry_point() == OptoRuntime::new_instance_Java() ||
   242               call->entry_point() == OptoRuntime::new_array_Java() ||
   243               call->entry_point() == OptoRuntime::multianewarray2_Java() ||
   244               call->entry_point() == OptoRuntime::multianewarray3_Java() ||
   245               call->entry_point() == OptoRuntime::multianewarray4_Java() ||
   246               call->entry_point() == OptoRuntime::multianewarray5_Java() ||
   247               call->entry_point() == OptoRuntime::slow_arraycopy_Java() ||
   248               call->entry_point() == OptoRuntime::complete_monitor_locking_Java()
   249               ) {
   250             insert = false;
   251           }
   252         }
   253         if (insert) {
   254           Node *zap = call_zap_node(n->as_MachSafePoint(), i);
   255           b->_nodes.insert( j, zap );
   256           _cfg->_bbs.map( zap->_idx, b );
   257           ++j;
   258         }
   259       }
   260     }
   261   }
   262 }
   265 Node* Compile::call_zap_node(MachSafePointNode* node_to_check, int block_no) {
   266   const TypeFunc *tf = OptoRuntime::zap_dead_locals_Type();
   267   CallStaticJavaNode* ideal_node =
   268     new (this, tf->domain()->cnt()) CallStaticJavaNode( tf,
   269          OptoRuntime::zap_dead_locals_stub(_method->flags().is_native()),
   270                             "call zap dead locals stub", 0, TypePtr::BOTTOM);
   271   // We need to copy the OopMap from the site we're zapping at.
   272   // We have to make a copy, because the zap site might not be
   273   // a call site, and zap_dead is a call site.
   274   OopMap* clone = node_to_check->oop_map()->deep_copy();
   276   // Add the cloned OopMap to the zap node
   277   ideal_node->set_oop_map(clone);
   278   return _matcher->match_sfpt(ideal_node);
   279 }
   281 //------------------------------is_node_getting_a_safepoint--------------------
   282 bool Compile::is_node_getting_a_safepoint( Node* n) {
   283   // This code duplicates the logic prior to the call of add_safepoint
   284   // below in this file.
   285   if( n->is_MachSafePoint() ) return true;
   286   return false;
   287 }
   289 # endif // ENABLE_ZAP_DEAD_LOCALS
   291 //------------------------------compute_loop_first_inst_sizes------------------
   292 // Compute the size of first NumberOfLoopInstrToAlign instructions at the top
   293 // of a loop. When aligning a loop we need to provide enough instructions
    294 // in the cpu's fetch buffer to feed the decoders. The loop alignment could be
    295 // avoided if we have enough instructions in the fetch buffer at the head of a loop.
   296 // By default, the size is set to 999999 by Block's constructor so that
   297 // a loop will be aligned if the size is not reset here.
   298 //
   299 // Note: Mach instructions could contain several HW instructions
   300 // so the size is estimated only.
   301 //
   302 void Compile::compute_loop_first_inst_sizes() {
   303   // The next condition is used to gate the loop alignment optimization.
    304   // Don't align a loop if there are enough instructions at the head of a loop
    305   // or the alignment padding is larger than MaxLoopPad. By default, MaxLoopPad
    306   // is equal to OptoLoopAlignment-1 except on new Intel cpus, where it is
    307   // equal to 11 bytes, the size of the largest address NOP instruction.
   308   if( MaxLoopPad < OptoLoopAlignment-1 ) {
   309     uint last_block = _cfg->_num_blocks-1;
   310     for( uint i=1; i <= last_block; i++ ) {
   311       Block *b = _cfg->_blocks[i];
   312       // Check the first loop's block which requires an alignment.
   313       if( b->loop_alignment() > (uint)relocInfo::addr_unit() ) {
   314         uint sum_size = 0;
   315         uint inst_cnt = NumberOfLoopInstrToAlign;
   316         inst_cnt = b->compute_first_inst_size(sum_size, inst_cnt, _regalloc);
    318         // Check subsequent fallthrough blocks if the loop's first
    319         // block(s) do not have enough instructions.
   320         Block *nb = b;
   321         while( inst_cnt > 0 &&
   322                i < last_block &&
   323                !_cfg->_blocks[i+1]->has_loop_alignment() &&
   324                !nb->has_successor(b) ) {
   325           i++;
   326           nb = _cfg->_blocks[i];
   327           inst_cnt  = nb->compute_first_inst_size(sum_size, inst_cnt, _regalloc);
   328         } // while( inst_cnt > 0 && i < last_block  )
   330         b->set_first_inst_size(sum_size);
    331       } // if( b->loop_alignment() > (uint)relocInfo::addr_unit() )
   332     } // for( i <= last_block )
   333   } // if( MaxLoopPad < OptoLoopAlignment-1 )
   334 }
   336 //----------------------shorten_branches---------------------------------------
   337 // The architecture description provides short branch variants for some long
   338 // branch instructions. Replace eligible long branches with short branches.
   339 void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size, int& stub_size) {
   341   // ------------------
   342   // Compute size of each block, method size, and relocation information size
   343   uint nblocks  = _cfg->_num_blocks;
   345   uint*      jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
   346   uint*      jmp_size   = NEW_RESOURCE_ARRAY(uint,nblocks);
   347   int*       jmp_nidx   = NEW_RESOURCE_ARRAY(int ,nblocks);
   348   DEBUG_ONLY( uint *jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks); )
   349   DEBUG_ONLY( uint *jmp_rule = NEW_RESOURCE_ARRAY(uint,nblocks); )
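          // Set when at least one branch may still be replaceable by its short form.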
   351   bool has_short_branch_candidate = false;
   353   // Initialize the sizes to 0
   354   code_size  = 0;          // Size in bytes of generated code
   355   stub_size  = 0;          // Size in bytes of all stub entries
   356   // Size in bytes of all relocation entries, including those in local stubs.
   357   // Start with 2-bytes of reloc info for the unvalidated entry point
   358   reloc_size = 1;          // Number of relocation entries
   360   // Make three passes.  The first computes pessimistic blk_starts,
   361   // relative jmp_offset and reloc_size information.  The second performs
   362   // short branch substitution using the pessimistic sizing.  The
   363   // third inserts nops where needed.
   365   // Step one, perform a pessimistic sizing pass.
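          // Track the end offsets of the previous call and of the previous
          // "avoid back to back" instruction, so adjacent ones can be separated by a nop.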
   366   uint last_call_adr = max_uint;
   367   uint last_avoid_back_to_back_adr = max_uint;
   368   uint nop_size = (new (this) MachNopNode())->size(_regalloc);
   369   for (uint i = 0; i < nblocks; i++) { // For all blocks
   370     Block *b = _cfg->_blocks[i];
   372     // During short branch replacement, we store the relative (to blk_starts)
   373     // offset of jump in jmp_offset, rather than the absolute offset of jump.
   374     // This is so that we do not need to recompute sizes of all nodes when
   375     // we compute correct blk_starts in our next sizing pass.
   376     jmp_offset[i] = 0;
   377     jmp_size[i]   = 0;
   378     jmp_nidx[i]   = -1;
   379     DEBUG_ONLY( jmp_target[i] = 0; )
   380     DEBUG_ONLY( jmp_rule[i]   = 0; )
   382     // Sum all instruction sizes to compute block size
   383     uint last_inst = b->_nodes.size();
   384     uint blk_size = 0;
   385     for (uint j = 0; j < last_inst; j++) {
   386       Node* nj = b->_nodes[j];
   387       // Handle machine instruction nodes
   388       if (nj->is_Mach()) {
   389         MachNode *mach = nj->as_Mach();
   390         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
   391         reloc_size += mach->reloc();
   392         if( mach->is_MachCall() ) {
   393           MachCallNode *mcall = mach->as_MachCall();
   394           // This destination address is NOT PC-relative
   396           mcall->method_set((intptr_t)mcall->entry_point());
   398           if( mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method ) {
   399             stub_size  += size_java_to_interp();
   400             reloc_size += reloc_java_to_interp();
   401           }
   402         } else if (mach->is_MachSafePoint()) {
   403           // If call/safepoint are adjacent, account for possible
   404           // nop to disambiguate the two safepoints.
   405           // ScheduleAndBundle() can rearrange nodes in a block,
   406           // check for all offsets inside this block.
   407           if (last_call_adr >= blk_starts[i]) {
   408             blk_size += nop_size;
   409           }
   410         }
   411         if (mach->avoid_back_to_back()) {
   412           // Nop is inserted between "avoid back to back" instructions.
   413           // ScheduleAndBundle() can rearrange nodes in a block,
   414           // check for all offsets inside this block.
   415           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
   416             blk_size += nop_size;
   417           }
   418         }
   419         if (mach->may_be_short_branch()) {
   420           if (!nj->is_MachBranch()) {
   421 #ifndef PRODUCT
   422             nj->dump(3);
   423 #endif
   424             Unimplemented();
   425           }
   426           assert(jmp_nidx[i] == -1, "block should have only one branch");
   427           jmp_offset[i] = blk_size;
   428           jmp_size[i]   = nj->size(_regalloc);
   429           jmp_nidx[i]   = j;
   430           has_short_branch_candidate = true;
   431         }
   432       }
   433       blk_size += nj->size(_regalloc);
   434       // Remember end of call offset
   435       if (nj->is_MachCall() && !nj->is_MachCallLeaf()) {
   436         last_call_adr = blk_starts[i]+blk_size;
   437       }
   438       // Remember end of avoid_back_to_back offset
   439       if (nj->is_Mach() && nj->as_Mach()->avoid_back_to_back()) {
   440         last_avoid_back_to_back_adr = blk_starts[i]+blk_size;
   441       }
   442     }
   444     // When the next block starts a loop, we may insert pad NOP
   445     // instructions.  Since we cannot know our future alignment,
   446     // assume the worst.
   447     if (i< nblocks-1) {
   448       Block *nb = _cfg->_blocks[i+1];
   449       int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
   450       if (max_loop_pad > 0) {
   451         assert(is_power_of_2(max_loop_pad+relocInfo::addr_unit()), "");
   452         blk_size += max_loop_pad;
   453       }
   454     }
   456     // Save block size; update total method size
   457     blk_starts[i+1] = blk_starts[i]+blk_size;
   458   }
   460   // Step two, replace eligible long jumps.
   461   bool progress = true;
   462   uint last_may_be_short_branch_adr = max_uint;
   463   while (has_short_branch_candidate && progress) {
   464     progress = false;
   465     has_short_branch_candidate = false;
   466     int adjust_block_start = 0;
   467     for (uint i = 0; i < nblocks; i++) {
   468       Block *b = _cfg->_blocks[i];
   469       int idx = jmp_nidx[i];
   470       MachNode* mach = (idx == -1) ? NULL: b->_nodes[idx]->as_Mach();
   471       if (mach != NULL && mach->may_be_short_branch()) {
   472 #ifdef ASSERT
   473         assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
   474         int j;
   475         // Find the branch; ignore trailing NOPs.
   476         for (j = b->_nodes.size()-1; j>=0; j--) {
   477           Node* n = b->_nodes[j];
   478           if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
   479             break;
   480         }
   481         assert(j >= 0 && j == idx && b->_nodes[j] == (Node*)mach, "sanity");
   482 #endif
   483         int br_size = jmp_size[i];
   484         int br_offs = blk_starts[i] + jmp_offset[i];
   486         // This requires the TRUE branch target be in succs[0]
   487         uint bnum = b->non_connector_successor(0)->_pre_order;
   488         int offset = blk_starts[bnum] - br_offs;
   489         if (bnum > i) { // adjust following block's offset
   490           offset -= adjust_block_start;
   491         }
   492         // In the following code a nop could be inserted before
   493         // the branch which will increase the backward distance.
   494         bool needs_padding = ((uint)br_offs == last_may_be_short_branch_adr);
   495         if (needs_padding && offset <= 0)
   496           offset -= nop_size;
   498         if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
   499           // We've got a winner.  Replace this branch.
   500           MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
   502           // Update the jmp_size.
   503           int new_size = replacement->size(_regalloc);
   504           int diff     = br_size - new_size;
   505           assert(diff >= (int)nop_size, "short_branch size should be smaller");
    506           // Conservatively take into account padding between
    507           // avoid_back_to_back branches. The previous branch could be
    508           // converted into an avoid_back_to_back branch during the next
    509           // rounds.
   510           if (needs_padding && replacement->avoid_back_to_back()) {
   511             jmp_offset[i] += nop_size;
   512             diff -= nop_size;
   513           }
   514           adjust_block_start += diff;
   515           b->_nodes.map(idx, replacement);
   516           mach->subsume_by(replacement);
   517           mach = replacement;
   518           progress = true;
   520           jmp_size[i] = new_size;
   521           DEBUG_ONLY( jmp_target[i] = bnum; );
   522           DEBUG_ONLY( jmp_rule[i] = mach->rule(); );
   523         } else {
   524           // The jump distance is not short, try again during next iteration.
   525           has_short_branch_candidate = true;
   526         }
   527       } // (mach->may_be_short_branch())
   528       if (mach != NULL && (mach->may_be_short_branch() ||
   529                            mach->avoid_back_to_back())) {
   530         last_may_be_short_branch_adr = blk_starts[i] + jmp_offset[i] + jmp_size[i];
   531       }
   532       blk_starts[i+1] -= adjust_block_start;
   533     }
   534   }
   536 #ifdef ASSERT
   537   for (uint i = 0; i < nblocks; i++) { // For all blocks
   538     if (jmp_target[i] != 0) {
   539       int br_size = jmp_size[i];
   540       int offset = blk_starts[jmp_target[i]]-(blk_starts[i] + jmp_offset[i]);
   541       if (!_matcher->is_short_branch_offset(jmp_rule[i], br_size, offset)) {
   542         tty->print_cr("target (%d) - jmp_offset(%d) = offset (%d), jump_size(%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_offset[i], offset, br_size, i, jmp_target[i]);
   543       }
   544       assert(_matcher->is_short_branch_offset(jmp_rule[i], br_size, offset), "Displacement too large for short jmp");
   545     }
   546   }
   547 #endif
   549   // Step 3, compute the offsets of all blocks, will be done in fill_buffer()
   550   // after ScheduleAndBundle().
   552   // ------------------
   553   // Compute size for code buffer
   554   code_size = blk_starts[nblocks];
   556   // Relocation records
   557   reloc_size += 1;              // Relo entry for exception handler
    559   // Adjust reloc_size to the number of records of relocation info.
    560   // Min is 2 bytes, max is probably 6 or 8, with a tax of up to 25% for
    561   // a relocation index.
   562   // The CodeBuffer will expand the locs array if this estimate is too low.
   563   reloc_size *= 10 / sizeof(relocInfo);
   564 }
   566 //------------------------------FillLocArray-----------------------------------
   567 // Create a bit of debug info and append it to the array.  The mapping is from
   568 // Java local or expression stack to constant, register or stack-slot.  For
   569 // doubles, insert 2 mappings and return 1 (to tell the caller that the next
   570 // entry has been taken care of and caller should skip it).
   571 static LocationValue *new_loc_value( PhaseRegAlloc *ra, OptoReg::Name regnum, Location::Type l_type ) {
   572   // This should never have accepted Bad before
   573   assert(OptoReg::is_valid(regnum), "location must be valid");
   574   return (OptoReg::is_reg(regnum))
   575     ? new LocationValue(Location::new_reg_loc(l_type, OptoReg::as_VMReg(regnum)) )
   576     : new LocationValue(Location::new_stk_loc(l_type,  ra->reg2offset(regnum)));
   577 }
   580 ObjectValue*
   581 Compile::sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id) {
   582   for (int i = 0; i < objs->length(); i++) {
   583     assert(objs->at(i)->is_object(), "corrupt object cache");
   584     ObjectValue* sv = (ObjectValue*) objs->at(i);
   585     if (sv->id() == id) {
   586       return sv;
   587     }
   588   }
   589   // Otherwise..
   590   return NULL;
   591 }
   593 void Compile::set_sv_for_object_node(GrowableArray<ScopeValue*> *objs,
   594                                      ObjectValue* sv ) {
   595   assert(sv_for_node_id(objs, sv->id()) == NULL, "Precondition");
   596   objs->append(sv);
   597 }
   600 void Compile::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local,
   601                             GrowableArray<ScopeValue*> *array,
   602                             GrowableArray<ScopeValue*> *objs ) {
   603   assert( local, "use _top instead of null" );
   604   if (array->length() != idx) {
   605     assert(array->length() == idx + 1, "Unexpected array count");
   606     // Old functionality:
   607     //   return
   608     // New functionality:
   609     //   Assert if the local is not top. In product mode let the new node
   610     //   override the old entry.
   611     assert(local == top(), "LocArray collision");
   612     if (local == top()) {
   613       return;
   614     }
   615     array->pop();
   616   }
   617   const Type *t = local->bottom_type();
   619   // Is it a safepoint scalar object node?
   620   if (local->is_SafePointScalarObject()) {
   621     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
   623     ObjectValue* sv = Compile::sv_for_node_id(objs, spobj->_idx);
   624     if (sv == NULL) {
   625       ciKlass* cik = t->is_oopptr()->klass();
   626       assert(cik->is_instance_klass() ||
   627              cik->is_array_klass(), "Not supported allocation.");
   628       sv = new ObjectValue(spobj->_idx,
   629                            new ConstantOopWriteValue(cik->constant_encoding()));
   630       Compile::set_sv_for_object_node(objs, sv);
   632       uint first_ind = spobj->first_index();
   633       for (uint i = 0; i < spobj->n_fields(); i++) {
   634         Node* fld_node = sfpt->in(first_ind+i);
   635         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
   636       }
   637     }
   638     array->append(sv);
   639     return;
   640   }
   642   // Grab the register number for the local
   643   OptoReg::Name regnum = _regalloc->get_reg_first(local);
   644   if( OptoReg::is_valid(regnum) ) {// Got a register/stack?
   645     // Record the double as two float registers.
   646     // The register mask for such a value always specifies two adjacent
   647     // float registers, with the lower register number even.
   648     // Normally, the allocation of high and low words to these registers
   649     // is irrelevant, because nearly all operations on register pairs
   650     // (e.g., StoreD) treat them as a single unit.
   651     // Here, we assume in addition that the words in these two registers
    652     // are stored "naturally" (by operations like StoreD and double stores
   653     // within the interpreter) such that the lower-numbered register
   654     // is written to the lower memory address.  This may seem like
   655     // a machine dependency, but it is not--it is a requirement on
   656     // the author of the <arch>.ad file to ensure that, for every
   657     // even/odd double-register pair to which a double may be allocated,
   658     // the word in the even single-register is stored to the first
   659     // memory word.  (Note that register numbers are completely
   660     // arbitrary, and are not tied to any machine-level encodings.)
   661 #ifdef _LP64
   662     if( t->base() == Type::DoubleBot || t->base() == Type::DoubleCon ) {
   663       array->append(new ConstantIntValue(0));
   664       array->append(new_loc_value( _regalloc, regnum, Location::dbl ));
   665     } else if ( t->base() == Type::Long ) {
   666       array->append(new ConstantIntValue(0));
   667       array->append(new_loc_value( _regalloc, regnum, Location::lng ));
   668     } else if ( t->base() == Type::RawPtr ) {
    669       // jsr/ret return address which must be restored into the full-width
    670       // 64-bit stack slot.
   671       array->append(new_loc_value( _regalloc, regnum, Location::lng ));
   672     }
   673 #else //_LP64
   674 #ifdef SPARC
   675     if (t->base() == Type::Long && OptoReg::is_reg(regnum)) {
   676       // For SPARC we have to swap high and low words for
   677       // long values stored in a single-register (g0-g7).
   678       array->append(new_loc_value( _regalloc,              regnum   , Location::normal ));
   679       array->append(new_loc_value( _regalloc, OptoReg::add(regnum,1), Location::normal ));
   680     } else
   681 #endif //SPARC
   682     if( t->base() == Type::DoubleBot || t->base() == Type::DoubleCon || t->base() == Type::Long ) {
   683       // Repack the double/long as two jints.
   684       // The convention the interpreter uses is that the second local
   685       // holds the first raw word of the native double representation.
   686       // This is actually reasonable, since locals and stack arrays
   687       // grow downwards in all implementations.
   688       // (If, on some machine, the interpreter's Java locals or stack
   689       // were to grow upwards, the embedded doubles would be word-swapped.)
   690       array->append(new_loc_value( _regalloc, OptoReg::add(regnum,1), Location::normal ));
   691       array->append(new_loc_value( _regalloc,              regnum   , Location::normal ));
   692     }
   693 #endif //_LP64
   694     else if( (t->base() == Type::FloatBot || t->base() == Type::FloatCon) &&
   695                OptoReg::is_reg(regnum) ) {
   696       array->append(new_loc_value( _regalloc, regnum, Matcher::float_in_double()
   697                                    ? Location::float_in_dbl : Location::normal ));
   698     } else if( t->base() == Type::Int && OptoReg::is_reg(regnum) ) {
   699       array->append(new_loc_value( _regalloc, regnum, Matcher::int_in_long
   700                                    ? Location::int_in_long : Location::normal ));
   701     } else if( t->base() == Type::NarrowOop ) {
   702       array->append(new_loc_value( _regalloc, regnum, Location::narrowoop ));
   703     } else {
   704       array->append(new_loc_value( _regalloc, regnum, _regalloc->is_oop(local) ? Location::oop : Location::normal ));
   705     }
   706     return;
   707   }
   709   // No register.  It must be constant data.
   710   switch (t->base()) {
   711   case Type::Half:              // Second half of a double
   712     ShouldNotReachHere();       // Caller should skip 2nd halves
   713     break;
   714   case Type::AnyPtr:
   715     array->append(new ConstantOopWriteValue(NULL));
   716     break;
   717   case Type::AryPtr:
   718   case Type::InstPtr:
   719   case Type::KlassPtr:          // fall through
   720     array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->constant_encoding()));
   721     break;
   722   case Type::NarrowOop:
   723     if (t == TypeNarrowOop::NULL_PTR) {
   724       array->append(new ConstantOopWriteValue(NULL));
   725     } else {
   726       array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->constant_encoding()));
   727     }
   728     break;
   729   case Type::Int:
   730     array->append(new ConstantIntValue(t->is_int()->get_con()));
   731     break;
   732   case Type::RawPtr:
   733     // A return address (T_ADDRESS).
   734     assert((intptr_t)t->is_ptr()->get_con() < (intptr_t)0x10000, "must be a valid BCI");
   735 #ifdef _LP64
   736     // Must be restored to the full-width 64-bit stack slot.
   737     array->append(new ConstantLongValue(t->is_ptr()->get_con()));
   738 #else
   739     array->append(new ConstantIntValue(t->is_ptr()->get_con()));
   740 #endif
   741     break;
   742   case Type::FloatCon: {
   743     float f = t->is_float_constant()->getf();
   744     array->append(new ConstantIntValue(jint_cast(f)));
   745     break;
   746   }
   747   case Type::DoubleCon: {
   748     jdouble d = t->is_double_constant()->getd();
   749 #ifdef _LP64
   750     array->append(new ConstantIntValue(0));
   751     array->append(new ConstantDoubleValue(d));
   752 #else
   753     // Repack the double as two jints.
   754     // The convention the interpreter uses is that the second local
   755     // holds the first raw word of the native double representation.
   756     // This is actually reasonable, since locals and stack arrays
   757     // grow downwards in all implementations.
   758     // (If, on some machine, the interpreter's Java locals or stack
   759     // were to grow upwards, the embedded doubles would be word-swapped.)
   760     jint   *dp = (jint*)&d;
   761     array->append(new ConstantIntValue(dp[1]));
   762     array->append(new ConstantIntValue(dp[0]));
   763 #endif
   764     break;
   765   }
   766   case Type::Long: {
   767     jlong d = t->is_long()->get_con();
   768 #ifdef _LP64
   769     array->append(new ConstantIntValue(0));
   770     array->append(new ConstantLongValue(d));
   771 #else
   772     // Repack the long as two jints.
   773     // The convention the interpreter uses is that the second local
   774     // holds the first raw word of the native double representation.
   775     // This is actually reasonable, since locals and stack arrays
   776     // grow downwards in all implementations.
   777     // (If, on some machine, the interpreter's Java locals or stack
   778     // were to grow upwards, the embedded doubles would be word-swapped.)
   779     jint *dp = (jint*)&d;
   780     array->append(new ConstantIntValue(dp[1]));
   781     array->append(new ConstantIntValue(dp[0]));
   782 #endif
   783     break;
   784   }
   785   case Type::Top:               // Add an illegal value here
   786     array->append(new LocationValue(Location()));
   787     break;
   788   default:
   789     ShouldNotReachHere();
   790     break;
   791   }
   792 }
   794 // Determine if this node starts a bundle
   795 bool Compile::starts_bundle(const Node *n) const {
   796   return (_node_bundling_limit > n->_idx &&
   797           _node_bundling_base[n->_idx].starts_bundle());
   798 }
   800 //--------------------------Process_OopMap_Node--------------------------------
   801 void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
   803   // Handle special safepoint nodes for synchronization
   804   MachSafePointNode *sfn   = mach->as_MachSafePoint();
   805   MachCallNode      *mcall;
   807 #ifdef ENABLE_ZAP_DEAD_LOCALS
   808   assert( is_node_getting_a_safepoint(mach),  "logic does not match; false negative");
   809 #endif
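          // For calls, the safepoint pc is the return address; whether this is a
          // MethodHandle invoke and whether the call returns an oop are recorded
          // in the debug info below.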
   811   int safepoint_pc_offset = current_offset;
   812   bool is_method_handle_invoke = false;
   813   bool return_oop = false;
   815   // Add the safepoint in the DebugInfoRecorder
   816   if( !mach->is_MachCall() ) {
   817     mcall = NULL;
   818     debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
   819   } else {
   820     mcall = mach->as_MachCall();
   822     // Is the call a MethodHandle call?
   823     if (mcall->is_MachCallJava()) {
   824       if (mcall->as_MachCallJava()->_method_handle_invoke) {
   825         assert(has_method_handle_invokes(), "must have been set during call generation");
   826         is_method_handle_invoke = true;
   827       }
   828     }
   830     // Check if a call returns an object.
   831     if (mcall->return_value_is_used() &&
   832         mcall->tf()->range()->field_at(TypeFunc::Parms)->isa_ptr()) {
   833       return_oop = true;
   834     }
   835     safepoint_pc_offset += mcall->ret_addr_offset();
   836     debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
   837   }
   839   // Loop over the JVMState list to add scope information
   840   // Do not skip safepoints with a NULL method, they need monitor info
   841   JVMState* youngest_jvms = sfn->jvms();
   842   int max_depth = youngest_jvms->depth();
   844   // Allocate the object pool for scalar-replaced objects -- the map from
   845   // small-integer keys (which can be recorded in the local and ostack
   846   // arrays) to descriptions of the object state.
   847   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
   849   // Visit scopes from oldest to youngest.
   850   for (int depth = 1; depth <= max_depth; depth++) {
   851     JVMState* jvms = youngest_jvms->of_depth(depth);
   852     int idx;
   853     ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
   854     // Safepoints that do not have method() set only provide oop-map and monitor info
   855     // to support GC; these do not support deoptimization.
   856     int num_locs = (method == NULL) ? 0 : jvms->loc_size();
   857     int num_exps = (method == NULL) ? 0 : jvms->stk_size();
   858     int num_mon  = jvms->nof_monitors();
   859     assert(method == NULL || jvms->bci() < 0 || num_locs == method->max_locals(),
   860            "JVMS local count must match that of the method");
   862     // Add Local and Expression Stack Information
   864     // Insert locals into the locarray
   865     GrowableArray<ScopeValue*> *locarray = new GrowableArray<ScopeValue*>(num_locs);
   866     for( idx = 0; idx < num_locs; idx++ ) {
   867       FillLocArray( idx, sfn, sfn->local(jvms, idx), locarray, objs );
   868     }
   870     // Insert expression stack entries into the exparray
   871     GrowableArray<ScopeValue*> *exparray = new GrowableArray<ScopeValue*>(num_exps);
   872     for( idx = 0; idx < num_exps; idx++ ) {
   873       FillLocArray( idx,  sfn, sfn->stack(jvms, idx), exparray, objs );
   874     }
   876     // Add in mappings of the monitors
   877     assert( !method ||
   878             !method->is_synchronized() ||
   879             method->is_native() ||
   880             num_mon > 0 ||
   881             !GenerateSynchronizationCode,
   882             "monitors must always exist for synchronized methods");
    884     // Build the growable array of MonitorValues for the monitors
   885     GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
   887     // Loop over monitors and insert into array
   888     for(idx = 0; idx < num_mon; idx++) {
   889       // Grab the node that defines this monitor
   890       Node* box_node = sfn->monitor_box(jvms, idx);
   891       Node* obj_node = sfn->monitor_obj(jvms, idx);
   893       // Create ScopeValue for object
   894       ScopeValue *scval = NULL;
   896       if( obj_node->is_SafePointScalarObject() ) {
   897         SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
   898         scval = Compile::sv_for_node_id(objs, spobj->_idx);
   899         if (scval == NULL) {
   900           const Type *t = obj_node->bottom_type();
   901           ciKlass* cik = t->is_oopptr()->klass();
   902           assert(cik->is_instance_klass() ||
   903                  cik->is_array_klass(), "Not supported allocation.");
   904           ObjectValue* sv = new ObjectValue(spobj->_idx,
   905                                 new ConstantOopWriteValue(cik->constant_encoding()));
   906           Compile::set_sv_for_object_node(objs, sv);
   908           uint first_ind = spobj->first_index();
   909           for (uint i = 0; i < spobj->n_fields(); i++) {
   910             Node* fld_node = sfn->in(first_ind+i);
   911             (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
   912           }
   913           scval = sv;
   914         }
   915       } else if( !obj_node->is_Con() ) {
   916         OptoReg::Name obj_reg = _regalloc->get_reg_first(obj_node);
   917         if( obj_node->bottom_type()->base() == Type::NarrowOop ) {
   918           scval = new_loc_value( _regalloc, obj_reg, Location::narrowoop );
   919         } else {
   920           scval = new_loc_value( _regalloc, obj_reg, Location::oop );
   921         }
   922       } else {
   923         const TypePtr *tp = obj_node->bottom_type()->make_ptr();
   924         scval = new ConstantOopWriteValue(tp->is_oopptr()->const_oop()->constant_encoding());
   925       }
   927       OptoReg::Name box_reg = BoxLockNode::stack_slot(box_node);
   928       Location basic_lock = Location::new_stk_loc(Location::normal,_regalloc->reg2offset(box_reg));
   929       while( !box_node->is_BoxLock() )  box_node = box_node->in(1);
   930       monarray->append(new MonitorValue(scval, basic_lock, box_node->as_BoxLock()->is_eliminated()));
   931     }
   933     // We dump the object pool first, since deoptimization reads it in first.
   934     debug_info()->dump_object_pool(objs);
   936     // Build first class objects to pass to scope
   937     DebugToken *locvals = debug_info()->create_scope_values(locarray);
   938     DebugToken *expvals = debug_info()->create_scope_values(exparray);
   939     DebugToken *monvals = debug_info()->create_monitor_values(monarray);
   941     // Make method available for all Safepoints
   942     ciMethod* scope_method = method ? method : _method;
   943     // Describe the scope here
   944     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
   945     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
   946     // Now we can describe the scope.
   947     debug_info()->describe_scope(safepoint_pc_offset, scope_method, jvms->bci(), jvms->should_reexecute(), is_method_handle_invoke, return_oop, locvals, expvals, monvals);
   948   } // End jvms loop
   950   // Mark the end of the scope set.
   951   debug_info()->end_safepoint(safepoint_pc_offset);
   952 }
   956 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
   957 class NonSafepointEmitter {
   958   Compile*  C;
   959   JVMState* _pending_jvms;
   960   int       _pending_offset;
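          // _pending_jvms/_pending_offset remember the last debug state seen and the
          // last pc offset it extends to; the pair is flushed as a non-safepoint record
          // when a different state or a real safepoint is encountered.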
   962   void emit_non_safepoint();
   964  public:
   965   NonSafepointEmitter(Compile* compile) {
   966     this->C = compile;
   967     _pending_jvms = NULL;
   968     _pending_offset = 0;
   969   }
   971   void observe_instruction(Node* n, int pc_offset) {
   972     if (!C->debug_info()->recording_non_safepoints())  return;
   974     Node_Notes* nn = C->node_notes_at(n->_idx);
   975     if (nn == NULL || nn->jvms() == NULL)  return;
   976     if (_pending_jvms != NULL &&
   977         _pending_jvms->same_calls_as(nn->jvms())) {
   978       // Repeated JVMS?  Stretch it up here.
   979       _pending_offset = pc_offset;
   980     } else {
   981       if (_pending_jvms != NULL &&
   982           _pending_offset < pc_offset) {
   983         emit_non_safepoint();
   984       }
   985       _pending_jvms = NULL;
   986       if (pc_offset > C->debug_info()->last_pc_offset()) {
   987         // This is the only way _pending_jvms can become non-NULL:
   988         _pending_jvms = nn->jvms();
   989         _pending_offset = pc_offset;
   990       }
   991     }
   992   }
   994   // Stay out of the way of real safepoints:
   995   void observe_safepoint(JVMState* jvms, int pc_offset) {
   996     if (_pending_jvms != NULL &&
   997         !_pending_jvms->same_calls_as(jvms) &&
   998         _pending_offset < pc_offset) {
    999       emit_non_safepoint();
   1000     }
   1001     _pending_jvms = NULL;
   1002   }
  1004   void flush_at_end() {
  1005     if (_pending_jvms != NULL) {
   1006       emit_non_safepoint();
   1007     }
   1008     _pending_jvms = NULL;
   1009   }
  1010 };
  1012 void NonSafepointEmitter::emit_non_safepoint() {
  1013   JVMState* youngest_jvms = _pending_jvms;
  1014   int       pc_offset     = _pending_offset;
  1016   // Clear it now:
  1017   _pending_jvms = NULL;
  1019   DebugInformationRecorder* debug_info = C->debug_info();
  1020   assert(debug_info->recording_non_safepoints(), "sanity");
  1022   debug_info->add_non_safepoint(pc_offset);
  1023   int max_depth = youngest_jvms->depth();
  1025   // Visit scopes from oldest to youngest.
  1026   for (int depth = 1; depth <= max_depth; depth++) {
  1027     JVMState* jvms = youngest_jvms->of_depth(depth);
  1028     ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
  1029     assert(!jvms->should_reexecute() || depth==max_depth, "reexecute allowed only for the youngest");
   1030     debug_info->describe_scope(pc_offset, method, jvms->bci(), jvms->should_reexecute());
   1031   }
  1033   // Mark the end of the scope set.
   1034   debug_info->end_non_safepoint(pc_offset);
   1035 }
  1039 // helper for fill_buffer bailout logic
  1040 static void turn_off_compiler(Compile* C) {
  1041   if (CodeCache::largest_free_block() >= CodeCacheMinimumFreeSpace*10) {
  1042     // Do not turn off compilation if a single giant method has
  1043     // blown the code cache size.
  1044     C->record_failure("excessive request to CodeCache");
  1045   } else {
   1046     // Let CompileBroker disable further compilations.
   1047     C->record_failure("CodeCache is full");
   1048   }
   1049 }
  1052 //------------------------------init_buffer------------------------------------
  1053 CodeBuffer* Compile::init_buffer(uint* blk_starts) {
  1055   // Set the initially allocated size
  1056   int  code_req   = initial_code_capacity;
  1057   int  locs_req   = initial_locs_capacity;
  1058   int  stub_req   = TraceJumps ? initial_stub_capacity * 10 : initial_stub_capacity;
  1059   int  const_req  = initial_const_capacity;
  1061   int  pad_req    = NativeCall::instruction_size;
  1062   // The extra spacing after the code is necessary on some platforms.
  1063   // Sometimes we need to patch in a jump after the last instruction,
  1064   // if the nmethod has been deoptimized.  (See 4932387, 4894843.)
  1066   // Compute the byte offset where we can store the deopt pc.
  1067   if (fixed_slots() != 0) {
   1068     _orig_pc_slot_offset_in_bytes = _regalloc->reg2offset(OptoReg::stack2reg(_orig_pc_slot));
   1069   }
  1071   // Compute prolog code size
  1072   _method_size = 0;
  1073   _frame_slots = OptoReg::reg2stack(_matcher->_old_SP)+_regalloc->_framesize;
  1074 #ifdef IA64
  1075   if (save_argument_registers()) {
  1076     // 4815101: this is a stub with implicit and unknown precision fp args.
  1077     // The usual spill mechanism can only generate stfd's in this case, which
  1078     // doesn't work if the fp reg to spill contains a single-precision denorm.
  1079     // Instead, we hack around the normal spill mechanism using stfspill's and
  1080     // ldffill's in the MachProlog and MachEpilog emit methods.  We allocate
  1081     // space here for the fp arg regs (f8-f15) we're going to thusly spill.
  1082     //
  1083     // If we ever implement 16-byte 'registers' == stack slots, we can
  1084     // get rid of this hack and have SpillCopy generate stfspill/ldffill
  1085     // instead of stfd/stfs/ldfd/ldfs.
   1086     _frame_slots += 8*(16/BytesPerInt);
   1087   }
  1088 #endif
  1089   assert(_frame_slots >= 0 && _frame_slots < 1000000, "sanity check");
  1091   if (has_mach_constant_base_node()) {
  1092     // Fill the constant table.
  1093     // Note:  This must happen before shorten_branches.
  1094     for (uint i = 0; i < _cfg->_num_blocks; i++) {
  1095       Block* b = _cfg->_blocks[i];
  1097       for (uint j = 0; j < b->_nodes.size(); j++) {
  1098         Node* n = b->_nodes[j];
  1100         // If the node is a MachConstantNode evaluate the constant
  1101         // value section.
  1102         if (n->is_MachConstant()) {
  1103           MachConstantNode* machcon = n->as_MachConstant();
   1104           machcon->eval_constant(C);
   1105         }
   1106       }
   1107     }
  1109     // Calculate the offsets of the constants and the size of the
  1110     // constant table (including the padding to the next section).
  1111     constant_table().calculate_offsets_and_size();
   1112     const_req = constant_table().size();
   1113   }
  1115   // Initialize the space for the BufferBlob used to find and verify
  1116   // instruction size in MachNode::emit_size()
  1117   init_scratch_buffer_blob(const_req);
  1118   if (failing())  return NULL; // Out of memory
   1120   // Pre-compute the length of blocks and replace
   1121   // long branches with short ones if the machine supports it.
  1122   shorten_branches(blk_starts, code_req, locs_req, stub_req);
  1124   // nmethod and CodeBuffer count stubs & constants as part of method's code.
  1125   int exception_handler_req = size_exception_handler();
  1126   int deopt_handler_req = size_deopt_handler();
  1127   exception_handler_req += MAX_stubs_size; // add marginal slop for handler
  1128   deopt_handler_req += MAX_stubs_size; // add marginal slop for handler
  1129   stub_req += MAX_stubs_size;   // ensure per-stub margin
  1130   code_req += MAX_inst_size;    // ensure per-instruction margin
  1132   if (StressCodeBuffers)
  1133     code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10;  // force expansion
  1135   int total_req =
  1136     const_req +
  1137     code_req +
  1138     pad_req +
  1139     stub_req +
  1140     exception_handler_req +
  1141     deopt_handler_req;               // deopt handler
  1143   if (has_method_handle_invokes())
  1144     total_req += deopt_handler_req;  // deopt MH handler
  1146   CodeBuffer* cb = code_buffer();
  1147   cb->initialize(total_req, locs_req);
  1149   // Have we run out of code space?
  1150   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
  1151     turn_off_compiler(this);
   1152     return NULL;
   1153   }
  1154   // Configure the code buffer.
  1155   cb->initialize_consts_size(const_req);
  1156   cb->initialize_stubs_size(stub_req);
  1157   cb->initialize_oop_recorder(env()->oop_recorder());
  1159   // fill in the nop array for bundling computations
  1160   MachNode *_nop_list[Bundle::_nop_count];
  1161   Bundle::initialize_nops(_nop_list, this);
   1163   return cb;
   1164 }
  1166 //------------------------------fill_buffer------------------------------------
  1167 void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
   1168   // blk_starts[] contains offsets calculated during short branch processing;
   1169   // the offsets should not increase during the following steps.
  1171   // Compute the size of first NumberOfLoopInstrToAlign instructions at head
  1172   // of a loop. It is used to determine the padding for loop alignment.
  1173   compute_loop_first_inst_sizes();
  1175   // Create oopmap set.
  1176   _oop_map_set = new OopMapSet();
  1178   // !!!!! This preserves old handling of oopmaps for now
  1179   debug_info()->set_oopmaps(_oop_map_set);
  1181   uint nblocks  = _cfg->_num_blocks;
  1182   // Count and start of implicit null check instructions
  1183   uint inct_cnt = 0;
  1184   uint *inct_starts = NEW_RESOURCE_ARRAY(uint, nblocks+1);
  1186   // Count and start of calls
  1187   uint *call_returns = NEW_RESOURCE_ARRAY(uint, nblocks+1);
  1189   uint  return_offset = 0;
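          // Size of a single nop on this platform; used whenever padding must be emitted.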
  1190   int nop_size = (new (this) MachNopNode())->size(_regalloc);
  1192   int previous_offset = 0;
  1193   int current_offset  = 0;
  1194   int last_call_offset = -1;
  1195   int last_avoid_back_to_back_offset = -1;
  1196 #ifdef ASSERT
  1197   int block_alignment_padding = 0;
  1199   uint* jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks);
  1200   uint* jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
  1201   uint* jmp_size   = NEW_RESOURCE_ARRAY(uint,nblocks);
  1202   uint* jmp_rule   = NEW_RESOURCE_ARRAY(uint,nblocks);
  1203 #endif
  1205   // Create an array of unused labels, one for each basic block, if printing is enabled
  1206 #ifndef PRODUCT
  1207   int *node_offsets      = NULL;
  1208   uint node_offset_limit = unique();
  1210   if (print_assembly())
  1211     node_offsets         = NEW_RESOURCE_ARRAY(int, node_offset_limit);
  1212 #endif
  1214   NonSafepointEmitter non_safepoints(this);  // emit non-safepoints lazily
  1216   // Emit the constant table.
  1217   if (has_mach_constant_base_node()) {
   1218     constant_table().emit(*cb);
   1219   }
  1221   // Create an array of labels, one for each basic block
  1222   Label *blk_labels = NEW_RESOURCE_ARRAY(Label, nblocks+1);
  1223   for (uint i=0; i <= nblocks; i++) {
   1224     blk_labels[i].init();
   1225   }
  1227   // ------------------
  1228   // Now fill in the code buffer
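          // On platforms with branch delay slots, holds an instruction that has been
          // deferred so it can be emitted in the delay slot of the following branch.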
  1229   Node *delay_slot = NULL;
  1231   for (uint i=0; i < nblocks; i++) {
  1232     guarantee(blk_starts[i] >= (uint)cb->insts_size(),"should not increase size");
  1234     Block *b = _cfg->_blocks[i];
  1236     Node *head = b->head();
  1238     // If this block needs to start aligned (i.e, can be reached other
  1239     // than by falling-thru from the previous block), then force the
  1240     // start of a new bundle.
  1241     if (Pipeline::requires_bundling() && starts_bundle(head))
  1242       cb->flush_bundle(true);
  1244 #ifdef ASSERT
  1245     if (!b->is_connector()) {
  1246       stringStream st;
  1247       b->dump_head(&_cfg->_bbs, &st);
   1248       MacroAssembler(cb).block_comment(st.as_string());
   1249     }
  1250     jmp_target[i] = 0;
  1251     jmp_offset[i] = 0;
  1252     jmp_size[i]   = 0;
  1253     jmp_rule[i]   = 0;
   1255     // The maximum alignment padding for a loop block was used during
   1256     // the first round of branch shortening; as a result, padding for
   1257     // nodes (a safepoint after a call) was not added. Take this into
   1258     // account in the block size change check and allow the block size
   1259     // to grow by the difference between the maximum and actual
   1260     // alignment paddings.
  1261     int orig_blk_size = blk_starts[i+1] - blk_starts[i] + block_alignment_padding;
  1262 #endif
  1263     int blk_offset = current_offset;
  1265     // Define the label at the beginning of the basic block
  1266     MacroAssembler(cb).bind(blk_labels[b->_pre_order]);
  1268     uint last_inst = b->_nodes.size();
  1270     // Emit block normally, except for last instruction.
  1271     // Emit means "dump code bits into code buffer".
  1272     for (uint j = 0; j<last_inst; j++) {
  1274       // Get the node
  1275       Node* n = b->_nodes[j];
  1277       // See if delay slots are supported
  1278       if (valid_bundle_info(n) &&
  1279           node_bundling(n)->used_in_unconditional_delay()) {
  1280         assert(delay_slot == NULL, "no use of delay slot node");
  1281         assert(n->size(_regalloc) == Pipeline::instr_unit_size(), "delay slot instruction wrong size");
  1283         delay_slot = n;
  1284         continue;
  1287       // If this starts a new instruction group, then flush the current one
  1288       // (but allow split bundles)
  1289       if (Pipeline::requires_bundling() && starts_bundle(n))
  1290         cb->flush_bundle(false);
  1292       // The following logic is duplicated in the code ifdeffed for
  1293       // ENABLE_ZAP_DEAD_LOCALS which appears above in this file.  It
  1294       // should be factored out.  Or maybe dispersed to the nodes?
  1296       // Special handling for SafePoint/Call Nodes
  1297       bool is_mcall = false;
  1298       if (n->is_Mach()) {
  1299         MachNode *mach = n->as_Mach();
  1300         is_mcall = n->is_MachCall();
  1301         bool is_sfn = n->is_MachSafePoint();
  1303         // If this requires all previous instructions be flushed, then do so
  1304         if (is_sfn || is_mcall || mach->alignment_required() != 1) {
  1305           cb->flush_bundle(true);
  1306           current_offset = cb->insts_size();
  1309         // Padding may be needed again since a previous instruction
  1310         // could have been moved into the delay slot.
  1312         // align the instruction if necessary
  1313         int padding = mach->compute_padding(current_offset);
  1314         // Make sure safepoint node for polling is distinct from a call's
  1315         // return by adding a nop if needed.
  1316         if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
  1317           padding = nop_size;
  1319         if (padding == 0 && mach->avoid_back_to_back() &&
  1320             current_offset == last_avoid_back_to_back_offset) {
  1321           // Avoid putting certain instructions back to back.
  1322           padding = nop_size;
  1325         if(padding > 0) {
  1326           assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
  1327           int nops_cnt = padding / nop_size;
  1328           MachNode *nop = new (this) MachNopNode(nops_cnt);
  1329           b->_nodes.insert(j++, nop);
  1330           last_inst++;
  1331           _cfg->_bbs.map( nop->_idx, b );
  1332           nop->emit(*cb, _regalloc);
  1333           cb->flush_bundle(true);
  1334           current_offset = cb->insts_size();
  1337         // Remember the start of the last call in a basic block
  1338         if (is_mcall) {
  1339           MachCallNode *mcall = mach->as_MachCall();
  1341           // This destination address is NOT PC-relative
  1342           mcall->method_set((intptr_t)mcall->entry_point());
  1344           // Save the return address
  1345           call_returns[b->_pre_order] = current_offset + mcall->ret_addr_offset();
  1347           if (mcall->is_MachCallLeaf()) {
  1348             is_mcall = false;
  1349             is_sfn = false;
  1353         // is_sfn will be true whenever is_mcall is true, because of inheritance
  1354         if (is_sfn || is_mcall) {
  1356           // Handle special safepoint nodes for synchronization
  1357           if (!is_mcall) {
  1358             MachSafePointNode *sfn = mach->as_MachSafePoint();
  1359             // !!!!! Stubs only need an oopmap right now, so bail out
  1360             if (sfn->jvms()->method() == NULL) {
  1361               // Write the oopmap directly to the code blob??!!
  1362 #             ifdef ENABLE_ZAP_DEAD_LOCALS
  1363               assert( !is_node_getting_a_safepoint(sfn),  "logic does not match; false positive");
  1364 #             endif
  1365               continue;
  1367           } // End synchronization
  1369           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
  1370                                            current_offset);
  1371           Process_OopMap_Node(mach, current_offset);
  1372         } // End if safepoint
  1374         // If this is a null check, then add the start of the previous instruction to the list
  1375         else if( mach->is_MachNullCheck() ) {
  1376           inct_starts[inct_cnt++] = previous_offset;
  1379         // If this is a branch, then fill in the label with the target BB's label
  1380         else if (mach->is_MachBranch()) {
  1381           // This requires the TRUE branch target be in succs[0]
  1382           uint block_num = b->non_connector_successor(0)->_pre_order;
  1384           // Try to replace a long branch with a short one if the delay
  1385           // slot is not used; this mostly helps backward branches, since
  1386           // a forward branch's distance is not finalized yet.
  1387           bool delay_slot_is_used = valid_bundle_info(n) &&
  1388                                     node_bundling(n)->use_unconditional_delay();
  1389           if (!delay_slot_is_used && mach->may_be_short_branch()) {
  1390            assert(delay_slot == NULL, "not expecting delay slot node");
  1391            int br_size = n->size(_regalloc);
  1392             int offset = blk_starts[block_num] - current_offset;
  1393             if (block_num >= i) {
  1394               // The offsets of the current and following blocks are not
  1395               // finalized yet; adjust the distance by the difference
  1396               // between the calculated and final offsets of the current block.
  1397               offset -= (blk_starts[i] - blk_offset);
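                   // For illustration (hypothetical numbers): if the previous
                   // pass placed this block at blk_starts[i] == 1000 and the
                   // target block at blk_starts[block_num] == 1100, and 24
                   // bytes of this block have already been emitted
                   // (current_offset - blk_offset == 24), the forward distance
                   // is conservatively estimated as (1100 - 1000) - 24 == 76
                   // bytes, independent of how much the earlier blocks have
                   // already shrunk.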
  1399             // In the following code a nop could be inserted before the
  1400             // branch, which would increase the backward distance.
  1401             bool needs_padding = (current_offset == last_avoid_back_to_back_offset);
  1402             if (needs_padding && offset <= 0)
  1403               offset -= nop_size;
  1405             if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
  1406               // We've got a winner.  Replace this branch.
  1407               MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
  1409               // Update the jmp_size.
  1410               int new_size = replacement->size(_regalloc);
  1411               assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
  1412               // Insert padding between avoid_back_to_back branches.
  1413               if (needs_padding && replacement->avoid_back_to_back()) {
  1414                 MachNode *nop = new (this) MachNopNode();
  1415                 b->_nodes.insert(j++, nop);
  1416                 _cfg->_bbs.map(nop->_idx, b);
  1417                 last_inst++;
  1418                 nop->emit(*cb, _regalloc);
  1419                 cb->flush_bundle(true);
  1420                 current_offset = cb->insts_size();
  1422 #ifdef ASSERT
  1423               jmp_target[i] = block_num;
  1424               jmp_offset[i] = current_offset - blk_offset;
  1425               jmp_size[i]   = new_size;
  1426               jmp_rule[i]   = mach->rule();
  1427 #endif
  1428               b->_nodes.map(j, replacement);
  1429               mach->subsume_by(replacement);
  1430               n    = replacement;
  1431               mach = replacement;
  1434           mach->as_MachBranch()->label_set( &blk_labels[block_num], block_num );
  1435         } else if (mach->ideal_Opcode() == Op_Jump) {
  1436           for (uint h = 0; h < b->_num_succs; h++) {
  1437             Block* succs_block = b->_succs[h];
  1438             for (uint j = 1; j < succs_block->num_preds(); j++) {
  1439               Node* jpn = succs_block->pred(j);
  1440               if (jpn->is_JumpProj() && jpn->in(0) == mach) {
  1441                 uint block_num = succs_block->non_connector()->_pre_order;
  1442                 Label *blkLabel = &blk_labels[block_num];
  1443                 mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
  1449 #ifdef ASSERT
  1450         // Check that oop-store precedes the card-mark
  1451         else if (mach->ideal_Opcode() == Op_StoreCM) {
  1452           uint storeCM_idx = j;
  1453           int count = 0;
  1454           for (uint prec = mach->req(); prec < mach->len(); prec++) {
  1455             Node *oop_store = mach->in(prec);  // Precedence edge
  1456             if (oop_store == NULL) continue;
  1457             count++;
  1458             uint i4;
  1459             for( i4 = 0; i4 < last_inst; ++i4 ) {
  1460               if( b->_nodes[i4] == oop_store ) break;
  1462             // Note: This test can provide a false failure if other precedence
  1463             // edges have been added to the storeCMNode.
  1464             assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
  1466           assert(count > 0, "storeCM expects at least one precedence edge");
  1468 #endif
  1470         else if (!n->is_Proj()) {
  1471           // Remember the beginning of the previous instruction, in case
  1472           // it's followed by a flag-kill and a null-check.  Happens on
  1473           // Intel all the time, with add-to-memory kind of opcodes.
  1474           previous_offset = current_offset;
  1478       // Verify that there is sufficient space remaining
  1479       cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
  1480       if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
  1481         turn_off_compiler(this);
  1482         return;
  1485       // Save the offset for the listing
  1486 #ifndef PRODUCT
  1487       if (node_offsets && n->_idx < node_offset_limit)
  1488         node_offsets[n->_idx] = cb->insts_size();
  1489 #endif
  1491       // "Normal" instruction case
  1492       DEBUG_ONLY( uint instr_offset = cb->insts_size(); )
  1493       n->emit(*cb, _regalloc);
  1494       current_offset  = cb->insts_size();
  1496 #ifdef ASSERT
  1497       if (n->size(_regalloc) < (current_offset-instr_offset)) {
  1498         n->dump();
  1499         assert(false, "wrong size of mach node");
  1501 #endif
  1502       non_safepoints.observe_instruction(n, current_offset);
  1504       // mcall is the last "call" that can be a safepoint.  Record it so
  1505       // we can see if a poll will directly follow it, in which case we
  1506       // will need a pad to make the PcDesc sites unique (see 5010568).
  1507       // This can be slightly inaccurate but conservative in the case
  1508       // that the return address is not actually at current_offset.
  1509       // This is a small price to pay.
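             // For illustration, the padding decision made earlier in this loop
             // for that case boils down to:
             //
             //   if (is_sfn && !is_mcall && padding == 0 &&
             //       current_offset == last_call_offset) {
             //     padding = nop_size;  // keep the poll's PcDesc distinct from the call's
             //   }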
  1511       if (is_mcall) {
  1512         last_call_offset = current_offset;
  1515       if (n->is_Mach() && n->as_Mach()->avoid_back_to_back()) {
  1516         // Avoid putting certain instructions back to back.
  1517         last_avoid_back_to_back_offset = current_offset;
  1520       // See if this instruction has a delay slot
  1521       if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
  1522         assert(delay_slot != NULL, "expecting delay slot node");
  1524         // Back up 1 instruction
  1525         cb->set_insts_end(cb->insts_end() - Pipeline::instr_unit_size());
  1527         // Save the offset for the listing
  1528 #ifndef PRODUCT
  1529         if (node_offsets && delay_slot->_idx < node_offset_limit)
  1530           node_offsets[delay_slot->_idx] = cb->insts_size();
  1531 #endif
  1533         // Support a SafePoint in the delay slot
  1534         if (delay_slot->is_MachSafePoint()) {
  1535           MachNode *mach = delay_slot->as_Mach();
  1536           // !!!!! Stubs only need an oopmap right now, so bail out
  1537           if (!mach->is_MachCall() && mach->as_MachSafePoint()->jvms()->method() == NULL) {
  1538             // Write the oopmap directly to the code blob??!!
  1539 #           ifdef ENABLE_ZAP_DEAD_LOCALS
  1540             assert( !is_node_getting_a_safepoint(mach),  "logic does not match; false positive");
  1541 #           endif
  1542             delay_slot = NULL;
  1543             continue;
  1546           int adjusted_offset = current_offset - Pipeline::instr_unit_size();
  1547           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
  1548                                            adjusted_offset);
  1549           // Generate an OopMap entry
  1550           Process_OopMap_Node(mach, adjusted_offset);
  1553         // Insert the delay slot instruction
  1554         delay_slot->emit(*cb, _regalloc);
  1556         // Don't reuse it
  1557         delay_slot = NULL;
  1560     } // End for all instructions in block
  1561     assert((uint)blk_offset <= blk_starts[i], "shouldn't increase distance");
  1562     blk_starts[i] = blk_offset;
  1564     // If the next block is the top of a loop, pad this block out to align
  1565     // the loop top a little. Helps prevent pipe stalls at loop back branches.
  1566     if (i < nblocks-1) {
  1567       Block *nb = _cfg->_blocks[i+1];
  1568       int padding = nb->alignment_padding(current_offset);
  1569       if( padding > 0 ) {
  1570         MachNode *nop = new (this) MachNopNode(padding / nop_size);
  1571         b->_nodes.insert( b->_nodes.size(), nop );
  1572         _cfg->_bbs.map( nop->_idx, b );
  1573         nop->emit(*cb, _regalloc);
  1574         current_offset = cb->insts_size();
  1576 #ifdef ASSERT
  1577       int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
  1578       block_alignment_padding = (max_loop_pad - padding);
  1579       assert(block_alignment_padding >= 0, "sanity");
  1580 #endif
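           // For illustration (hypothetical numbers): if the loop head in 'nb'
           // must be 16-byte aligned and current_offset == 58, then
           // nb->alignment_padding(58) == 6; with a 1-byte nop, a MachNopNode
           // emitting six nops is appended so the loop top starts at offset 64.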
  1582     // Verify that the distance computed earlier for forward
  1583     // short branches is still valid.
  1584     assert(orig_blk_size >= (current_offset - blk_offset), "shouldn't increase block size");
  1586   } // End of for all blocks
  1587   blk_starts[nblocks] = current_offset;
  1589   non_safepoints.flush_at_end();
  1591   // Offset too large?
  1592   if (failing())  return;
  1594   // Define a pseudo-label at the end of the code
  1595   MacroAssembler(cb).bind( blk_labels[nblocks] );
  1597   // Compute the size of the first block
  1598   _first_block_size = blk_labels[1].loc_pos() - blk_labels[0].loc_pos();
  1600   assert(cb->insts_size() < 500000, "method is unreasonably large");
  1602 #ifdef ASSERT
  1603   for (uint i = 0; i < nblocks; i++) { // For all blocks
  1604     if (jmp_target[i] != 0) {
  1605       int br_size = jmp_size[i];
  1606       int offset = blk_starts[jmp_target[i]]-(blk_starts[i] + jmp_offset[i]);
  1607       if (!_matcher->is_short_branch_offset(jmp_rule[i], br_size, offset)) {
  1608         tty->print_cr("target (%d) - jmp_offset(%d) = offset (%d), jump_size(%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_offset[i], offset, br_size, i, jmp_target[i]);
  1609         assert(false, "Displacement too large for short jmp");
  1613 #endif
  1615   // ------------------
  1617 #ifndef PRODUCT
  1618   // Information on the size of the method, without the extraneous code
  1619   Scheduling::increment_method_size(cb->insts_size());
  1620 #endif
  1622   // ------------------
  1623   // Fill in exception table entries.
  1624   FillExceptionTables(inct_cnt, call_returns, inct_starts, blk_labels);
  1626   // Only java methods have exception handlers and deopt handlers
  1627   if (_method) {
  1628     // Emit the exception handler code.
  1629     _code_offsets.set_value(CodeOffsets::Exceptions, emit_exception_handler(*cb));
  1630     // Emit the deopt handler code.
  1631     _code_offsets.set_value(CodeOffsets::Deopt, emit_deopt_handler(*cb));
  1633     // Emit the MethodHandle deopt handler code (if required).
  1634     if (has_method_handle_invokes()) {
  1635       // We can use the same code as for the normal deopt handler, we
  1636       // just need a different entry point address.
  1637       _code_offsets.set_value(CodeOffsets::DeoptMH, emit_deopt_handler(*cb));
  1641   // One last check for failed CodeBuffer::expand:
  1642   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
  1643     turn_off_compiler(this);
  1644     return;
  1647 #ifndef PRODUCT
  1648   // Dump the assembly code, including basic-block numbers
  1649   if (print_assembly()) {
  1650     ttyLocker ttyl;  // keep the following output all in one block
  1651     if (!VMThread::should_terminate()) {  // test this under the tty lock
  1652       // This output goes directly to the tty, not the compiler log.
  1653       // To enable tools to match it up with the compilation activity,
  1654       // be sure to tag this tty output with the compile ID.
  1655       if (xtty != NULL) {
  1656         xtty->head("opto_assembly compile_id='%d'%s", compile_id(),
  1657                    is_osr_compilation()    ? " compile_kind='osr'" :
  1658                    "");
  1660       if (method() != NULL) {
  1661         method()->print_oop();
  1662         print_codes();
  1664       dump_asm(node_offsets, node_offset_limit);
  1665       if (xtty != NULL) {
  1666         xtty->tail("opto_assembly");
  1670 #endif
  1674 void Compile::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels) {
  1675   _inc_table.set_size(cnt);
  1677   uint inct_cnt = 0;
  1678   for( uint i=0; i<_cfg->_num_blocks; i++ ) {
  1679     Block *b = _cfg->_blocks[i];
  1680     Node *n = NULL;
  1681     int j;
  1683     // Find the branch; ignore trailing NOPs.
  1684     for( j = b->_nodes.size()-1; j>=0; j-- ) {
  1685       n = b->_nodes[j];
  1686       if( !n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con )
  1687         break;
  1690     // If we didn't find anything, continue
  1691     if( j < 0 ) continue;
  1693     // Compute ExceptionHandlerTable subtable entry and add it
  1694     // (skip empty blocks)
  1695     if( n->is_Catch() ) {
  1697       // Get the offset of the return from the call
  1698       uint call_return = call_returns[b->_pre_order];
  1699 #ifdef ASSERT
  1700       assert( call_return > 0, "no call seen for this basic block" );
  1701       while( b->_nodes[--j]->is_MachProj() ) ;
  1702       assert( b->_nodes[j]->is_MachCall(), "CatchProj must follow call" );
  1703 #endif
  1704       // The last instruction is a CatchNode; find its CatchProjNodes
  1705       int nof_succs = b->_num_succs;
  1706       // allocate space
  1707       GrowableArray<intptr_t> handler_bcis(nof_succs);
  1708       GrowableArray<intptr_t> handler_pcos(nof_succs);
  1709       // iterate through all successors
  1710       for (int j = 0; j < nof_succs; j++) {
  1711         Block* s = b->_succs[j];
  1712         bool found_p = false;
  1713         for( uint k = 1; k < s->num_preds(); k++ ) {
  1714           Node *pk = s->pred(k);
  1715           if( pk->is_CatchProj() && pk->in(0) == n ) {
  1716             const CatchProjNode* p = pk->as_CatchProj();
  1717             found_p = true;
  1718             // add the corresponding handler bci & pco information
  1719             if( p->_con != CatchProjNode::fall_through_index ) {
  1720               // p leads to an exception handler (and is not fall through)
  1721               assert(s == _cfg->_blocks[s->_pre_order],"bad numbering");
  1722               // no duplicates, please
  1723               if( !handler_bcis.contains(p->handler_bci()) ) {
  1724                 uint block_num = s->non_connector()->_pre_order;
  1725                 handler_bcis.append(p->handler_bci());
  1726                 handler_pcos.append(blk_labels[block_num].loc_pos());
  1731         assert(found_p, "no matching predecessor found");
  1732         // Note:  Due to empty block removal, one block may have
  1733         // several CatchProj inputs, from the same Catch.
  1736       // Set the offset of the return from the call
  1737       _handler_table.add_subtable(call_return, &handler_bcis, NULL, &handler_pcos);
  1738       continue;
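             // For illustration (hypothetical numbers): a call returning at
             // code offset 0x84 whose Catch routes to two distinct handlers
             // would add a subtable roughly equivalent to
             //
             //   _handler_table.add_subtable(0x84,
             //                               {12, 40},        // handler bcis
             //                               NULL,
             //                               {0x1a0, 0x1d4}); // handler pcos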
  1741     // Handle implicit null exception table updates
  1742     if( n->is_MachNullCheck() ) {
  1743       uint block_num = b->non_connector_successor(0)->_pre_order;
  1744       _inc_table.append( inct_starts[inct_cnt++], blk_labels[block_num].loc_pos() );
  1745       continue;
  1747   } // End of for all blocks fill in exception table entries
  1750 // Static Variables
  1751 #ifndef PRODUCT
  1752 uint Scheduling::_total_nop_size = 0;
  1753 uint Scheduling::_total_method_size = 0;
  1754 uint Scheduling::_total_branches = 0;
  1755 uint Scheduling::_total_unconditional_delays = 0;
  1756 uint Scheduling::_total_instructions_per_bundle[Pipeline::_max_instrs_per_cycle+1];
  1757 #endif
  1759 // Initializer for class Scheduling
  1761 Scheduling::Scheduling(Arena *arena, Compile &compile)
  1762   : _arena(arena),
  1763     _cfg(compile.cfg()),
  1764     _bbs(compile.cfg()->_bbs),
  1765     _regalloc(compile.regalloc()),
  1766     _reg_node(arena),
  1767     _bundle_instr_count(0),
  1768     _bundle_cycle_number(0),
  1769     _scheduled(arena),
  1770     _available(arena),
  1771     _next_node(NULL),
  1772     _bundle_use(0, 0, resource_count, &_bundle_use_elements[0]),
  1773     _pinch_free_list(arena)
  1774 #ifndef PRODUCT
  1775   , _branches(0)
  1776   , _unconditional_delays(0)
  1777 #endif
  1779   // Create a MachNopNode
  1780   _nop = new (&compile) MachNopNode();
  1782   // Now that the nops are in the array, save the count
  1783   // (but allow entries for the nops)
  1784   _node_bundling_limit = compile.unique();
  1785   uint node_max = _regalloc->node_regs_max_index();
  1787   compile.set_node_bundling_limit(_node_bundling_limit);
  1789   // This one is persistent within the Compile class
  1790   _node_bundling_base = NEW_ARENA_ARRAY(compile.comp_arena(), Bundle, node_max);
  1792   // Allocate space for fixed-size arrays
  1793   _node_latency    = NEW_ARENA_ARRAY(arena, unsigned short, node_max);
  1794   _uses            = NEW_ARENA_ARRAY(arena, short,          node_max);
  1795   _current_latency = NEW_ARENA_ARRAY(arena, unsigned short, node_max);
  1797   // Clear the arrays
  1798   memset(_node_bundling_base, 0, node_max * sizeof(Bundle));
  1799   memset(_node_latency,       0, node_max * sizeof(unsigned short));
  1800   memset(_uses,               0, node_max * sizeof(short));
  1801   memset(_current_latency,    0, node_max * sizeof(unsigned short));
  1803   // Clear the bundling information
  1804   memcpy(_bundle_use_elements,
  1805     Pipeline_Use::elaborated_elements,
  1806     sizeof(Pipeline_Use::elaborated_elements));
  1808   // Get the last node
  1809   Block *bb = _cfg->_blocks[_cfg->_blocks.size()-1];
  1811   _next_node = bb->_nodes[bb->_nodes.size()-1];
  1814 #ifndef PRODUCT
  1815 // Scheduling destructor
  1816 Scheduling::~Scheduling() {
  1817   _total_branches             += _branches;
  1818   _total_unconditional_delays += _unconditional_delays;
  1820 #endif
  1822 // Step ahead "i" cycles
  1823 void Scheduling::step(uint i) {
  1825   Bundle *bundle = node_bundling(_next_node);
  1826   bundle->set_starts_bundle();
  1828   // Update the bundle record, but leave the flags information alone
  1829   if (_bundle_instr_count > 0) {
  1830     bundle->set_instr_count(_bundle_instr_count);
  1831     bundle->set_resources_used(_bundle_use.resourcesUsed());
  1834   // Update the state information
  1835   _bundle_instr_count = 0;
  1836   _bundle_cycle_number += i;
  1837   _bundle_use.step(i);
  1840 void Scheduling::step_and_clear() {
  1841   Bundle *bundle = node_bundling(_next_node);
  1842   bundle->set_starts_bundle();
  1844   // Update the bundle record
  1845   if (_bundle_instr_count > 0) {
  1846     bundle->set_instr_count(_bundle_instr_count);
  1847     bundle->set_resources_used(_bundle_use.resourcesUsed());
  1849     _bundle_cycle_number += 1;
  1852   // Clear the bundling information
  1853   _bundle_instr_count = 0;
  1854   _bundle_use.reset();
  1856   memcpy(_bundle_use_elements,
  1857     Pipeline_Use::elaborated_elements,
  1858     sizeof(Pipeline_Use::elaborated_elements));
  1861 //------------------------------ScheduleAndBundle------------------------------
  1862 // Perform instruction scheduling and bundling over the sequence of
  1863 // instructions in backwards order.
  1864 void Compile::ScheduleAndBundle() {
  1866   // Don't optimize this if it isn't a method
  1867   if (!_method)
  1868     return;
  1870   // Don't optimize this if scheduling is disabled
  1871   if (!do_scheduling())
  1872     return;
  1874   NOT_PRODUCT( TracePhase t2("isched", &_t_instrSched, TimeCompiler); )
  1876   // Create a data structure for all the scheduling information
  1877   Scheduling scheduling(Thread::current()->resource_area(), *this);
  1879   // Walk backwards over all the basic blocks, computing the needed
  1880   // alignment and scheduling the instructions in each one.
  1881   scheduling.DoScheduling();
  1884 //------------------------------ComputeLocalLatenciesForward-------------------
  1885 // Compute the latency of all the instructions.  This is fairly simple,
  1886 // because we already have a legal ordering.  Walk over the instructions
  1887 // from first to last, and compute the latency of the instruction based
  1888 // on the latency of the preceding instruction(s).
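       // For illustration, the forward pass below computes, for each
       // schedulable node 'use' in the block,
       //
       //   _node_latency[use] = max(1, max over inputs k of
       //                               (_node_latency[def_k] + use->latency(k)))
       //
       // so an instruction whose two inputs have latencies 3 and 5, with an
       // edge latency of 1 on each input, ends up with latency 6.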
  1889 void Scheduling::ComputeLocalLatenciesForward(const Block *bb) {
  1890 #ifndef PRODUCT
  1891   if (_cfg->C->trace_opto_output())
  1892     tty->print("# -> ComputeLocalLatenciesForward\n");
  1893 #endif
  1895   // Walk over all the schedulable instructions
  1896   for( uint j=_bb_start; j < _bb_end; j++ ) {
  1898     // This is a kludge, forcing all latency calculations to start at 1.
  1899     // Used to allow latency 0 to force an instruction to the beginning
  1900     // of the bb
  1901     uint latency = 1;
  1902     Node *use = bb->_nodes[j];
  1903     uint nlen = use->len();
  1905     // Walk over all the inputs
  1906     for ( uint k=0; k < nlen; k++ ) {
  1907       Node *def = use->in(k);
  1908       if (!def)
  1909         continue;
  1911       uint l = _node_latency[def->_idx] + use->latency(k);
  1912       if (latency < l)
  1913         latency = l;
  1916     _node_latency[use->_idx] = latency;
  1918 #ifndef PRODUCT
  1919     if (_cfg->C->trace_opto_output()) {
  1920       tty->print("# latency %4d: ", latency);
  1921       use->dump();
  1923 #endif
  1926 #ifndef PRODUCT
  1927   if (_cfg->C->trace_opto_output())
  1928     tty->print("# <- ComputeLocalLatenciesForward\n");
  1929 #endif
  1931 } // end ComputeLocalLatenciesForward
  1933 // See if this node fits into the present instruction bundle
  1934 bool Scheduling::NodeFitsInBundle(Node *n) {
  1935   uint n_idx = n->_idx;
  1937   // If this is the unconditional delay instruction, then it fits
  1938   if (n == _unconditional_delay_slot) {
  1939 #ifndef PRODUCT
  1940     if (_cfg->C->trace_opto_output())
  1941       tty->print("#     NodeFitsInBundle [%4d]: TRUE; is in unconditional delay slot\n", n->_idx);
  1942 #endif
  1943     return (true);
  1946   // If the node cannot be scheduled this cycle, skip it
  1947   if (_current_latency[n_idx] > _bundle_cycle_number) {
  1948 #ifndef PRODUCT
  1949     if (_cfg->C->trace_opto_output())
  1950       tty->print("#     NodeFitsInBundle [%4d]: FALSE; latency %4d > %d\n",
  1951         n->_idx, _current_latency[n_idx], _bundle_cycle_number);
  1952 #endif
  1953     return (false);
  1956   const Pipeline *node_pipeline = n->pipeline();
  1958   uint instruction_count = node_pipeline->instructionCount();
  1959   if (node_pipeline->mayHaveNoCode() && n->size(_regalloc) == 0)
  1960     instruction_count = 0;
  1961   else if (node_pipeline->hasBranchDelay() && !_unconditional_delay_slot)
  1962     instruction_count++;
  1964   if (_bundle_instr_count + instruction_count > Pipeline::_max_instrs_per_cycle) {
  1965 #ifndef PRODUCT
  1966     if (_cfg->C->trace_opto_output())
  1967       tty->print("#     NodeFitsInBundle [%4d]: FALSE; too many instructions: %d > %d\n",
  1968         n->_idx, _bundle_instr_count + instruction_count, Pipeline::_max_instrs_per_cycle);
  1969 #endif
  1970     return (false);
  1973   // Don't allow non-machine nodes to be handled this way
  1974   if (!n->is_Mach() && instruction_count == 0)
  1975     return (false);
  1977   // See if there is any overlap
  1978   uint delay = _bundle_use.full_latency(0, node_pipeline->resourceUse());
  1980   if (delay > 0) {
  1981 #ifndef PRODUCT
  1982     if (_cfg->C->trace_opto_output())
  1983       tty->print("#     NodeFitsInBundle [%4d]: FALSE; functional units overlap\n", n_idx);
  1984 #endif
  1985     return false;
  1988 #ifndef PRODUCT
  1989   if (_cfg->C->trace_opto_output())
  1990     tty->print("#     NodeFitsInBundle [%4d]:  TRUE\n", n_idx);
  1991 #endif
  1993   return true;
  1996 Node * Scheduling::ChooseNodeToBundle() {
  1997   uint siz = _available.size();
  1999   if (siz == 0) {
  2001 #ifndef PRODUCT
  2002     if (_cfg->C->trace_opto_output())
  2003       tty->print("#   ChooseNodeToBundle: NULL\n");
  2004 #endif
  2005     return (NULL);
  2008   // Fast path, if only 1 instruction in the bundle
  2009   if (siz == 1) {
  2010 #ifndef PRODUCT
  2011     if (_cfg->C->trace_opto_output()) {
  2012       tty->print("#   ChooseNodeToBundle (only 1): ");
  2013       _available[0]->dump();
  2015 #endif
  2016     return (_available[0]);
  2019   // Don't bother, if the bundle is already full
  2020   if (_bundle_instr_count < Pipeline::_max_instrs_per_cycle) {
  2021     for ( uint i = 0; i < siz; i++ ) {
  2022       Node *n = _available[i];
  2024       // Skip projections, we'll handle them another way
  2025       if (n->is_Proj())
  2026         continue;
  2028       // This presupposes that instructions are inserted into the
  2029       // available list in a legal order; i.e., instructions that
  2030       // must come first are at the head of the list.
  2031       if (NodeFitsInBundle(n)) {
  2032 #ifndef PRODUCT
  2033         if (_cfg->C->trace_opto_output()) {
  2034           tty->print("#   ChooseNodeToBundle: ");
  2035           n->dump();
  2037 #endif
  2038         return (n);
  2043   // Nothing fits in this bundle, choose the highest priority
  2044 #ifndef PRODUCT
  2045   if (_cfg->C->trace_opto_output()) {
  2046     tty->print("#   ChooseNodeToBundle: ");
  2047     _available[0]->dump();
  2049 #endif
  2051   return _available[0];
  2054 //------------------------------AddNodeToAvailableList-------------------------
  2055 void Scheduling::AddNodeToAvailableList(Node *n) {
  2056   assert( !n->is_Proj(), "projections never directly made available" );
  2057 #ifndef PRODUCT
  2058   if (_cfg->C->trace_opto_output()) {
  2059     tty->print("#   AddNodeToAvailableList: ");
  2060     n->dump();
  2062 #endif
  2064   int latency = _current_latency[n->_idx];
  2066   // Insert in latency order (insertion sort)
  2067   uint i;
  2068   for ( i=0; i < _available.size(); i++ )
  2069     if (_current_latency[_available[i]->_idx] > latency)
  2070       break;
  2072   // Special Check for compares following branches
  2073   if( n->is_Mach() && _scheduled.size() > 0 ) {
  2074     int op = n->as_Mach()->ideal_Opcode();
  2075     Node *last = _scheduled[0];
  2076     if( last->is_MachIf() && last->in(1) == n &&
  2077         ( op == Op_CmpI ||
  2078           op == Op_CmpU ||
  2079           op == Op_CmpP ||
  2080           op == Op_CmpF ||
  2081           op == Op_CmpD ||
  2082           op == Op_CmpL ) ) {
  2084       // Recalculate position, moving to front of same latency
  2085       for ( i=0 ; i < _available.size(); i++ )
  2086         if (_current_latency[_available[i]->_idx] >= latency)
  2087           break;
  2091   // Insert the node in the available list
  2092   _available.insert(i, n);
  2094 #ifndef PRODUCT
  2095   if (_cfg->C->trace_opto_output())
  2096     dump_available();
  2097 #endif
  2100 //------------------------------DecrementUseCounts-----------------------------
  2101 void Scheduling::DecrementUseCounts(Node *n, const Block *bb) {
  2102   for ( uint i=0; i < n->len(); i++ ) {
  2103     Node *def = n->in(i);
  2104     if (!def) continue;
  2105     if( def->is_Proj() )        // If this is a machine projection, then
  2106       def = def->in(0);         // propagate usage thru to the base instruction
  2108     if( _bbs[def->_idx] != bb ) // Ignore if not block-local
  2109       continue;
  2111     // Compute the latency
  2112     uint l = _bundle_cycle_number + n->latency(i);
  2113     if (_current_latency[def->_idx] < l)
  2114       _current_latency[def->_idx] = l;
  2116     // If this does not have uses then schedule it
  2117     if ((--_uses[def->_idx]) == 0)
  2118       AddNodeToAvailableList(def);
  2122 //------------------------------AddNodeToBundle--------------------------------
  2123 void Scheduling::AddNodeToBundle(Node *n, const Block *bb) {
  2124 #ifndef PRODUCT
  2125   if (_cfg->C->trace_opto_output()) {
  2126     tty->print("#   AddNodeToBundle: ");
  2127     n->dump();
  2129 #endif
  2131   // Remove this from the available list
  2132   uint i;
  2133   for (i = 0; i < _available.size(); i++)
  2134     if (_available[i] == n)
  2135       break;
  2136   assert(i < _available.size(), "entry in _available list not found");
  2137   _available.remove(i);
  2139   // See if this fits in the current bundle
  2140   const Pipeline *node_pipeline = n->pipeline();
  2141   const Pipeline_Use& node_usage = node_pipeline->resourceUse();
  2143   // Check for instructions to be placed in the delay slot. We
  2144   // do this before we actually schedule the current instruction,
  2145   // because the delay slot follows the current instruction.
  2146   if (Pipeline::_branch_has_delay_slot &&
  2147       node_pipeline->hasBranchDelay() &&
  2148       !_unconditional_delay_slot) {
  2150     uint siz = _available.size();
  2152     // Conditional branches can support an instruction that
  2153     // is unconditionally executed and not dependent on the
  2154     // branch, OR a conditionally executed instruction if
  2155     // the branch is taken.  In practice, this means that
  2156     // the first instruction at the branch target is
  2157     // copied to the delay slot, and the branch goes to
  2158     // the instruction after that at the branch target.
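           // For illustration (SPARC-style delay slot, hypothetical registers):
           //
           //     cmp  %i0, 0
           //     be   .Ltarget
           //     add  %i1, 1, %i1    ! executes whether or not the branch is taken
           //
           // The code below looks for such an independent, single-unit
           // instruction on the available list and records it as the branch's
           // _unconditional_delay_slot.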
  2159     if ( n->is_MachBranch() ) {
  2161       assert( !n->is_MachNullCheck(), "should not look for delay slot for Null Check" );
  2162       assert( !n->is_Catch(),         "should not look for delay slot for Catch" );
  2164 #ifndef PRODUCT
  2165       _branches++;
  2166 #endif
  2168       // At least 1 instruction is on the available list
  2169       // that is not dependent on the branch
  2170       for (uint i = 0; i < siz; i++) {
  2171         Node *d = _available[i];
  2172         const Pipeline *avail_pipeline = d->pipeline();
  2174         // Don't allow safepoints in the branch shadow, that will
  2175         // cause a number of difficulties
  2176         if ( avail_pipeline->instructionCount() == 1 &&
  2177             !avail_pipeline->hasMultipleBundles() &&
  2178             !avail_pipeline->hasBranchDelay() &&
  2179             Pipeline::instr_has_unit_size() &&
  2180             d->size(_regalloc) == Pipeline::instr_unit_size() &&
  2181             NodeFitsInBundle(d) &&
  2182             !node_bundling(d)->used_in_delay()) {
  2184           if (d->is_Mach() && !d->is_MachSafePoint()) {
  2185             // A node that fits in the delay slot was found, so we need to
  2186             // set the appropriate bits in the bundle pipeline information so
  2187             // that it correctly indicates resource usage.  Later, when we
  2188             // attempt to add this instruction to the bundle, we will skip
  2189             // setting the resource usage.
  2190             _unconditional_delay_slot = d;
  2191             node_bundling(n)->set_use_unconditional_delay();
  2192             node_bundling(d)->set_used_in_unconditional_delay();
  2193             _bundle_use.add_usage(avail_pipeline->resourceUse());
  2194             _current_latency[d->_idx] = _bundle_cycle_number;
  2195             _next_node = d;
  2196             ++_bundle_instr_count;
  2197 #ifndef PRODUCT
  2198             _unconditional_delays++;
  2199 #endif
  2200             break;
  2206     // No delay slot, add a nop to the usage
  2207     if (!_unconditional_delay_slot) {
  2208       // See if adding an instruction in the delay slot will overflow
  2209       // the bundle.
  2210       if (!NodeFitsInBundle(_nop)) {
  2211 #ifndef PRODUCT
  2212         if (_cfg->C->trace_opto_output())
  2213           tty->print("#  *** STEP(1 instruction for delay slot) ***\n");
  2214 #endif
  2215         step(1);
  2218       _bundle_use.add_usage(_nop->pipeline()->resourceUse());
  2219       _next_node = _nop;
  2220       ++_bundle_instr_count;
  2223     // See if the instruction in the delay slot requires a
  2224     // step of the bundles
  2225     if (!NodeFitsInBundle(n)) {
  2226 #ifndef PRODUCT
  2227         if (_cfg->C->trace_opto_output())
  2228           tty->print("#  *** STEP(branch won't fit) ***\n");
  2229 #endif
  2230         // Update the state information
  2231         _bundle_instr_count = 0;
  2232         _bundle_cycle_number += 1;
  2233         _bundle_use.step(1);
  2237   // Get the number of instructions
  2238   uint instruction_count = node_pipeline->instructionCount();
  2239   if (node_pipeline->mayHaveNoCode() && n->size(_regalloc) == 0)
  2240     instruction_count = 0;
  2242   // Compute the latency information
  2243   uint delay = 0;
  2245   if (instruction_count > 0 || !node_pipeline->mayHaveNoCode()) {
  2246     int relative_latency = _current_latency[n->_idx] - _bundle_cycle_number;
  2247     if (relative_latency < 0)
  2248       relative_latency = 0;
  2250     delay = _bundle_use.full_latency(relative_latency, node_usage);
  2252     // Does not fit in this bundle, start a new one
  2253     if (delay > 0) {
  2254       step(delay);
  2256 #ifndef PRODUCT
  2257       if (_cfg->C->trace_opto_output())
  2258         tty->print("#  *** STEP(%d) ***\n", delay);
  2259 #endif
  2263   // If this was placed in the delay slot, ignore it
  2264   if (n != _unconditional_delay_slot) {
  2266     if (delay == 0) {
  2267       if (node_pipeline->hasMultipleBundles()) {
  2268 #ifndef PRODUCT
  2269         if (_cfg->C->trace_opto_output())
  2270           tty->print("#  *** STEP(multiple instructions) ***\n");
  2271 #endif
  2272         step(1);
  2275       else if (instruction_count + _bundle_instr_count > Pipeline::_max_instrs_per_cycle) {
  2276 #ifndef PRODUCT
  2277         if (_cfg->C->trace_opto_output())
  2278           tty->print("#  *** STEP(%d >= %d instructions) ***\n",
  2279             instruction_count + _bundle_instr_count,
  2280             Pipeline::_max_instrs_per_cycle);
  2281 #endif
  2282         step(1);
  2286     if (node_pipeline->hasBranchDelay() && !_unconditional_delay_slot)
  2287       _bundle_instr_count++;
  2289     // Set the node's latency
  2290     _current_latency[n->_idx] = _bundle_cycle_number;
  2292     // Now merge the functional unit information
  2293     if (instruction_count > 0 || !node_pipeline->mayHaveNoCode())
  2294       _bundle_use.add_usage(node_usage);
  2296     // Increment the number of instructions in this bundle
  2297     _bundle_instr_count += instruction_count;
  2299     // Remember this node for later
  2300     if (n->is_Mach())
  2301       _next_node = n;
  2304   // It's possible to have a BoxLock in the graph and in the _bbs mapping but
  2305   // not in the bb->_nodes array.  This happens for debug-info-only BoxLocks.
  2306   // 'Schedule' them (basically ignore in the schedule) but do not insert them
  2307   // into the block.  All other scheduled nodes get put in the schedule here.
  2308   int op = n->Opcode();
  2309   if( (op == Op_Node && n->req() == 0) || // anti-dependence node OR
  2310       (op != Op_Node &&         // Not an unused antidependence node and
  2311        // not an unallocated boxlock
  2312        (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) {
  2314     // Push any trailing projections
  2315     if( bb->_nodes[bb->_nodes.size()-1] != n ) {
  2316       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  2317         Node *foi = n->fast_out(i);
  2318         if( foi->is_Proj() )
  2319           _scheduled.push(foi);
  2323     // Put the instruction in the schedule list
  2324     _scheduled.push(n);
  2327 #ifndef PRODUCT
  2328   if (_cfg->C->trace_opto_output())
  2329     dump_available();
  2330 #endif
  2332   // Walk all the definitions, decrementing use counts, and
  2333   // if a definition has a 0 use count, place it in the available list.
  2334   DecrementUseCounts(n,bb);
  2337 //------------------------------ComputeUseCount--------------------------------
  2338 // This method sets the use count within a basic block.  We will ignore all
  2339 // uses outside the current basic block.  As we are doing a backwards walk,
  2340 // any node we reach that has a use count of 0 may be scheduled.  This also
  2341 // avoids the problem of cyclic references from phi nodes, as long as phi
  2342 // nodes are at the front of the basic block.  This method also initializes
  2343 // the available list to the set of instructions that have no uses within this
  2344 // basic block.
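       // For illustration (hypothetical block): given the schedulable sequence
       //
       //   n1:  a = load [x]
       //   n2:  b = a + 1
       //   n3:  store [y], b
       //
       // the walk records one block-local use for n1 (from n2) and one for n2
       // (from n3); n3 has no uses inside the block, so it seeds the available
       // list for the backwards scheduler.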
  2345 void Scheduling::ComputeUseCount(const Block *bb) {
  2346 #ifndef PRODUCT
  2347   if (_cfg->C->trace_opto_output())
  2348     tty->print("# -> ComputeUseCount\n");
  2349 #endif
  2351   // Clear the list of available and scheduled instructions, just in case
  2352   _available.clear();
  2353   _scheduled.clear();
  2355   // No delay slot specified
  2356   _unconditional_delay_slot = NULL;
  2358 #ifdef ASSERT
  2359   for( uint i=0; i < bb->_nodes.size(); i++ )
  2360     assert( _uses[bb->_nodes[i]->_idx] == 0, "_use array not clean" );
  2361 #endif
  2363   // Force the _uses count to never go to zero for unschedulable pieces
  2364   // of the block.
  2365   for( uint k = 0; k < _bb_start; k++ )
  2366     _uses[bb->_nodes[k]->_idx] = 1;
  2367   for( uint l = _bb_end; l < bb->_nodes.size(); l++ )
  2368     _uses[bb->_nodes[l]->_idx] = 1;
  2370   // Iterate backwards over the instructions in the block.  Don't count the
  2371   // branch projections at end or the block header instructions.
  2372   for( uint j = _bb_end-1; j >= _bb_start; j-- ) {
  2373     Node *n = bb->_nodes[j];
  2374     if( n->is_Proj() ) continue; // Projections handled another way
  2376     // Account for all uses
  2377     for ( uint k = 0; k < n->len(); k++ ) {
  2378       Node *inp = n->in(k);
  2379       if (!inp) continue;
  2380       assert(inp != n, "no cycles allowed" );
  2381       if( _bbs[inp->_idx] == bb ) { // Block-local use?
  2382         if( inp->is_Proj() )    // Skip through Proj's
  2383           inp = inp->in(0);
  2384         ++_uses[inp->_idx];     // Count 1 block-local use
  2388     // If this instruction has a 0 use count, then it is available
  2389     if (!_uses[n->_idx]) {
  2390       _current_latency[n->_idx] = _bundle_cycle_number;
  2391       AddNodeToAvailableList(n);
  2394 #ifndef PRODUCT
  2395     if (_cfg->C->trace_opto_output()) {
  2396       tty->print("#   uses: %3d: ", _uses[n->_idx]);
  2397       n->dump();
  2399 #endif
  2402 #ifndef PRODUCT
  2403   if (_cfg->C->trace_opto_output())
  2404     tty->print("# <- ComputeUseCount\n");
  2405 #endif
  2408 // This routine performs scheduling on each basic block in reverse order,
  2409 // using instruction latencies and taking into account functional unit
  2410 // availability.
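       // For illustration (hypothetical pipeline): if the machine description
       // exposes only one memory unit, two loads cannot share a bundle; the
       // second load's resource usage overlaps in the Pipeline_Use bookkeeping,
       // NodeFitsInBundle() rejects it, and step() advances to the next cycle
       // before it is scheduled.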
  2411 void Scheduling::DoScheduling() {
  2412 #ifndef PRODUCT
  2413   if (_cfg->C->trace_opto_output())
  2414     tty->print("# -> DoScheduling\n");
  2415 #endif
  2417   Block *succ_bb = NULL;
  2418   Block *bb;
  2420   // Walk over all the basic blocks in reverse order
  2421   for( int i=_cfg->_num_blocks-1; i >= 0; succ_bb = bb, i-- ) {
  2422     bb = _cfg->_blocks[i];
  2424 #ifndef PRODUCT
  2425     if (_cfg->C->trace_opto_output()) {
  2426       tty->print("#  Schedule BB#%03d (initial)\n", i);
  2427       for (uint j = 0; j < bb->_nodes.size(); j++)
  2428         bb->_nodes[j]->dump();
  2430 #endif
  2432     // On the head node, skip processing
  2433     if( bb == _cfg->_broot )
  2434       continue;
  2436     // Skip empty, connector blocks
  2437     if (bb->is_connector())
  2438       continue;
  2440     // If the following block is not the sole successor of
  2441     // this one, then reset the pipeline information
  2442     if (bb->_num_succs != 1 || bb->non_connector_successor(0) != succ_bb) {
  2443 #ifndef PRODUCT
  2444       if (_cfg->C->trace_opto_output()) {
  2445         tty->print("*** bundle start of next BB, node %d, for %d instructions\n",
  2446                    _next_node->_idx, _bundle_instr_count);
  2448 #endif
  2449       step_and_clear();
  2452     // Leave untouched the starting instruction, any Phis, a CreateEx node
  2453     // or Top.  bb->_nodes[_bb_start] is the first schedulable instruction.
  2454     _bb_end = bb->_nodes.size()-1;
  2455     for( _bb_start=1; _bb_start <= _bb_end; _bb_start++ ) {
  2456       Node *n = bb->_nodes[_bb_start];
  2457       // Things not matched, like PhiNodes and ProjNodes, don't get scheduled.
  2458       // Also, MachIdealNodes do not get scheduled.
  2459       if( !n->is_Mach() ) continue;     // Skip non-machine nodes
  2460       MachNode *mach = n->as_Mach();
  2461       int iop = mach->ideal_Opcode();
  2462       if( iop == Op_CreateEx ) continue; // CreateEx is pinned
  2463       if( iop == Op_Con ) continue;      // Do not schedule Top
  2464       if( iop == Op_Node &&     // Do not schedule PhiNodes, ProjNodes
  2465           mach->pipeline() == MachNode::pipeline_class() &&
  2466           !n->is_SpillCopy() )  // Breakpoints, Prolog, etc
  2467         continue;
  2468       break;                    // Funny loop structure to be sure...
  2470     // Compute last "interesting" instruction in block - last instruction we
  2471     // might schedule.  _bb_end points just after last schedulable inst.  We
  2472     // normally schedule conditional branches (despite them being forced last
  2473     // in the block), because they have delay slots we can fill.  Calls all
  2474     // have their delay slots filled in the template expansions, so we don't
  2475     // bother scheduling them.
  2476     Node *last = bb->_nodes[_bb_end];
  2477     // Ignore trailing NOPs.
  2478     while (_bb_end > 0 && last->is_Mach() &&
  2479            last->as_Mach()->ideal_Opcode() == Op_Con) {
  2480       last = bb->_nodes[--_bb_end];
  2482     assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, "");
  2483     if( last->is_Catch() ||
  2484        // Exclude unreachable path case when Halt node is in a separate block.
  2485        (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
  2486       // There must be a prior call.  Skip it.
  2487       while( !bb->_nodes[--_bb_end]->is_MachCall() ) {
  2488         assert( bb->_nodes[_bb_end]->is_MachProj(), "skipping projections after expected call" );
  2490     } else if( last->is_MachNullCheck() ) {
  2491       // Back up so the last null-checked memory instruction is
  2492       // outside the schedulable range. Skip over the nullcheck,
  2493       // projection, and the memory nodes.
  2494       Node *mem = last->in(1);
  2495       do {
  2496         _bb_end--;
  2497       } while (mem != bb->_nodes[_bb_end]);
  2498     } else {
  2499       // Set _bb_end to point after last schedulable inst.
  2500       _bb_end++;
  2503     assert( _bb_start <= _bb_end, "inverted block ends" );
  2505     // Compute the register antidependencies for the basic block
  2506     ComputeRegisterAntidependencies(bb);
  2507     if (_cfg->C->failing())  return;  // too many D-U pinch points
  2509     // Compute intra-bb latencies for the nodes
  2510     ComputeLocalLatenciesForward(bb);
  2512     // Compute the usage within the block, and set the list of all nodes
  2513     // in the block that have no uses within the block.
  2514     ComputeUseCount(bb);
  2516     // Schedule the remaining instructions in the block
  2517     while ( _available.size() > 0 ) {
  2518       Node *n = ChooseNodeToBundle();
  2519       AddNodeToBundle(n,bb);
  2522     assert( _scheduled.size() == _bb_end - _bb_start, "wrong number of instructions" );
  2523 #ifdef ASSERT
  2524     for( uint l = _bb_start; l < _bb_end; l++ ) {
  2525       Node *n = bb->_nodes[l];
  2526       uint m;
  2527       for( m = 0; m < _bb_end-_bb_start; m++ )
  2528         if( _scheduled[m] == n )
  2529           break;
  2530       assert( m < _bb_end-_bb_start, "instruction missing in schedule" );
  2532 #endif
  2534     // Now copy the instructions (in reverse order) back to the block
  2535     for ( uint k = _bb_start; k < _bb_end; k++ )
  2536       bb->_nodes.map(k, _scheduled[_bb_end-k-1]);
  2538 #ifndef PRODUCT
  2539     if (_cfg->C->trace_opto_output()) {
  2540       tty->print("#  Schedule BB#%03d (final)\n", i);
  2541       uint current = 0;
  2542       for (uint j = 0; j < bb->_nodes.size(); j++) {
  2543         Node *n = bb->_nodes[j];
  2544         if( valid_bundle_info(n) ) {
  2545           Bundle *bundle = node_bundling(n);
  2546           if (bundle->instr_count() > 0 || bundle->flags() > 0) {
  2547             tty->print("*** Bundle: ");
  2548             bundle->dump();
  2550           n->dump();
  2554 #endif
  2555 #ifdef ASSERT
  2556   verify_good_schedule(bb,"after block local scheduling");
  2557 #endif
  2560 #ifndef PRODUCT
  2561   if (_cfg->C->trace_opto_output())
  2562     tty->print("# <- DoScheduling\n");
  2563 #endif
  2565   // Record final node-bundling array location
  2566   _regalloc->C->set_node_bundling_base(_node_bundling_base);
  2568 } // end DoScheduling
  2570 //------------------------------verify_good_schedule---------------------------
  2571 // Verify that no live-range used in the block is killed in the block by a
  2572 // wrong DEF.  This doesn't verify live-ranges that span blocks.
  2574 // Check for edge existence.  Used to avoid adding redundant precedence edges.
  2575 static bool edge_from_to( Node *from, Node *to ) {
  2576   for( uint i=0; i<from->len(); i++ )
  2577     if( from->in(i) == to )
  2578       return true;
  2579   return false;
  2582 #ifdef ASSERT
  2583 //------------------------------verify_do_def----------------------------------
  2584 void Scheduling::verify_do_def( Node *n, OptoReg::Name def, const char *msg ) {
  2585   // Check for bad kills
  2586   if( OptoReg::is_valid(def) ) { // Ignore stores & control flow
  2587     Node *prior_use = _reg_node[def];
  2588     if( prior_use && !edge_from_to(prior_use,n) ) {
  2589       tty->print("%s = ",OptoReg::as_VMReg(def)->name());
  2590       n->dump();
  2591       tty->print_cr("...");
  2592       prior_use->dump();
  2593       assert(edge_from_to(prior_use,n),msg);
  2595     _reg_node.map(def,NULL); // Kill live USEs
  2599 //------------------------------verify_good_schedule---------------------------
  2600 void Scheduling::verify_good_schedule( Block *b, const char *msg ) {
  2602   // Zap to something reasonable for the verify code
  2603   _reg_node.clear();
  2605   // Walk over the block backwards.  Check to make sure each DEF doesn't
  2606   // kill a live value (other than the one it's supposed to).  Add each
  2607   // USE to the live set.
  2608   for( uint i = b->_nodes.size()-1; i >= _bb_start; i-- ) {
  2609     Node *n = b->_nodes[i];
  2610     int n_op = n->Opcode();
  2611     if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
  2612       // Fat-proj kills a slew of registers
  2613       RegMask rm = n->out_RegMask();// Make local copy
  2614       while( rm.is_NotEmpty() ) {
  2615         OptoReg::Name kill = rm.find_first_elem();
  2616         rm.Remove(kill);
  2617         verify_do_def( n, kill, msg );
  2619     } else if( n_op != Op_Node ) { // Avoid brand new antidependence nodes
  2620       // Get DEF'd registers the normal way
  2621       verify_do_def( n, _regalloc->get_reg_first(n), msg );
  2622       verify_do_def( n, _regalloc->get_reg_second(n), msg );
  2625     // Now make all USEs live
  2626     for( uint i=1; i<n->req(); i++ ) {
  2627       Node *def = n->in(i);
  2628       assert(def != 0, "input edge required");
  2629       OptoReg::Name reg_lo = _regalloc->get_reg_first(def);
  2630       OptoReg::Name reg_hi = _regalloc->get_reg_second(def);
  2631       if( OptoReg::is_valid(reg_lo) ) {
  2632         assert(!_reg_node[reg_lo] || edge_from_to(_reg_node[reg_lo],def), msg);
  2633         _reg_node.map(reg_lo,n);
  2635       if( OptoReg::is_valid(reg_hi) ) {
  2636         assert(!_reg_node[reg_hi] || edge_from_to(_reg_node[reg_hi],def), msg);
  2637         _reg_node.map(reg_hi,n);
  2643   // Zap to something reasonable for the Antidependence code
  2644   _reg_node.clear();
  2646 #endif
  2648 // Conditionally add precedence edges.  Avoid putting edges on Projs.
  2649 static void add_prec_edge_from_to( Node *from, Node *to ) {
  2650   if( from->is_Proj() ) {       // Put precedence edge on Proj's input
  2651     assert( from->req() == 1 && (from->len() == 1 || from->in(1)==0), "no precedence edges on projections" );
  2652     from = from->in(0);
  2654   if( from != to &&             // No cycles (for things like LD L0,[L0+4] )
  2655       !edge_from_to( from, to ) ) // Avoid duplicate edge
  2656     from->add_prec(to);
  2659 //------------------------------anti_do_def------------------------------------
void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def ) {
  if( !OptoReg::is_valid(def_reg) ) // Ignore stores & control flow
    return;

  Node *pinch = _reg_node[def_reg]; // Get pinch point
  if( !pinch || _bbs[pinch->_idx] != b || // No pinch-point yet?
      is_def ) {    // Check for a true def (not a kill)
    _reg_node.map(def_reg,def); // Record def/kill as the optimistic pinch-point
    return;
  }

  Node *kill = def;             // Rename 'def' to more descriptive 'kill'
  debug_only( def = (Node*)0xdeadbeef; )

  // After some number of kills there _may_ be a later def
  Node *later_def = NULL;

  // Finding a kill requires a real pinch-point.
  // Check for not already having a pinch-point.
  // Pinch points are Op_Node's.
  if( pinch->Opcode() != Op_Node ) { // Or later-def/kill as pinch-point?
    later_def = pinch;            // Must be def/kill as optimistic pinch-point
    if ( _pinch_free_list.size() > 0) {
      pinch = _pinch_free_list.pop();
    } else {
      pinch = new (_cfg->C, 1) Node(1); // Pinch point to-be
    }
    if (pinch->_idx >= _regalloc->node_regs_max_index()) {
      _cfg->C->record_method_not_compilable("too many D-U pinch points");
      return;
    }
    _bbs.map(pinch->_idx,b);      // Pretend it's valid in this block (lazy init)
    _reg_node.map(def_reg,pinch); // Record pinch-point
    //_regalloc->set_bad(pinch->_idx); // Already initialized this way.
    if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill
      pinch->init_req(0, _cfg->C->top());     // set not NULL for the next call
      add_prec_edge_from_to(later_def,pinch); // Add edge from kill to pinch
      later_def = NULL;           // and no later def
    }
    pinch->set_req(0,later_def);  // Hook later def so we can find it
  } else {                        // Else have valid pinch point
    if( pinch->in(0) )            // If there is a later-def
      later_def = pinch->in(0);   // Get it
  }

  // Add output-dependence edge from later def to kill
  if( later_def )               // If there is some original def
    add_prec_edge_from_to(later_def,kill); // Add edge from def to kill

  // See if current kill is also a use, and so is forced to be the pinch-point.
  if( pinch->Opcode() == Op_Node ) {
    Node *uses = kill->is_Proj() ? kill->in(0) : kill;
    for( uint i=1; i<uses->req(); i++ ) {
      if( _regalloc->get_reg_first(uses->in(i)) == def_reg ||
          _regalloc->get_reg_second(uses->in(i)) == def_reg ) {
        // Yes, found a use/kill pinch-point
        pinch->set_req(0,NULL);
        pinch->replace_by(kill); // Move anti-dep edges up
        pinch = kill;
        _reg_node.map(def_reg,pinch);
        return;
      }
    }
  }

  // Add edge from kill to pinch-point
  add_prec_edge_from_to(kill,pinch);
}

//------------------------------anti_do_use------------------------------------
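// Make sure the USE of 'use_reg' by 'use' is ordered before any later
// def/kill of that register recorded in _reg_node.  If the pinch node has
// not been placed in the block yet (req() == 1), drop it in right after
// this use so later defs/kills seen by the backwards walk can attach to it.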
void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) {
  if( !OptoReg::is_valid(use_reg) ) // Ignore stores & control flow
    return;
  Node *pinch = _reg_node[use_reg]; // Get pinch point
  // Check for no later def_reg/kill in block
  if( pinch && _bbs[pinch->_idx] == b &&
      // Use has to be block-local as well
      _bbs[use->_idx] == b ) {
    if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
        pinch->req() == 1 ) {   // pinch not yet in block?
      pinch->del_req(0);        // yank pointer to later-def, also set flag
      // Insert the pinch-point in the block just after the last use
      b->_nodes.insert(b->find_node(use)+1,pinch);
      _bb_end++;                // Increase size scheduled region in block
    }

    add_prec_edge_from_to(pinch,use);
  }
}

//------------------------------ComputeRegisterAntidependencies-----------------
// We insert antidependences between the reads and following write of
// allocated registers to prevent illegal code motion.  The number of added
// edges should be fairly small, since we only add references within the
// current basic block.
void Scheduling::ComputeRegisterAntidependencies(Block *b) {

#ifdef ASSERT
  verify_good_schedule(b,"before block local scheduling");
#endif

  // A valid schedule, for each register independently, is an endless cycle
  // of: a def, then some uses (connected to the def by true dependencies),
  // then some kills (defs with no uses), finally the cycle repeats with a new
  // def.  The uses are allowed to float relative to each other, as are the
  // kills.  No use is allowed to slide past a kill (or def).  This requires
  // antidependencies between all uses of a single def and all kills that
  // follow, up to the next def.  More edges are redundant, because later defs
  // & kills are already serialized with true or antidependencies.  To keep
  // the edge count down, we add a 'pinch point' node if there's more than
  // one use or more than one kill/def.

  // We add dependencies in one bottom-up pass.

  // For each instruction we handle its DEFs/KILLs, then its USEs.

  // For each DEF/KILL, we check to see if there's a prior DEF/KILL for this
  // register.  If not, we record the DEF/KILL in _reg_node, the
  // register-to-def mapping.  If there is a prior DEF/KILL, we insert a
  // "pinch point", a new Node that's in the graph but not in the block.
  // We put edges from the prior and current DEF/KILLs to the pinch point.
  // We put the pinch point in _reg_node.  If there's already a pinch point
  // we merely add an edge from the current DEF/KILL to the pinch point.

  // After doing the DEF/KILLs, we handle USEs.  For each used register, we
  // put an edge from the pinch point to the USE.

  // To be expedient, the _reg_node array is pre-allocated for the whole
  // compilation.  _reg_node is lazily initialized; it either contains a NULL,
  // or a valid def/kill/pinch-point, or a leftover node from some prior
  // block.  A leftover node from a prior block is treated like a NULL (no
  // prior def, so no anti-dependence is needed).  A valid def is recognized
  // by its being in the current block.
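  // For example, a block containing (in program order) a def of register R,
  // two uses of R, and then two calls that each kill R is walked from the
  // bottom up: the lower call's kill is recorded first, the upper call's
  // kill triggers creation of a pinch node with both kills hanging below it,
  // and each use then gets a single edge to the pinch node -- the
  // use -> pinch -> kill picture drawn before garbage_collect_pinch_nodes().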
  bool fat_proj_seen = false;
  uint last_safept = _bb_end-1;
  Node* end_node         = (_bb_end-1 >= _bb_start) ? b->_nodes[last_safept] : NULL;
  Node* last_safept_node = end_node;
  for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
    Node *n = b->_nodes[i];
    int is_def = n->outcnt();   // def if some uses prior to adding precedence edges
    if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
      // Fat-proj kills a slew of registers
      // This can add edges to 'n' and obscure whether or not it was a def,
      // hence the is_def flag.
      fat_proj_seen = true;
      RegMask rm = n->out_RegMask();// Make local copy
      while( rm.is_NotEmpty() ) {
        OptoReg::Name kill = rm.find_first_elem();
        rm.Remove(kill);
        anti_do_def( b, n, kill, is_def );
      }
    } else {
      // Get DEF'd registers the normal way
      anti_do_def( b, n, _regalloc->get_reg_first(n), is_def );
      anti_do_def( b, n, _regalloc->get_reg_second(n), is_def );
    }

    // Kill projections on a branch should appear to occur on the
    // branch, not afterwards, so grab the masks from the projections
    // and process them.
    if (n->is_MachBranch() || (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_Jump)) {
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* use = n->fast_out(i);
        if (use->is_Proj()) {
          RegMask rm = use->out_RegMask();// Make local copy
          while( rm.is_NotEmpty() ) {
            OptoReg::Name kill = rm.find_first_elem();
            rm.Remove(kill);
            anti_do_def( b, n, kill, false );
          }
        }
      }
    }

    // Check each register used by this instruction for a following DEF/KILL
    // that must occur afterward and requires an anti-dependence edge.
    for( uint j=0; j<n->req(); j++ ) {
      Node *def = n->in(j);
      if( def ) {
        assert( !def->is_MachProj() || def->ideal_reg() != MachProjNode::fat_proj, "" );
        anti_do_use( b, n, _regalloc->get_reg_first(def) );
        anti_do_use( b, n, _regalloc->get_reg_second(def) );
      }
    }
    // Do not allow defs of new derived values to float above GC
    // points unless the base is definitely available at the GC point.

    Node *m = b->_nodes[i];

    // Add precedence edge from following safepoint to use of derived pointer
    if( last_safept_node != end_node &&
        m != last_safept_node) {
      for (uint k = 1; k < m->req(); k++) {
        const Type *t = m->in(k)->bottom_type();
        if( t->isa_oop_ptr() &&
            t->is_ptr()->offset() != 0 ) {
          last_safept_node->add_prec( m );
          break;
        }
      }
    }

    if( n->jvms() ) {           // Precedence edge from derived to safept
      // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
      if( b->_nodes[last_safept] != last_safept_node ) {
        last_safept = b->find_node(last_safept_node);
      }
      for( uint j=last_safept; j > i; j-- ) {
        Node *mach = b->_nodes[j];
        if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
          mach->add_prec( n );
      }
      last_safept = i;
      last_safept_node = m;
    }
  }

  if (fat_proj_seen) {
    // Garbage collect pinch nodes that were not consumed.
    // They are usually created by a fat kill MachProj for a call.
    garbage_collect_pinch_nodes();
  }
}

//------------------------------garbage_collect_pinch_nodes-------------------------------

// Garbage collect pinch nodes for reuse by other blocks.
//
// The block scheduler's insertion of anti-dependence
// edges creates many pinch nodes when the block contains
// 2 or more Calls.  A pinch node is used to prevent a
// combinatorial explosion of edges.  If a set of kills for a
// register is anti-dependent on a set of uses (or defs), rather
// than adding an edge in the graph between each pair of kill
// and use (or def), a pinch is inserted between them:
//
//            use1   use2  use3
//                \   |   /
//                 \  |  /
//                  pinch
//                 /  |  \
//                /   |   \
//            kill1 kill2 kill3
//
// One pinch node is created per register killed when
// the second call is encountered during a backwards pass
// over the block.  Most of these pinch nodes are never
// wired into the graph because the register is never
// used or def'ed in the block.
//
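// Pinch nodes that did get wired into the graph are left alone; the unused
// ones are detached and pushed onto _pinch_free_list so that anti_do_def()
// can reuse them for later blocks instead of allocating fresh Nodes.
//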
void Scheduling::garbage_collect_pinch_nodes() {
#ifndef PRODUCT
    if (_cfg->C->trace_opto_output()) tty->print("Reclaimed pinch nodes:");
#endif
    int trace_cnt = 0;
    for (uint k = 0; k < _reg_node.Size(); k++) {
      Node* pinch = _reg_node[k];
      if (pinch != NULL && pinch->Opcode() == Op_Node &&
          // no precedence input edges
          (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) {
        cleanup_pinch(pinch);
        _pinch_free_list.push(pinch);
        _reg_node.map(k, NULL);
#ifndef PRODUCT
        if (_cfg->C->trace_opto_output()) {
          trace_cnt++;
          if (trace_cnt > 40) {
            tty->print("\n");
            trace_cnt = 0;
          }
          tty->print(" %d", pinch->_idx);
        }
#endif
      }
    }
#ifndef PRODUCT
    if (_cfg->C->trace_opto_output()) tty->print("\n");
#endif
}

// Clean up a pinch node for reuse.
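// Detach the node so it can be handed out again by anti_do_def(): remove
// every precedence edge that still points at it from its users, then clear
// the in(0) slot that may still reference a later-def.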
void Scheduling::cleanup_pinch( Node *pinch ) {
  assert(pinch && pinch->Opcode() == Op_Node && pinch->req() == 1, "just checking");

  for (DUIterator_Last imin, i = pinch->last_outs(imin); i >= imin; ) {
    Node* use = pinch->last_out(i);
    uint uses_found = 0;
    for (uint j = use->req(); j < use->len(); j++) {
      if (use->in(j) == pinch) {
        use->rm_prec(j);
        uses_found++;
      }
    }
    assert(uses_found > 0, "must be a precedence edge");
    i -= uses_found;    // we deleted 1 or more copies of this edge
  }
  // May have a later_def entry
  pinch->set_req(0, NULL);
}

//------------------------------print_statistics-------------------------------
#ifndef PRODUCT

void Scheduling::dump_available() const {
  tty->print("#Availist  ");
  for (uint i = 0; i < _available.size(); i++)
    tty->print(" N%d/l%d", _available[i]->_idx,_current_latency[_available[i]->_idx]);
  tty->cr();
}

// Print Scheduling Statistics
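// Reports the accumulated cost of bundling: bytes of nops inserted (and the
// percentage of total code size they represent), how many branch delay
// slots were filled on pipelines that have them, and the average number of
// instructions issued per bundle (a rough ILP figure, nops excluded).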
void Scheduling::print_statistics() {
  // Print the size added by nops for bundling
  tty->print("Nops added %d bytes to total of %d bytes",
    _total_nop_size, _total_method_size);
  if (_total_method_size > 0)
    tty->print(", for %.2f%%",
      ((double)_total_nop_size) / ((double) _total_method_size) * 100.0);
  tty->print("\n");

  // Print the number of branch shadows filled
  if (Pipeline::_branch_has_delay_slot) {
    tty->print("Of %d branches, %d had unconditional delay slots filled",
      _total_branches, _total_unconditional_delays);
    if (_total_branches > 0)
      tty->print(", for %.2f%%",
        ((double)_total_unconditional_delays) / ((double)_total_branches) * 100.0);
    tty->print("\n");
  }

  uint total_instructions = 0, total_bundles = 0;

  for (uint i = 1; i <= Pipeline::_max_instrs_per_cycle; i++) {
    uint bundle_count   = _total_instructions_per_bundle[i];
    total_instructions += bundle_count * i;
    total_bundles      += bundle_count;
  }

  if (total_bundles > 0)
    tty->print("Average ILP (excluding nops) is %.2f\n",
      ((double)total_instructions) / ((double)total_bundles));
}
#endif
