src/share/vm/opto/output.cpp

author:    thartmann
date:      Tue, 29 Jul 2014 13:54:16 +0200
changeset: 7001:b6a8cc1e0d92 (parent 6723:0bf37f737702, child 7003:69ea58782b1a)

8040121: Load variable through a pointer of an incompatible type in src/hotspot/src/share/vm: opto/output.cpp, runtime/sharedRuntimeTrans.cpp, utilities/globalDefinitions_visCPP.hpp
Summary: Fixed parfait warnings in globalDefinitions files by using a union for casts.
Reviewed-by: kvn
     1 /*
     2  * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "asm/assembler.inline.hpp"
    27 #include "code/compiledIC.hpp"
    28 #include "code/debugInfo.hpp"
    29 #include "code/debugInfoRec.hpp"
    30 #include "compiler/compileBroker.hpp"
    31 #include "compiler/oopMap.hpp"
    32 #include "memory/allocation.inline.hpp"
    33 #include "opto/callnode.hpp"
    34 #include "opto/cfgnode.hpp"
    35 #include "opto/locknode.hpp"
    36 #include "opto/machnode.hpp"
    37 #include "opto/output.hpp"
    38 #include "opto/regalloc.hpp"
    39 #include "opto/runtime.hpp"
    40 #include "opto/subnode.hpp"
    41 #include "opto/type.hpp"
    42 #include "runtime/handles.inline.hpp"
    43 #include "utilities/xmlstream.hpp"
    45 #ifndef PRODUCT
    46 #define DEBUG_ARG(x) , x
    47 #else
    48 #define DEBUG_ARG(x)
    49 #endif
    51 // Convert Nodes to instruction bits and pass off to the VM
    52 void Compile::Output() {
    53   // RootNode goes
    54   assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
    56   // The number of new nodes (mostly MachNop) is proportional to
    57   // the number of java calls and inner loops which are aligned.
    58   if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
    59                             C->inner_loops()*(OptoLoopAlignment-1)),
    60                            "out of nodes before code generation" ) ) {
    61     return;
    62   }
    63   // Make sure I can find the Start Node
    64   Block *entry = _cfg->get_block(1);
    65   Block *broot = _cfg->get_root_block();
    67   const StartNode *start = entry->head()->as_Start();
    69   // Replace StartNode with prolog
    70   MachPrologNode *prolog = new (this) MachPrologNode();
    71   entry->map_node(prolog, 0);
    72   _cfg->map_node_to_block(prolog, entry);
    73   _cfg->unmap_node_from_block(start); // start is no longer in any block
    75   // Virtual methods need an unverified entry point
    77   if( is_osr_compilation() ) {
    78     if( PoisonOSREntry ) {
    79       // TODO: Should use a ShouldNotReachHereNode...
    80       _cfg->insert( broot, 0, new (this) MachBreakpointNode() );
    81     }
    82   } else {
    83     if( _method && !_method->flags().is_static() ) {
    84       // Insert unvalidated entry point
    85       _cfg->insert( broot, 0, new (this) MachUEPNode() );
    86     }
    88   }
    91   // Break before main entry point
    92   if( (_method && _method->break_at_execute())
    93 #ifndef PRODUCT
    94     ||(OptoBreakpoint && is_method_compilation())
    95     ||(OptoBreakpointOSR && is_osr_compilation())
    96     ||(OptoBreakpointC2R && !_method)
    97 #endif
    98     ) {
    99     // checking for _method means that OptoBreakpoint does not apply to
   100     // runtime stubs or frame converters
   101     _cfg->insert( entry, 1, new (this) MachBreakpointNode() );
   102   }
   104   // Insert epilogs before every return
   105   for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
   106     Block* block = _cfg->get_block(i);
   107     if (!block->is_connector() && block->non_connector_successor(0) == _cfg->get_root_block()) { // Found a program exit point?
   108       Node* m = block->end();
   109       if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
   110         MachEpilogNode* epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
   111         block->add_inst(epilog);
   112         _cfg->map_node_to_block(epilog, block);
   113       }
   114     }
   115   }
   117 # ifdef ENABLE_ZAP_DEAD_LOCALS
   118   if (ZapDeadCompiledLocals) {
   119     Insert_zap_nodes();
   120   }
   121 # endif
   123   uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1);
   124   blk_starts[0] = 0;
   126   // Initialize code buffer and process short branches.
   127   CodeBuffer* cb = init_buffer(blk_starts);
   129   if (cb == NULL || failing()) {
   130     return;
   131   }
   133   ScheduleAndBundle();
   135 #ifndef PRODUCT
   136   if (trace_opto_output()) {
   137     tty->print("\n---- After ScheduleAndBundle ----\n");
   138     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
   139       tty->print("\nBB#%03d:\n", i);
   140       Block* block = _cfg->get_block(i);
   141       for (uint j = 0; j < block->number_of_nodes(); j++) {
   142         Node* n = block->get_node(j);
   143         OptoReg::Name reg = _regalloc->get_reg_first(n);
   144         tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
   145         n->dump();
   146       }
   147     }
   148   }
   149 #endif
   151   if (failing()) {
   152     return;
   153   }
   155   BuildOopMaps();
   157   if (failing())  {
   158     return;
   159   }
   161   fill_buffer(cb, blk_starts);
   162 }
   164 bool Compile::need_stack_bang(int frame_size_in_bytes) const {
   165   // Determine if we need to generate a stack overflow check.
   166   // Do it if the method is not a stub function and
   167   // has java calls or has frame size > vm_page_size/8.
   168   // The debug VM checks that deoptimization doesn't trigger an
   169   // unexpected stack overflow (compiled method stack banging should
   170   // guarantee it doesn't happen) so we always need the stack bang in
   171   // a debug VM.
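  // Illustrative example (hypothetical numbers; the real page size is
  // queried from the OS at runtime): with a 4096-byte page, a method with
  // no Java calls only gets a stack bang once its frame exceeds
  // 4096/8 = 512 bytes.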
   172   return (UseStackBanging && stub_function() == NULL &&
   173           (has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3
   174            DEBUG_ONLY(|| true)));
   175 }
   177 bool Compile::need_register_stack_bang() const {
   178   // Determine if we need to generate a register stack overflow check.
   179   // This is only used on architectures which have split register
   180   // and memory stacks (ie. IA64).
   181   // Bang if the method is not a stub function and has java calls
   182   return (stub_function() == NULL && has_java_calls());
   183 }
   185 # ifdef ENABLE_ZAP_DEAD_LOCALS
   188 // In order to catch compiler oop-map bugs, we have implemented
   189 // a debugging mode called ZapDeadCompilerLocals.
   190 // This mode causes the compiler to insert a call to a runtime routine,
   191 // "zap_dead_locals", right before each place in compiled code
   192 // that could potentially be a gc-point (i.e., a safepoint or oop map point).
   193 // The runtime routine checks that locations mapped as oops are really
   194 // oops, that locations mapped as values do not look like oops,
   195 // and that locations mapped as dead are not used later
   196 // (by zapping them to an invalid address).
   198 int Compile::_CompiledZap_count = 0;
   200 void Compile::Insert_zap_nodes() {
   201   bool skip = false;
    204   // Dink with static counts because code without the extra
   205   // runtime calls is MUCH faster for debugging purposes
   207        if ( CompileZapFirst  ==  0  ) ; // nothing special
   208   else if ( CompileZapFirst  >  CompiledZap_count() )  skip = true;
   209   else if ( CompileZapFirst  == CompiledZap_count() )
   210     warning("starting zap compilation after skipping");
   212        if ( CompileZapLast  ==  -1  ) ; // nothing special
   213   else if ( CompileZapLast  <   CompiledZap_count() )  skip = true;
   214   else if ( CompileZapLast  ==  CompiledZap_count() )
   215     warning("about to compile last zap");
   217   ++_CompiledZap_count; // counts skipped zaps, too
   219   if ( skip )  return;
   222   if ( _method == NULL )
    223     return; // no safepoints/oopmaps emitted for calls in stubs, so we don't care
   225   // Insert call to zap runtime stub before every node with an oop map
   226   for( uint i=0; i<_cfg->number_of_blocks(); i++ ) {
   227     Block *b = _cfg->get_block(i);
   228     for ( uint j = 0;  j < b->number_of_nodes();  ++j ) {
   229       Node *n = b->get_node(j);
    231       // Determine whether we should insert a zap-a-lot node in output.
    232       // We do that for all nodes that have oopmap info, except for calls
    233       // to allocation.  Calls to allocation pass in the old top-of-eden pointer
    234       // and expect the C code to reset it.  Hence, there can be no safepoints between
    235       // the inlined allocation and the call to new_Java, etc.
   236       // We also cannot zap monitor calls, as they must hold the microlock
   237       // during the call to Zap, which also wants to grab the microlock.
   238       bool insert = n->is_MachSafePoint() && (n->as_MachSafePoint()->oop_map() != NULL);
   239       if ( insert ) { // it is MachSafePoint
   240         if ( !n->is_MachCall() ) {
   241           insert = false;
   242         } else if ( n->is_MachCall() ) {
   243           MachCallNode* call = n->as_MachCall();
   244           if (call->entry_point() == OptoRuntime::new_instance_Java() ||
   245               call->entry_point() == OptoRuntime::new_array_Java() ||
   246               call->entry_point() == OptoRuntime::multianewarray2_Java() ||
   247               call->entry_point() == OptoRuntime::multianewarray3_Java() ||
   248               call->entry_point() == OptoRuntime::multianewarray4_Java() ||
   249               call->entry_point() == OptoRuntime::multianewarray5_Java() ||
   250               call->entry_point() == OptoRuntime::slow_arraycopy_Java() ||
   251               call->entry_point() == OptoRuntime::complete_monitor_locking_Java()
   252               ) {
   253             insert = false;
   254           }
   255         }
   256         if (insert) {
   257           Node *zap = call_zap_node(n->as_MachSafePoint(), i);
   258           b->insert_node(zap, j);
   259           _cfg->map_node_to_block(zap, b);
   260           ++j;
   261         }
   262       }
   263     }
   264   }
   265 }
   268 Node* Compile::call_zap_node(MachSafePointNode* node_to_check, int block_no) {
   269   const TypeFunc *tf = OptoRuntime::zap_dead_locals_Type();
   270   CallStaticJavaNode* ideal_node =
   271     new (this) CallStaticJavaNode( tf,
   272          OptoRuntime::zap_dead_locals_stub(_method->flags().is_native()),
   273                        "call zap dead locals stub", 0, TypePtr::BOTTOM);
   274   // We need to copy the OopMap from the site we're zapping at.
   275   // We have to make a copy, because the zap site might not be
   276   // a call site, and zap_dead is a call site.
   277   OopMap* clone = node_to_check->oop_map()->deep_copy();
   279   // Add the cloned OopMap to the zap node
   280   ideal_node->set_oop_map(clone);
   281   return _matcher->match_sfpt(ideal_node);
   282 }
   284 bool Compile::is_node_getting_a_safepoint( Node* n) {
   285   // This code duplicates the logic prior to the call of add_safepoint
   286   // below in this file.
   287   if( n->is_MachSafePoint() ) return true;
   288   return false;
   289 }
   291 # endif // ENABLE_ZAP_DEAD_LOCALS
    293 // Compute the size of the first NumberOfLoopInstrToAlign instructions at the top
    294 // of a loop. When aligning a loop we need to provide enough instructions
    295 // in the cpu's fetch buffer to feed the decoders. The loop alignment could be
    296 // avoided if we have enough instructions in the fetch buffer at the head of a loop.
   297 // By default, the size is set to 999999 by Block's constructor so that
   298 // a loop will be aligned if the size is not reset here.
   299 //
   300 // Note: Mach instructions could contain several HW instructions
   301 // so the size is estimated only.
   302 //
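// Illustrative example (not from the original sources): with
// OptoLoopAlignment == 16, a loop header that would start at code offset
// 0x3a needs up to 6 bytes of NOP padding; the padding is skipped when the
// first NumberOfLoopInstrToAlign instructions already fill the fetch buffer.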
   303 void Compile::compute_loop_first_inst_sizes() {
   304   // The next condition is used to gate the loop alignment optimization.
    305   // Don't align a loop if there are enough instructions at the head of the loop
    306   // or the alignment padding is larger than MaxLoopPad. By default, MaxLoopPad
    307   // is equal to OptoLoopAlignment-1, except on new Intel cpus, where it is
    308   // equal to 11 bytes, which is the size of the largest NOP instruction.
   309   if (MaxLoopPad < OptoLoopAlignment - 1) {
   310     uint last_block = _cfg->number_of_blocks() - 1;
   311     for (uint i = 1; i <= last_block; i++) {
   312       Block* block = _cfg->get_block(i);
    313       // Check blocks which start a loop and require alignment.
   314       if (block->loop_alignment() > (uint)relocInfo::addr_unit()) {
   315         uint sum_size = 0;
   316         uint inst_cnt = NumberOfLoopInstrToAlign;
   317         inst_cnt = block->compute_first_inst_size(sum_size, inst_cnt, _regalloc);
    319         // Check subsequent fallthrough blocks if the loop's first
    320         // block(s) do not have enough instructions.
   321         Block *nb = block;
   322         while(inst_cnt > 0 &&
   323               i < last_block &&
   324               !_cfg->get_block(i + 1)->has_loop_alignment() &&
   325               !nb->has_successor(block)) {
   326           i++;
   327           nb = _cfg->get_block(i);
   328           inst_cnt  = nb->compute_first_inst_size(sum_size, inst_cnt, _regalloc);
   329         } // while( inst_cnt > 0 && i < last_block  )
   331         block->set_first_inst_size(sum_size);
    332       } // if( b->head()->is_Loop() )
   333     } // for( i <= last_block )
   334   } // if( MaxLoopPad < OptoLoopAlignment-1 )
   335 }
   337 // The architecture description provides short branch variants for some long
   338 // branch instructions. Replace eligible long branches with short branches.
   339 void Compile::shorten_branches(uint* blk_starts, int& code_size, int& reloc_size, int& stub_size) {
   340   // Compute size of each block, method size, and relocation information size
   341   uint nblocks  = _cfg->number_of_blocks();
   343   uint*      jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
   344   uint*      jmp_size   = NEW_RESOURCE_ARRAY(uint,nblocks);
   345   int*       jmp_nidx   = NEW_RESOURCE_ARRAY(int ,nblocks);
   347   // Collect worst case block paddings
   348   int* block_worst_case_pad = NEW_RESOURCE_ARRAY(int, nblocks);
   349   memset(block_worst_case_pad, 0, nblocks * sizeof(int));
   351   DEBUG_ONLY( uint *jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks); )
   352   DEBUG_ONLY( uint *jmp_rule = NEW_RESOURCE_ARRAY(uint,nblocks); )
   354   bool has_short_branch_candidate = false;
   356   // Initialize the sizes to 0
   357   code_size  = 0;          // Size in bytes of generated code
   358   stub_size  = 0;          // Size in bytes of all stub entries
   359   // Size in bytes of all relocation entries, including those in local stubs.
   360   // Start with 2-bytes of reloc info for the unvalidated entry point
   361   reloc_size = 1;          // Number of relocation entries
   363   // Make three passes.  The first computes pessimistic blk_starts,
   364   // relative jmp_offset and reloc_size information.  The second performs
   365   // short branch substitution using the pessimistic sizing.  The
   366   // third inserts nops where needed.
   368   // Step one, perform a pessimistic sizing pass.
   369   uint last_call_adr = max_uint;
   370   uint last_avoid_back_to_back_adr = max_uint;
   371   uint nop_size = (new (this) MachNopNode())->size(_regalloc);
   372   for (uint i = 0; i < nblocks; i++) { // For all blocks
   373     Block* block = _cfg->get_block(i);
   375     // During short branch replacement, we store the relative (to blk_starts)
   376     // offset of jump in jmp_offset, rather than the absolute offset of jump.
   377     // This is so that we do not need to recompute sizes of all nodes when
   378     // we compute correct blk_starts in our next sizing pass.
   379     jmp_offset[i] = 0;
   380     jmp_size[i]   = 0;
   381     jmp_nidx[i]   = -1;
   382     DEBUG_ONLY( jmp_target[i] = 0; )
   383     DEBUG_ONLY( jmp_rule[i]   = 0; )
   385     // Sum all instruction sizes to compute block size
   386     uint last_inst = block->number_of_nodes();
   387     uint blk_size = 0;
   388     for (uint j = 0; j < last_inst; j++) {
   389       Node* nj = block->get_node(j);
   390       // Handle machine instruction nodes
   391       if (nj->is_Mach()) {
   392         MachNode *mach = nj->as_Mach();
   393         blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
   394         reloc_size += mach->reloc();
   395         if (mach->is_MachCall()) {
   396           // add size information for trampoline stub
   397           // class CallStubImpl is platform-specific and defined in the *.ad files.
   398           stub_size  += CallStubImpl::size_call_trampoline();
   399           reloc_size += CallStubImpl::reloc_call_trampoline();
   401           MachCallNode *mcall = mach->as_MachCall();
   402           // This destination address is NOT PC-relative
   404           mcall->method_set((intptr_t)mcall->entry_point());
   406           if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
   407             stub_size  += CompiledStaticCall::to_interp_stub_size();
   408             reloc_size += CompiledStaticCall::reloc_to_interp_stub();
   409           }
   410         } else if (mach->is_MachSafePoint()) {
   411           // If call/safepoint are adjacent, account for possible
   412           // nop to disambiguate the two safepoints.
   413           // ScheduleAndBundle() can rearrange nodes in a block,
   414           // check for all offsets inside this block.
   415           if (last_call_adr >= blk_starts[i]) {
   416             blk_size += nop_size;
   417           }
   418         }
   419         if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
   420           // Nop is inserted between "avoid back to back" instructions.
   421           // ScheduleAndBundle() can rearrange nodes in a block,
   422           // check for all offsets inside this block.
   423           if (last_avoid_back_to_back_adr >= blk_starts[i]) {
   424             blk_size += nop_size;
   425           }
   426         }
   427         if (mach->may_be_short_branch()) {
   428           if (!nj->is_MachBranch()) {
   429 #ifndef PRODUCT
   430             nj->dump(3);
   431 #endif
   432             Unimplemented();
   433           }
   434           assert(jmp_nidx[i] == -1, "block should have only one branch");
   435           jmp_offset[i] = blk_size;
   436           jmp_size[i]   = nj->size(_regalloc);
   437           jmp_nidx[i]   = j;
   438           has_short_branch_candidate = true;
   439         }
   440       }
   441       blk_size += nj->size(_regalloc);
   442       // Remember end of call offset
   443       if (nj->is_MachCall() && !nj->is_MachCallLeaf()) {
   444         last_call_adr = blk_starts[i]+blk_size;
   445       }
   446       // Remember end of avoid_back_to_back offset
   447       if (nj->is_Mach() && nj->as_Mach()->avoid_back_to_back(MachNode::AVOID_AFTER)) {
   448         last_avoid_back_to_back_adr = blk_starts[i]+blk_size;
   449       }
   450     }
   452     // When the next block starts a loop, we may insert pad NOP
   453     // instructions.  Since we cannot know our future alignment,
   454     // assume the worst.
   455     if (i < nblocks - 1) {
   456       Block* nb = _cfg->get_block(i + 1);
   457       int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
   458       if (max_loop_pad > 0) {
   459         assert(is_power_of_2(max_loop_pad+relocInfo::addr_unit()), "");
   460         // Adjust last_call_adr and/or last_avoid_back_to_back_adr.
   461         // If either is the last instruction in this block, bump by
   462         // max_loop_pad in lock-step with blk_size, so sizing
    463         // calculations in subsequent blocks can still conservatively
    464         // detect that it may be the last instruction in this block.
   465         if (last_call_adr == blk_starts[i]+blk_size) {
   466           last_call_adr += max_loop_pad;
   467         }
   468         if (last_avoid_back_to_back_adr == blk_starts[i]+blk_size) {
   469           last_avoid_back_to_back_adr += max_loop_pad;
   470         }
   471         blk_size += max_loop_pad;
   472         block_worst_case_pad[i + 1] = max_loop_pad;
   473       }
   474     }
   476     // Save block size; update total method size
   477     blk_starts[i+1] = blk_starts[i]+blk_size;
   478   }
   480   // Step two, replace eligible long jumps.
   481   bool progress = true;
   482   uint last_may_be_short_branch_adr = max_uint;
   483   while (has_short_branch_candidate && progress) {
   484     progress = false;
   485     has_short_branch_candidate = false;
   486     int adjust_block_start = 0;
   487     for (uint i = 0; i < nblocks; i++) {
   488       Block* block = _cfg->get_block(i);
   489       int idx = jmp_nidx[i];
   490       MachNode* mach = (idx == -1) ? NULL: block->get_node(idx)->as_Mach();
   491       if (mach != NULL && mach->may_be_short_branch()) {
   492 #ifdef ASSERT
   493         assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
   494         int j;
   495         // Find the branch; ignore trailing NOPs.
   496         for (j = block->number_of_nodes()-1; j>=0; j--) {
   497           Node* n = block->get_node(j);
   498           if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
   499             break;
   500         }
   501         assert(j >= 0 && j == idx && block->get_node(j) == (Node*)mach, "sanity");
   502 #endif
   503         int br_size = jmp_size[i];
   504         int br_offs = blk_starts[i] + jmp_offset[i];
   506         // This requires the TRUE branch target be in succs[0]
   507         uint bnum = block->non_connector_successor(0)->_pre_order;
   508         int offset = blk_starts[bnum] - br_offs;
   509         if (bnum > i) { // adjust following block's offset
   510           offset -= adjust_block_start;
   511         }
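        // Note: offset is the signed distance from the branch to its target
        // block and is negative for backward branches. For example
        // (illustrative numbers only), a branch at offset 0x120 whose target
        // block starts at 0x80 yields offset == -0xA0.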
   513         // This block can be a loop header, account for the padding
   514         // in the previous block.
   515         int block_padding = block_worst_case_pad[i];
   516         assert(i == 0 || block_padding == 0 || br_offs >= block_padding, "Should have at least a padding on top");
   517         // In the following code a nop could be inserted before
   518         // the branch which will increase the backward distance.
   519         bool needs_padding = ((uint)(br_offs - block_padding) == last_may_be_short_branch_adr);
   520         assert(!needs_padding || jmp_offset[i] == 0, "padding only branches at the beginning of block");
   522         if (needs_padding && offset <= 0)
   523           offset -= nop_size;
   525         if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
   526           // We've got a winner.  Replace this branch.
   527           MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
   529           // Update the jmp_size.
   530           int new_size = replacement->size(_regalloc);
   531           int diff     = br_size - new_size;
   532           assert(diff >= (int)nop_size, "short_branch size should be smaller");
   533           // Conservatively take into account padding between
   534           // avoid_back_to_back branches. Previous branch could be
   535           // converted into avoid_back_to_back branch during next
   536           // rounds.
   537           if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
   538             jmp_offset[i] += nop_size;
   539             diff -= nop_size;
   540           }
   541           adjust_block_start += diff;
   542           block->map_node(replacement, idx);
   543           mach->subsume_by(replacement, C);
   544           mach = replacement;
   545           progress = true;
   547           jmp_size[i] = new_size;
   548           DEBUG_ONLY( jmp_target[i] = bnum; );
   549           DEBUG_ONLY( jmp_rule[i] = mach->rule(); );
   550         } else {
   551           // The jump distance is not short, try again during next iteration.
   552           has_short_branch_candidate = true;
   553         }
   554       } // (mach->may_be_short_branch())
   555       if (mach != NULL && (mach->may_be_short_branch() ||
   556                            mach->avoid_back_to_back(MachNode::AVOID_AFTER))) {
   557         last_may_be_short_branch_adr = blk_starts[i] + jmp_offset[i] + jmp_size[i];
   558       }
   559       blk_starts[i+1] -= adjust_block_start;
   560     }
   561   }
   563 #ifdef ASSERT
   564   for (uint i = 0; i < nblocks; i++) { // For all blocks
   565     if (jmp_target[i] != 0) {
   566       int br_size = jmp_size[i];
   567       int offset = blk_starts[jmp_target[i]]-(blk_starts[i] + jmp_offset[i]);
   568       if (!_matcher->is_short_branch_offset(jmp_rule[i], br_size, offset)) {
   569         tty->print_cr("target (%d) - jmp_offset(%d) = offset (%d), jump_size(%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_offset[i], offset, br_size, i, jmp_target[i]);
   570       }
   571       assert(_matcher->is_short_branch_offset(jmp_rule[i], br_size, offset), "Displacement too large for short jmp");
   572     }
   573   }
   574 #endif
   576   // Step 3, compute the offsets of all blocks, will be done in fill_buffer()
   577   // after ScheduleAndBundle().
   579   // ------------------
   580   // Compute size for code buffer
   581   code_size = blk_starts[nblocks];
   583   // Relocation records
   584   reloc_size += 1;              // Relo entry for exception handler
   586   // Adjust reloc_size to number of record of relocation info
   587   // Min is 2 bytes, max is probably 6 or 8, with a tax up to 25% for
   588   // a relocation index.
   589   // The CodeBuffer will expand the locs array if this estimate is too low.
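  // Worked example (illustrative only): on a platform where
  // sizeof(relocInfo) == 2 the scale factor below is 10/2 == 5, so 40
  // counted relocation entries reserve 200 bytes of locs space.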
   590   reloc_size *= 10 / sizeof(relocInfo);
   591 }
   593 //------------------------------FillLocArray-----------------------------------
   594 // Create a bit of debug info and append it to the array.  The mapping is from
   595 // Java local or expression stack to constant, register or stack-slot.  For
   596 // doubles, insert 2 mappings and return 1 (to tell the caller that the next
   597 // entry has been taken care of and caller should skip it).
   598 static LocationValue *new_loc_value( PhaseRegAlloc *ra, OptoReg::Name regnum, Location::Type l_type ) {
   599   // This should never have accepted Bad before
   600   assert(OptoReg::is_valid(regnum), "location must be valid");
   601   return (OptoReg::is_reg(regnum))
   602     ? new LocationValue(Location::new_reg_loc(l_type, OptoReg::as_VMReg(regnum)) )
   603     : new LocationValue(Location::new_stk_loc(l_type,  ra->reg2offset(regnum)));
   604 }
   607 ObjectValue*
   608 Compile::sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id) {
   609   for (int i = 0; i < objs->length(); i++) {
   610     assert(objs->at(i)->is_object(), "corrupt object cache");
   611     ObjectValue* sv = (ObjectValue*) objs->at(i);
   612     if (sv->id() == id) {
   613       return sv;
   614     }
   615   }
   616   // Otherwise..
   617   return NULL;
   618 }
   620 void Compile::set_sv_for_object_node(GrowableArray<ScopeValue*> *objs,
   621                                      ObjectValue* sv ) {
   622   assert(sv_for_node_id(objs, sv->id()) == NULL, "Precondition");
   623   objs->append(sv);
   624 }
   627 void Compile::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local,
   628                             GrowableArray<ScopeValue*> *array,
   629                             GrowableArray<ScopeValue*> *objs ) {
   630   assert( local, "use _top instead of null" );
   631   if (array->length() != idx) {
   632     assert(array->length() == idx + 1, "Unexpected array count");
   633     // Old functionality:
   634     //   return
   635     // New functionality:
   636     //   Assert if the local is not top. In product mode let the new node
   637     //   override the old entry.
   638     assert(local == top(), "LocArray collision");
   639     if (local == top()) {
   640       return;
   641     }
   642     array->pop();
   643   }
   644   const Type *t = local->bottom_type();
   646   // Is it a safepoint scalar object node?
   647   if (local->is_SafePointScalarObject()) {
   648     SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
   650     ObjectValue* sv = Compile::sv_for_node_id(objs, spobj->_idx);
   651     if (sv == NULL) {
   652       ciKlass* cik = t->is_oopptr()->klass();
   653       assert(cik->is_instance_klass() ||
   654              cik->is_array_klass(), "Not supported allocation.");
   655       sv = new ObjectValue(spobj->_idx,
   656                            new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
   657       Compile::set_sv_for_object_node(objs, sv);
   659       uint first_ind = spobj->first_index(sfpt->jvms());
   660       for (uint i = 0; i < spobj->n_fields(); i++) {
   661         Node* fld_node = sfpt->in(first_ind+i);
   662         (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
   663       }
   664     }
   665     array->append(sv);
   666     return;
   667   }
   669   // Grab the register number for the local
   670   OptoReg::Name regnum = _regalloc->get_reg_first(local);
   671   if( OptoReg::is_valid(regnum) ) {// Got a register/stack?
   672     // Record the double as two float registers.
   673     // The register mask for such a value always specifies two adjacent
   674     // float registers, with the lower register number even.
   675     // Normally, the allocation of high and low words to these registers
   676     // is irrelevant, because nearly all operations on register pairs
   677     // (e.g., StoreD) treat them as a single unit.
   678     // Here, we assume in addition that the words in these two registers
   679     // stored "naturally" (by operations like StoreD and double stores
   680     // within the interpreter) such that the lower-numbered register
   681     // is written to the lower memory address.  This may seem like
   682     // a machine dependency, but it is not--it is a requirement on
   683     // the author of the <arch>.ad file to ensure that, for every
   684     // even/odd double-register pair to which a double may be allocated,
   685     // the word in the even single-register is stored to the first
   686     // memory word.  (Note that register numbers are completely
   687     // arbitrary, and are not tied to any machine-level encodings.)
   688 #ifdef _LP64
   689     if( t->base() == Type::DoubleBot || t->base() == Type::DoubleCon ) {
   690       array->append(new ConstantIntValue(0));
   691       array->append(new_loc_value( _regalloc, regnum, Location::dbl ));
   692     } else if ( t->base() == Type::Long ) {
   693       array->append(new ConstantIntValue(0));
   694       array->append(new_loc_value( _regalloc, regnum, Location::lng ));
   695     } else if ( t->base() == Type::RawPtr ) {
    696       // jsr/ret return address which must be restored into the full
    697       // width 64-bit stack slot.
   698       array->append(new_loc_value( _regalloc, regnum, Location::lng ));
   699     }
   700 #else //_LP64
   701 #ifdef SPARC
   702     if (t->base() == Type::Long && OptoReg::is_reg(regnum)) {
   703       // For SPARC we have to swap high and low words for
   704       // long values stored in a single-register (g0-g7).
   705       array->append(new_loc_value( _regalloc,              regnum   , Location::normal ));
   706       array->append(new_loc_value( _regalloc, OptoReg::add(regnum,1), Location::normal ));
   707     } else
   708 #endif //SPARC
   709     if( t->base() == Type::DoubleBot || t->base() == Type::DoubleCon || t->base() == Type::Long ) {
   710       // Repack the double/long as two jints.
   711       // The convention the interpreter uses is that the second local
   712       // holds the first raw word of the native double representation.
   713       // This is actually reasonable, since locals and stack arrays
   714       // grow downwards in all implementations.
   715       // (If, on some machine, the interpreter's Java locals or stack
   716       // were to grow upwards, the embedded doubles would be word-swapped.)
   717       array->append(new_loc_value( _regalloc, OptoReg::add(regnum,1), Location::normal ));
   718       array->append(new_loc_value( _regalloc,              regnum   , Location::normal ));
   719     }
   720 #endif //_LP64
   721     else if( (t->base() == Type::FloatBot || t->base() == Type::FloatCon) &&
   722                OptoReg::is_reg(regnum) ) {
   723       array->append(new_loc_value( _regalloc, regnum, Matcher::float_in_double()
   724                                    ? Location::float_in_dbl : Location::normal ));
   725     } else if( t->base() == Type::Int && OptoReg::is_reg(regnum) ) {
   726       array->append(new_loc_value( _regalloc, regnum, Matcher::int_in_long
   727                                    ? Location::int_in_long : Location::normal ));
   728     } else if( t->base() == Type::NarrowOop ) {
   729       array->append(new_loc_value( _regalloc, regnum, Location::narrowoop ));
   730     } else {
   731       array->append(new_loc_value( _regalloc, regnum, _regalloc->is_oop(local) ? Location::oop : Location::normal ));
   732     }
   733     return;
   734   }
   736   // No register.  It must be constant data.
   737   switch (t->base()) {
   738   case Type::Half:              // Second half of a double
   739     ShouldNotReachHere();       // Caller should skip 2nd halves
   740     break;
   741   case Type::AnyPtr:
   742     array->append(new ConstantOopWriteValue(NULL));
   743     break;
   744   case Type::AryPtr:
   745   case Type::InstPtr:          // fall through
   746     array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->constant_encoding()));
   747     break;
   748   case Type::NarrowOop:
   749     if (t == TypeNarrowOop::NULL_PTR) {
   750       array->append(new ConstantOopWriteValue(NULL));
   751     } else {
   752       array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->constant_encoding()));
   753     }
   754     break;
   755   case Type::Int:
   756     array->append(new ConstantIntValue(t->is_int()->get_con()));
   757     break;
   758   case Type::RawPtr:
   759     // A return address (T_ADDRESS).
   760     assert((intptr_t)t->is_ptr()->get_con() < (intptr_t)0x10000, "must be a valid BCI");
   761 #ifdef _LP64
   762     // Must be restored to the full-width 64-bit stack slot.
   763     array->append(new ConstantLongValue(t->is_ptr()->get_con()));
   764 #else
   765     array->append(new ConstantIntValue(t->is_ptr()->get_con()));
   766 #endif
   767     break;
   768   case Type::FloatCon: {
   769     float f = t->is_float_constant()->getf();
   770     array->append(new ConstantIntValue(jint_cast(f)));
   771     break;
   772   }
   773   case Type::DoubleCon: {
   774     jdouble d = t->is_double_constant()->getd();
   775 #ifdef _LP64
   776     array->append(new ConstantIntValue(0));
   777     array->append(new ConstantDoubleValue(d));
   778 #else
   779     // Repack the double as two jints.
   780     // The convention the interpreter uses is that the second local
   781     // holds the first raw word of the native double representation.
   782     // This is actually reasonable, since locals and stack arrays
   783     // grow downwards in all implementations.
   784     // (If, on some machine, the interpreter's Java locals or stack
   785     // were to grow upwards, the embedded doubles would be word-swapped.)
   786     jlong_accessor acc = { jlong_cast(d) };
   787     array->append(new ConstantIntValue(acc.words[1]));
   788     array->append(new ConstantIntValue(acc.words[0]));
   789 #endif
   790     break;
   791   }
   792   case Type::Long: {
   793     jlong d = t->is_long()->get_con();
   794 #ifdef _LP64
   795     array->append(new ConstantIntValue(0));
   796     array->append(new ConstantLongValue(d));
   797 #else
   798     // Repack the long as two jints.
   799     // The convention the interpreter uses is that the second local
    800     // holds the first raw word of the native long representation.
   801     // This is actually reasonable, since locals and stack arrays
   802     // grow downwards in all implementations.
   803     // (If, on some machine, the interpreter's Java locals or stack
   804     // were to grow upwards, the embedded doubles would be word-swapped.)
   805     jlong_accessor acc = { d };
   806     array->append(new ConstantIntValue(acc.words[1]));
   807     array->append(new ConstantIntValue(acc.words[0]));
   808 #endif
   809     break;
   810   }
   811   case Type::Top:               // Add an illegal value here
   812     array->append(new LocationValue(Location()));
   813     break;
   814   default:
   815     ShouldNotReachHere();
   816     break;
   817   }
   818 }
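
// The jlong_accessor repacking used above can be pictured with a minimal,
// self-contained sketch (illustrative only, not part of this file; it uses a
// local union as a stand-in for the real jlong_accessor from
// globalDefinitions.hpp):
//
//   #include <stdint.h>
//   #include <stdio.h>
//
//   union long_accessor {        // stand-in for HotSpot's jlong_accessor
//     int64_t long_value;
//     int32_t words[2];
//   };
//
//   int main() {
//     union long_accessor acc;
//     acc.long_value = 0x1122334455667788LL;
//     // On a little-endian machine words[0] == 0x55667788 and
//     // words[1] == 0x11223344. FillLocArray appends words[1] first and
//     // words[0] second, matching the interpreter convention that the
//     // second local holds the first raw word of the native representation.
//     printf("%08x %08x\n", (unsigned)acc.words[1], (unsigned)acc.words[0]);
//     return 0;
//   }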
   820 // Determine if this node starts a bundle
   821 bool Compile::starts_bundle(const Node *n) const {
   822   return (_node_bundling_limit > n->_idx &&
   823           _node_bundling_base[n->_idx].starts_bundle());
   824 }
   826 //--------------------------Process_OopMap_Node--------------------------------
   827 void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
   829   // Handle special safepoint nodes for synchronization
   830   MachSafePointNode *sfn   = mach->as_MachSafePoint();
   831   MachCallNode      *mcall;
   833 #ifdef ENABLE_ZAP_DEAD_LOCALS
   834   assert( is_node_getting_a_safepoint(mach),  "logic does not match; false negative");
   835 #endif
   837   int safepoint_pc_offset = current_offset;
   838   bool is_method_handle_invoke = false;
   839   bool return_oop = false;
   841   // Add the safepoint in the DebugInfoRecorder
   842   if( !mach->is_MachCall() ) {
   843     mcall = NULL;
   844     debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
   845   } else {
   846     mcall = mach->as_MachCall();
   848     // Is the call a MethodHandle call?
   849     if (mcall->is_MachCallJava()) {
   850       if (mcall->as_MachCallJava()->_method_handle_invoke) {
   851         assert(has_method_handle_invokes(), "must have been set during call generation");
   852         is_method_handle_invoke = true;
   853       }
   854     }
   856     // Check if a call returns an object.
   857     if (mcall->return_value_is_used() &&
   858         mcall->tf()->range()->field_at(TypeFunc::Parms)->isa_ptr()) {
   859       return_oop = true;
   860     }
   861     safepoint_pc_offset += mcall->ret_addr_offset();
   862     debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
   863   }
   865   // Loop over the JVMState list to add scope information
   866   // Do not skip safepoints with a NULL method, they need monitor info
   867   JVMState* youngest_jvms = sfn->jvms();
   868   int max_depth = youngest_jvms->depth();
   870   // Allocate the object pool for scalar-replaced objects -- the map from
   871   // small-integer keys (which can be recorded in the local and ostack
   872   // arrays) to descriptions of the object state.
   873   GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
   875   // Visit scopes from oldest to youngest.
   876   for (int depth = 1; depth <= max_depth; depth++) {
   877     JVMState* jvms = youngest_jvms->of_depth(depth);
   878     int idx;
   879     ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
   880     // Safepoints that do not have method() set only provide oop-map and monitor info
   881     // to support GC; these do not support deoptimization.
   882     int num_locs = (method == NULL) ? 0 : jvms->loc_size();
   883     int num_exps = (method == NULL) ? 0 : jvms->stk_size();
   884     int num_mon  = jvms->nof_monitors();
   885     assert(method == NULL || jvms->bci() < 0 || num_locs == method->max_locals(),
   886            "JVMS local count must match that of the method");
   888     // Add Local and Expression Stack Information
   890     // Insert locals into the locarray
   891     GrowableArray<ScopeValue*> *locarray = new GrowableArray<ScopeValue*>(num_locs);
   892     for( idx = 0; idx < num_locs; idx++ ) {
   893       FillLocArray( idx, sfn, sfn->local(jvms, idx), locarray, objs );
   894     }
   896     // Insert expression stack entries into the exparray
   897     GrowableArray<ScopeValue*> *exparray = new GrowableArray<ScopeValue*>(num_exps);
   898     for( idx = 0; idx < num_exps; idx++ ) {
   899       FillLocArray( idx,  sfn, sfn->stack(jvms, idx), exparray, objs );
   900     }
   902     // Add in mappings of the monitors
   903     assert( !method ||
   904             !method->is_synchronized() ||
   905             method->is_native() ||
   906             num_mon > 0 ||
   907             !GenerateSynchronizationCode,
   908             "monitors must always exist for synchronized methods");
    910     // Build the growable array of MonitorValues for the monitors
   911     GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
   913     // Loop over monitors and insert into array
   914     for (idx = 0; idx < num_mon; idx++) {
   915       // Grab the node that defines this monitor
   916       Node* box_node = sfn->monitor_box(jvms, idx);
   917       Node* obj_node = sfn->monitor_obj(jvms, idx);
   919       // Create ScopeValue for object
   920       ScopeValue *scval = NULL;
   922       if (obj_node->is_SafePointScalarObject()) {
   923         SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
   924         scval = Compile::sv_for_node_id(objs, spobj->_idx);
   925         if (scval == NULL) {
   926           const Type *t = spobj->bottom_type();
   927           ciKlass* cik = t->is_oopptr()->klass();
   928           assert(cik->is_instance_klass() ||
   929                  cik->is_array_klass(), "Not supported allocation.");
   930           ObjectValue* sv = new ObjectValue(spobj->_idx,
   931                                             new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
   932           Compile::set_sv_for_object_node(objs, sv);
   934           uint first_ind = spobj->first_index(youngest_jvms);
   935           for (uint i = 0; i < spobj->n_fields(); i++) {
   936             Node* fld_node = sfn->in(first_ind+i);
   937             (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
   938           }
   939           scval = sv;
   940         }
   941       } else if (!obj_node->is_Con()) {
   942         OptoReg::Name obj_reg = _regalloc->get_reg_first(obj_node);
   943         if( obj_node->bottom_type()->base() == Type::NarrowOop ) {
   944           scval = new_loc_value( _regalloc, obj_reg, Location::narrowoop );
   945         } else {
   946           scval = new_loc_value( _regalloc, obj_reg, Location::oop );
   947         }
   948       } else {
   949         const TypePtr *tp = obj_node->get_ptr_type();
   950         scval = new ConstantOopWriteValue(tp->is_oopptr()->const_oop()->constant_encoding());
   951       }
   953       OptoReg::Name box_reg = BoxLockNode::reg(box_node);
   954       Location basic_lock = Location::new_stk_loc(Location::normal,_regalloc->reg2offset(box_reg));
   955       bool eliminated = (box_node->is_BoxLock() && box_node->as_BoxLock()->is_eliminated());
   956       monarray->append(new MonitorValue(scval, basic_lock, eliminated));
   957     }
   959     // We dump the object pool first, since deoptimization reads it in first.
   960     debug_info()->dump_object_pool(objs);
   962     // Build first class objects to pass to scope
   963     DebugToken *locvals = debug_info()->create_scope_values(locarray);
   964     DebugToken *expvals = debug_info()->create_scope_values(exparray);
   965     DebugToken *monvals = debug_info()->create_monitor_values(monarray);
   967     // Make method available for all Safepoints
   968     ciMethod* scope_method = method ? method : _method;
   969     // Describe the scope here
   970     assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
   971     assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
   972     // Now we can describe the scope.
   973     debug_info()->describe_scope(safepoint_pc_offset, scope_method, jvms->bci(), jvms->should_reexecute(), is_method_handle_invoke, return_oop, locvals, expvals, monvals);
   974   } // End jvms loop
   976   // Mark the end of the scope set.
   977   debug_info()->end_safepoint(safepoint_pc_offset);
   978 }
   982 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
   983 class NonSafepointEmitter {
   984   Compile*  C;
   985   JVMState* _pending_jvms;
   986   int       _pending_offset;
   988   void emit_non_safepoint();
   990  public:
   991   NonSafepointEmitter(Compile* compile) {
   992     this->C = compile;
   993     _pending_jvms = NULL;
   994     _pending_offset = 0;
   995   }
   997   void observe_instruction(Node* n, int pc_offset) {
   998     if (!C->debug_info()->recording_non_safepoints())  return;
  1000     Node_Notes* nn = C->node_notes_at(n->_idx);
  1001     if (nn == NULL || nn->jvms() == NULL)  return;
  1002     if (_pending_jvms != NULL &&
  1003         _pending_jvms->same_calls_as(nn->jvms())) {
  1004       // Repeated JVMS?  Stretch it up here.
  1005       _pending_offset = pc_offset;
  1006     } else {
  1007       if (_pending_jvms != NULL &&
  1008           _pending_offset < pc_offset) {
   1009         emit_non_safepoint();
   1010       }
  1011       _pending_jvms = NULL;
  1012       if (pc_offset > C->debug_info()->last_pc_offset()) {
  1013         // This is the only way _pending_jvms can become non-NULL:
  1014         _pending_jvms = nn->jvms();
   1015         _pending_offset = pc_offset;
   1016       }
   1017     }
   1018   }
  1020   // Stay out of the way of real safepoints:
  1021   void observe_safepoint(JVMState* jvms, int pc_offset) {
  1022     if (_pending_jvms != NULL &&
  1023         !_pending_jvms->same_calls_as(jvms) &&
  1024         _pending_offset < pc_offset) {
   1025       emit_non_safepoint();
   1026     }
   1027     _pending_jvms = NULL;
   1028   }
  1030   void flush_at_end() {
  1031     if (_pending_jvms != NULL) {
   1032       emit_non_safepoint();
   1033     }
   1034     _pending_jvms = NULL;
   1035   }
  1036 };
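// Typical use (see fill_buffer() later in this file): observe_instruction()
// is invoked for every emitted node, observe_safepoint() whenever a real
// safepoint is recorded, and flush_at_end() once code emission finishes, so
// any still-pending JVMState is flushed as a non-safepoint record.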
  1038 void NonSafepointEmitter::emit_non_safepoint() {
  1039   JVMState* youngest_jvms = _pending_jvms;
  1040   int       pc_offset     = _pending_offset;
  1042   // Clear it now:
  1043   _pending_jvms = NULL;
  1045   DebugInformationRecorder* debug_info = C->debug_info();
  1046   assert(debug_info->recording_non_safepoints(), "sanity");
  1048   debug_info->add_non_safepoint(pc_offset);
  1049   int max_depth = youngest_jvms->depth();
  1051   // Visit scopes from oldest to youngest.
  1052   for (int depth = 1; depth <= max_depth; depth++) {
  1053     JVMState* jvms = youngest_jvms->of_depth(depth);
  1054     ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
  1055     assert(!jvms->should_reexecute() || depth==max_depth, "reexecute allowed only for the youngest");
   1056     debug_info->describe_scope(pc_offset, method, jvms->bci(), jvms->should_reexecute());
   1057   }
  1059   // Mark the end of the scope set.
   1060   debug_info->end_non_safepoint(pc_offset);
   1061 }
  1063 //------------------------------init_buffer------------------------------------
  1064 CodeBuffer* Compile::init_buffer(uint* blk_starts) {
  1066   // Set the initially allocated size
  1067   int  code_req   = initial_code_capacity;
  1068   int  locs_req   = initial_locs_capacity;
  1069   int  stub_req   = TraceJumps ? initial_stub_capacity * 10 : initial_stub_capacity;
  1070   int  const_req  = initial_const_capacity;
  1072   int  pad_req    = NativeCall::instruction_size;
  1073   // The extra spacing after the code is necessary on some platforms.
  1074   // Sometimes we need to patch in a jump after the last instruction,
  1075   // if the nmethod has been deoptimized.  (See 4932387, 4894843.)
  1077   // Compute the byte offset where we can store the deopt pc.
  1078   if (fixed_slots() != 0) {
   1079     _orig_pc_slot_offset_in_bytes = _regalloc->reg2offset(OptoReg::stack2reg(_orig_pc_slot));
   1080   }
  1082   // Compute prolog code size
  1083   _method_size = 0;
  1084   _frame_slots = OptoReg::reg2stack(_matcher->_old_SP)+_regalloc->_framesize;
  1085 #if defined(IA64) && !defined(AIX)
  1086   if (save_argument_registers()) {
  1087     // 4815101: this is a stub with implicit and unknown precision fp args.
  1088     // The usual spill mechanism can only generate stfd's in this case, which
  1089     // doesn't work if the fp reg to spill contains a single-precision denorm.
  1090     // Instead, we hack around the normal spill mechanism using stfspill's and
  1091     // ldffill's in the MachProlog and MachEpilog emit methods.  We allocate
  1092     // space here for the fp arg regs (f8-f15) we're going to thusly spill.
  1093     //
  1094     // If we ever implement 16-byte 'registers' == stack slots, we can
  1095     // get rid of this hack and have SpillCopy generate stfspill/ldffill
  1096     // instead of stfd/stfs/ldfd/ldfs.
   1097     _frame_slots += 8*(16/BytesPerInt);
   1098   }
  1099 #endif
  1100   assert(_frame_slots >= 0 && _frame_slots < 1000000, "sanity check");
  1102   if (has_mach_constant_base_node()) {
  1103     uint add_size = 0;
  1104     // Fill the constant table.
  1105     // Note:  This must happen before shorten_branches.
  1106     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
  1107       Block* b = _cfg->get_block(i);
  1109       for (uint j = 0; j < b->number_of_nodes(); j++) {
  1110         Node* n = b->get_node(j);
  1112         // If the node is a MachConstantNode evaluate the constant
  1113         // value section.
  1114         if (n->is_MachConstant()) {
  1115           MachConstantNode* machcon = n->as_MachConstant();
  1116           machcon->eval_constant(C);
  1117         } else if (n->is_Mach()) {
  1118           // On Power there are more nodes that issue constants.
   1119           add_size += (n->as_Mach()->ins_num_consts() * 8);
   1120         }
   1121       }
   1122     }
  1124     // Calculate the offsets of the constants and the size of the
  1125     // constant table (including the padding to the next section).
  1126     constant_table().calculate_offsets_and_size();
   1127     const_req = constant_table().size() + add_size;
   1128   }
  1130   // Initialize the space for the BufferBlob used to find and verify
  1131   // instruction size in MachNode::emit_size()
  1132   init_scratch_buffer_blob(const_req);
  1133   if (failing())  return NULL; // Out of memory
  1135   // Pre-compute the length of blocks and replace
  1136   // long branches with short if machine supports it.
  1137   shorten_branches(blk_starts, code_req, locs_req, stub_req);
  1139   // nmethod and CodeBuffer count stubs & constants as part of method's code.
  1140   // class HandlerImpl is platform-specific and defined in the *.ad files.
  1141   int exception_handler_req = HandlerImpl::size_exception_handler() + MAX_stubs_size; // add marginal slop for handler
  1142   int deopt_handler_req     = HandlerImpl::size_deopt_handler()     + MAX_stubs_size; // add marginal slop for handler
  1143   stub_req += MAX_stubs_size;   // ensure per-stub margin
  1144   code_req += MAX_inst_size;    // ensure per-instruction margin
  1146   if (StressCodeBuffers)
  1147     code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10;  // force expansion
  1149   int total_req =
  1150     const_req +
  1151     code_req +
  1152     pad_req +
  1153     stub_req +
  1154     exception_handler_req +
  1155     deopt_handler_req;               // deopt handler
  1157   if (has_method_handle_invokes())
  1158     total_req += deopt_handler_req;  // deopt MH handler
  1160   CodeBuffer* cb = code_buffer();
  1161   cb->initialize(total_req, locs_req);
  1163   // Have we run out of code space?
  1164   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
  1165     C->record_failure("CodeCache is full");
   1166     return NULL;
   1167   }
  1168   // Configure the code buffer.
  1169   cb->initialize_consts_size(const_req);
  1170   cb->initialize_stubs_size(stub_req);
  1171   cb->initialize_oop_recorder(env()->oop_recorder());
  1173   // fill in the nop array for bundling computations
  1174   MachNode *_nop_list[Bundle::_nop_count];
  1175   Bundle::initialize_nops(_nop_list, this);
   1177   return cb;
   1178 }
  1180 //------------------------------fill_buffer------------------------------------
  1181 void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
   1182   // blk_starts[] contains offsets calculated during short branch processing;
   1183   // offsets must not be increased during the following steps.
  1185   // Compute the size of first NumberOfLoopInstrToAlign instructions at head
  1186   // of a loop. It is used to determine the padding for loop alignment.
  1187   compute_loop_first_inst_sizes();
  1189   // Create oopmap set.
  1190   _oop_map_set = new OopMapSet();
  1192   // !!!!! This preserves old handling of oopmaps for now
  1193   debug_info()->set_oopmaps(_oop_map_set);
  1195   uint nblocks  = _cfg->number_of_blocks();
  1196   // Count and start of implicit null check instructions
  1197   uint inct_cnt = 0;
  1198   uint *inct_starts = NEW_RESOURCE_ARRAY(uint, nblocks+1);
  1200   // Count and start of calls
  1201   uint *call_returns = NEW_RESOURCE_ARRAY(uint, nblocks+1);
  1203   uint  return_offset = 0;
  1204   int nop_size = (new (this) MachNopNode())->size(_regalloc);
  1206   int previous_offset = 0;
  1207   int current_offset  = 0;
  1208   int last_call_offset = -1;
  1209   int last_avoid_back_to_back_offset = -1;
  1210 #ifdef ASSERT
  1211   uint* jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks);
  1212   uint* jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
  1213   uint* jmp_size   = NEW_RESOURCE_ARRAY(uint,nblocks);
  1214   uint* jmp_rule   = NEW_RESOURCE_ARRAY(uint,nblocks);
  1215 #endif
  1217   // Create an array of unused labels, one for each basic block, if printing is enabled
  1218 #ifndef PRODUCT
  1219   int *node_offsets      = NULL;
  1220   uint node_offset_limit = unique();
  1222   if (print_assembly())
  1223     node_offsets         = NEW_RESOURCE_ARRAY(int, node_offset_limit);
  1224 #endif
  1226   NonSafepointEmitter non_safepoints(this);  // emit non-safepoints lazily
  1228   // Emit the constant table.
  1229   if (has_mach_constant_base_node()) {
   1230     constant_table().emit(*cb);
   1231   }
  1233   // Create an array of labels, one for each basic block
  1234   Label *blk_labels = NEW_RESOURCE_ARRAY(Label, nblocks+1);
  1235   for (uint i=0; i <= nblocks; i++) {
   1236     blk_labels[i].init();
   1237   }
  1239   // ------------------
  1240   // Now fill in the code buffer
  1241   Node *delay_slot = NULL;
  1243   for (uint i = 0; i < nblocks; i++) {
  1244     Block* block = _cfg->get_block(i);
  1245     Node* head = block->head();
  1247     // If this block needs to start aligned (i.e., can be reached other
  1248     // than by falling-thru from the previous block), then force the
  1249     // start of a new bundle.
  1250     if (Pipeline::requires_bundling() && starts_bundle(head)) {
  1251       cb->flush_bundle(true);
  1254 #ifdef ASSERT
  1255     if (!block->is_connector()) {
  1256       stringStream st;
  1257       block->dump_head(_cfg, &st);
  1258       MacroAssembler(cb).block_comment(st.as_string());
  1260     jmp_target[i] = 0;
  1261     jmp_offset[i] = 0;
  1262     jmp_size[i]   = 0;
  1263     jmp_rule[i]   = 0;
  1264 #endif
  1265     int blk_offset = current_offset;
  1267     // Define the label at the beginning of the basic block
  1268     MacroAssembler(cb).bind(blk_labels[block->_pre_order]);
  1270     uint last_inst = block->number_of_nodes();
  1272     // Emit block normally, except for last instruction.
  1273     // Emit means "dump code bits into code buffer".
  1274     for (uint j = 0; j<last_inst; j++) {
  1276       // Get the node
  1277       Node* n = block->get_node(j);
  1279       // See if delay slots are supported
  1280       if (valid_bundle_info(n) &&
  1281           node_bundling(n)->used_in_unconditional_delay()) {
  1282         assert(delay_slot == NULL, "no use of delay slot node");
  1283         assert(n->size(_regalloc) == Pipeline::instr_unit_size(), "delay slot instruction wrong size");
  1285         delay_slot = n;
  1286         continue;
  1289       // If this starts a new instruction group, then flush the current one
  1290       // (but allow split bundles)
  1291       if (Pipeline::requires_bundling() && starts_bundle(n))
  1292         cb->flush_bundle(false);
  1294       // The following logic is duplicated in the code ifdeffed for
  1295       // ENABLE_ZAP_DEAD_LOCALS which appears above in this file.  It
  1296       // should be factored out.  Or maybe dispersed to the nodes?
  1298       // Special handling for SafePoint/Call Nodes
  1299       bool is_mcall = false;
  1300       if (n->is_Mach()) {
  1301         MachNode *mach = n->as_Mach();
  1302         is_mcall = n->is_MachCall();
  1303         bool is_sfn = n->is_MachSafePoint();
  1305         // If this requires all previous instructions be flushed, then do so
  1306         if (is_sfn || is_mcall || mach->alignment_required() != 1) {
  1307           cb->flush_bundle(true);
  1308           current_offset = cb->insts_size();
  1311         // Padding may be needed again since a previous instruction
  1312         // could have been moved into the delay slot.
  1314         // align the instruction if necessary
  1315         int padding = mach->compute_padding(current_offset);
  1316         // Make sure safepoint node for polling is distinct from a call's
  1317         // return by adding a nop if needed.
  1318         if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
  1319           padding = nop_size;
  1321         if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
  1322             current_offset == last_avoid_back_to_back_offset) {
  1323           // Avoid placing certain instructions back to back.
  1324           padding = nop_size;
  1327         if(padding > 0) {
  1328           assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
  1329           int nops_cnt = padding / nop_size;
  1330           MachNode *nop = new (this) MachNopNode(nops_cnt);
  1331           block->insert_node(nop, j++);
  1332           last_inst++;
  1333           _cfg->map_node_to_block(nop, block);
  1334           nop->emit(*cb, _regalloc);
  1335           cb->flush_bundle(true);
  1336           current_offset = cb->insts_size();
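                 // Worked example (illustrative, hypothetical sizes): with
                 // nop_size == 1 and padding == 3, nops_cnt is 3 and a single
                 // MachNopNode(3) is emitted, advancing current_offset by three
                 // bytes before the instruction that needed the alignment.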
  1339         // Remember the start of the last call in a basic block
  1340         if (is_mcall) {
  1341           MachCallNode *mcall = mach->as_MachCall();
  1343           // This destination address is NOT PC-relative
  1344           mcall->method_set((intptr_t)mcall->entry_point());
  1346           // Save the return address
  1347           call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
  1349           if (mcall->is_MachCallLeaf()) {
  1350             is_mcall = false;
  1351             is_sfn = false;
  1355         // sfn will be valid whenever mcall is valid now because of inheritance
  1356         if (is_sfn || is_mcall) {
  1358           // Handle special safepoint nodes for synchronization
  1359           if (!is_mcall) {
  1360             MachSafePointNode *sfn = mach->as_MachSafePoint();
  1361             // !!!!! Stubs only need an oopmap right now, so bail out
  1362             if (sfn->jvms()->method() == NULL) {
  1363               // Write the oopmap directly to the code blob??!!
  1364 #             ifdef ENABLE_ZAP_DEAD_LOCALS
  1365               assert( !is_node_getting_a_safepoint(sfn),  "logic does not match; false positive");
  1366 #             endif
  1367               continue;
  1369           } // End synchronization
  1371           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
  1372                                            current_offset);
  1373           Process_OopMap_Node(mach, current_offset);
  1374         } // End if safepoint
  1376         // If this is a null check, then add the start of the previous instruction to the list
  1377         else if( mach->is_MachNullCheck() ) {
  1378           inct_starts[inct_cnt++] = previous_offset;
  1381         // If this is a branch, then fill in the label with the target BB's label
  1382         else if (mach->is_MachBranch()) {
  1383           // This requires the TRUE branch target be in succs[0]
  1384           uint block_num = block->non_connector_successor(0)->_pre_order;
  1386           // Try to replace a long branch with a short one if the delay slot
  1387           // is not used; this applies mostly to backward branches, since a
  1388           // forward branch's distance is not finalized yet.
  1389           bool delay_slot_is_used = valid_bundle_info(n) &&
  1390                                     node_bundling(n)->use_unconditional_delay();
  1391           if (!delay_slot_is_used && mach->may_be_short_branch()) {
  1392             assert(delay_slot == NULL, "not expecting delay slot node");
  1393             int br_size = n->size(_regalloc);
  1394             int offset = blk_starts[block_num] - current_offset;
  1395             if (block_num >= i) {
  1396               // The offsets of the current and following blocks are not
  1397               // finalized yet; adjust the distance by the difference between
  1398               // the calculated and final offsets of the current block.
  1399               offset -= (blk_starts[i] - blk_offset);
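                     // Worked example (illustrative numbers): if blk_starts[i]
                     // was estimated as 120 but this block actually begins at
                     // blk_offset == 112, the not-yet-final target offset is at
                     // least 8 bytes too large, so the distance shrinks by 8.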
  1401             // In the following code a nop could be inserted before the
  1402             // branch, which would increase the backward distance.
  1403             bool needs_padding = (current_offset == last_avoid_back_to_back_offset);
  1404             if (needs_padding && offset <= 0)
  1405               offset -= nop_size;
  1407             if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
  1408               // We've got a winner.  Replace this branch.
  1409               MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
  1411               // Update the jmp_size.
  1412               int new_size = replacement->size(_regalloc);
  1413               assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
  1414               // Insert padding between avoid_back_to_back branches.
  1415               if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
  1416                 MachNode *nop = new (this) MachNopNode();
  1417                 block->insert_node(nop, j++);
  1418                 _cfg->map_node_to_block(nop, block);
  1419                 last_inst++;
  1420                 nop->emit(*cb, _regalloc);
  1421                 cb->flush_bundle(true);
  1422                 current_offset = cb->insts_size();
  1424 #ifdef ASSERT
  1425               jmp_target[i] = block_num;
  1426               jmp_offset[i] = current_offset - blk_offset;
  1427               jmp_size[i]   = new_size;
  1428               jmp_rule[i]   = mach->rule();
  1429 #endif
  1430               block->map_node(replacement, j);
  1431               mach->subsume_by(replacement, C);
  1432               n    = replacement;
  1433               mach = replacement;
  1436           mach->as_MachBranch()->label_set( &blk_labels[block_num], block_num );
  1437         } else if (mach->ideal_Opcode() == Op_Jump) {
  1438           for (uint h = 0; h < block->_num_succs; h++) {
  1439             Block* succs_block = block->_succs[h];
  1440             for (uint j = 1; j < succs_block->num_preds(); j++) {
  1441               Node* jpn = succs_block->pred(j);
  1442               if (jpn->is_JumpProj() && jpn->in(0) == mach) {
  1443                 uint block_num = succs_block->non_connector()->_pre_order;
  1444                 Label *blkLabel = &blk_labels[block_num];
  1445                 mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
  1450 #ifdef ASSERT
  1451         // Check that oop-store precedes the card-mark
  1452         else if (mach->ideal_Opcode() == Op_StoreCM) {
  1453           uint storeCM_idx = j;
  1454           int count = 0;
  1455           for (uint prec = mach->req(); prec < mach->len(); prec++) {
  1456             Node *oop_store = mach->in(prec);  // Precedence edge
  1457             if (oop_store == NULL) continue;
  1458             count++;
  1459             uint i4;
  1460             for (i4 = 0; i4 < last_inst; ++i4) {
  1461               if (block->get_node(i4) == oop_store) {
  1462                 break;
  1465             // Note: This test can give a false failure if other precedence
  1466             // edges have been added to the StoreCM node.
  1467             assert(i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
  1469           assert(count > 0, "storeCM expects at least one precedence edge");
  1471 #endif
  1472         else if (!n->is_Proj()) {
  1473           // Remember the beginning of the previous instruction, in case
  1474           // it's followed by a flag-kill and a null-check.  Happens on
  1475           // Intel all the time, with add-to-memory kind of opcodes.
  1476           previous_offset = current_offset;
  1479         // Not an else-if!
  1480         // If this is a trap based cmp then add its offset to the list.
  1481         if (mach->is_TrapBasedCheckNode()) {
  1482           inct_starts[inct_cnt++] = current_offset;
  1486       // Verify that there is sufficient space remaining
  1487       cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
  1488       if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
  1489         C->record_failure("CodeCache is full");
  1490         return;
  1493       // Save the offset for the listing
  1494 #ifndef PRODUCT
  1495       if (node_offsets && n->_idx < node_offset_limit)
  1496         node_offsets[n->_idx] = cb->insts_size();
  1497 #endif
  1499       // "Normal" instruction case
  1500       DEBUG_ONLY( uint instr_offset = cb->insts_size(); )
  1501       n->emit(*cb, _regalloc);
  1502       current_offset  = cb->insts_size();
  1504 #ifdef ASSERT
  1505       if (n->size(_regalloc) < (current_offset-instr_offset)) {
  1506         n->dump();
  1507         assert(false, "wrong size of mach node");
  1509 #endif
  1510       non_safepoints.observe_instruction(n, current_offset);
  1512       // mcall is the last "call" that can be a safepoint.
  1513       // Record it so we can see whether a poll will directly follow it,
  1514       // in which case we'll need a pad to make the PcDesc sites unique;
  1515       // see 5010568. This can be slightly inaccurate but conservative
  1516       // in the case that the return address is not actually at current_offset.
  1517       // This is a small price to pay.
  1519       if (is_mcall) {
  1520         last_call_offset = current_offset;
  1523       if (n->is_Mach() && n->as_Mach()->avoid_back_to_back(MachNode::AVOID_AFTER)) {
  1524         // Avoid placing certain instructions back to back.
  1525         last_avoid_back_to_back_offset = current_offset;
  1528       // See if this instruction has a delay slot
  1529       if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
  1530         assert(delay_slot != NULL, "expecting delay slot node");
  1532         // Back up 1 instruction
  1533         cb->set_insts_end(cb->insts_end() - Pipeline::instr_unit_size());
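               // Illustrative sketch (assumes the branch left a filler, typically
               // a nop, in its architectural delay slot): rewinding insts_end by
               // one Pipeline::instr_unit_size() lets the real delay-slot
               // instruction emitted below overwrite that filler.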
  1535         // Save the offset for the listing
  1536 #ifndef PRODUCT
  1537         if (node_offsets && delay_slot->_idx < node_offset_limit)
  1538           node_offsets[delay_slot->_idx] = cb->insts_size();
  1539 #endif
  1541         // Support a SafePoint in the delay slot
  1542         if (delay_slot->is_MachSafePoint()) {
  1543           MachNode *mach = delay_slot->as_Mach();
  1544           // !!!!! Stubs only need an oopmap right now, so bail out
  1545           if (!mach->is_MachCall() && mach->as_MachSafePoint()->jvms()->method() == NULL) {
  1546             // Write the oopmap directly to the code blob??!!
  1547 #           ifdef ENABLE_ZAP_DEAD_LOCALS
  1548             assert( !is_node_getting_a_safepoint(mach),  "logic does not match; false positive");
  1549 #           endif
  1550             delay_slot = NULL;
  1551             continue;
  1554           int adjusted_offset = current_offset - Pipeline::instr_unit_size();
  1555           non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
  1556                                            adjusted_offset);
  1557           // Generate an OopMap entry
  1558           Process_OopMap_Node(mach, adjusted_offset);
  1561         // Insert the delay slot instruction
  1562         delay_slot->emit(*cb, _regalloc);
  1564         // Don't reuse it
  1565         delay_slot = NULL;
  1568     } // End for all instructions in block
  1570     // If the next block is the top of a loop, pad this block out to align
  1571     // the loop top a little. Helps prevent pipe stalls at loop back branches.
  1572     if (i < nblocks-1) {
  1573       Block *nb = _cfg->get_block(i + 1);
  1574       int padding = nb->alignment_padding(current_offset);
  1575       if( padding > 0 ) {
  1576         MachNode *nop = new (this) MachNopNode(padding / nop_size);
  1577         block->insert_node(nop, block->number_of_nodes());
  1578         _cfg->map_node_to_block(nop, block);
  1579         nop->emit(*cb, _regalloc);
  1580         current_offset = cb->insts_size();
  1583     // Verify that the distances computed earlier for forward
  1584     // short branches are still valid.
  1585     guarantee((int)(blk_starts[i+1] - blk_starts[i]) >= (current_offset - blk_offset), "shouldn't increase block size");
  1587     // Save new block start offset
  1588     blk_starts[i] = blk_offset;
  1589   } // End of for all blocks
  1590   blk_starts[nblocks] = current_offset;
  1592   non_safepoints.flush_at_end();
  1594   // Offset too large?
  1595   if (failing())  return;
  1597   // Define a pseudo-label at the end of the code
  1598   MacroAssembler(cb).bind( blk_labels[nblocks] );
  1600   // Compute the size of the first block
  1601   _first_block_size = blk_labels[1].loc_pos() - blk_labels[0].loc_pos();
  1603   assert(cb->insts_size() < 500000, "method is unreasonably large");
  1605 #ifdef ASSERT
  1606   for (uint i = 0; i < nblocks; i++) { // For all blocks
  1607     if (jmp_target[i] != 0) {
  1608       int br_size = jmp_size[i];
  1609       int offset = blk_starts[jmp_target[i]]-(blk_starts[i] + jmp_offset[i]);
  1610       if (!_matcher->is_short_branch_offset(jmp_rule[i], br_size, offset)) {
  1611         tty->print_cr("target (%d) - jmp_offset(%d) = offset (%d), jump_size(%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_offset[i], offset, br_size, i, jmp_target[i]);
  1612         assert(false, "Displacement too large for short jmp");
  1616 #endif
  1618 #ifndef PRODUCT
  1619   // Information on the size of the method, without the extraneous code
  1620   Scheduling::increment_method_size(cb->insts_size());
  1621 #endif
  1623   // ------------------
  1624   // Fill in exception table entries.
  1625   FillExceptionTables(inct_cnt, call_returns, inct_starts, blk_labels);
  1627   // Only Java methods have exception handlers and deopt handlers.
  1628   // Class HandlerImpl is platform-specific and defined in the *.ad files.
  1629   if (_method) {
  1630     // Emit the exception handler code.
  1631     _code_offsets.set_value(CodeOffsets::Exceptions, HandlerImpl::emit_exception_handler(*cb));
  1632     // Emit the deopt handler code.
  1633     _code_offsets.set_value(CodeOffsets::Deopt, HandlerImpl::emit_deopt_handler(*cb));
  1635     // Emit the MethodHandle deopt handler code (if required).
  1636     if (has_method_handle_invokes()) {
  1637       // We can use the same code as for the normal deopt handler; we
  1638       // just need a different entry point address.
  1639       _code_offsets.set_value(CodeOffsets::DeoptMH, HandlerImpl::emit_deopt_handler(*cb));
  1643   // One last check for failed CodeBuffer::expand:
  1644   if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
  1645     C->record_failure("CodeCache is full");
  1646     return;
  1649 #ifndef PRODUCT
  1650   // Dump the assembly code, including basic-block numbers
  1651   if (print_assembly()) {
  1652     ttyLocker ttyl;  // keep the following output all in one block
  1653     if (!VMThread::should_terminate()) {  // test this under the tty lock
  1654       // This output goes directly to the tty, not the compiler log.
  1655       // To enable tools to match it up with the compilation activity,
  1656       // be sure to tag this tty output with the compile ID.
  1657       if (xtty != NULL) {
  1658         xtty->head("opto_assembly compile_id='%d'%s", compile_id(),
  1659                    is_osr_compilation()    ? " compile_kind='osr'" :
  1660                    "");
  1662       if (method() != NULL) {
  1663         method()->print_metadata();
  1665       dump_asm(node_offsets, node_offset_limit);
  1666       if (xtty != NULL) {
  1667         xtty->tail("opto_assembly");
  1671 #endif
  1675 void Compile::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels) {
  1676   _inc_table.set_size(cnt);
  1678   uint inct_cnt = 0;
  1679   for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
  1680     Block* block = _cfg->get_block(i);
  1681     Node *n = NULL;
  1682     int j;
  1684     // Find the branch; ignore trailing NOPs.
  1685     for (j = block->number_of_nodes() - 1; j >= 0; j--) {
  1686       n = block->get_node(j);
  1687       if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) {
  1688         break;
  1692     // If we didn't find anything, continue
  1693     if (j < 0) {
  1694       continue;
  1697     // Compute ExceptionHandlerTable subtable entry and add it
  1698     // (skip empty blocks)
  1699     if (n->is_Catch()) {
  1701       // Get the offset of the return from the call
  1702       uint call_return = call_returns[block->_pre_order];
  1703 #ifdef ASSERT
  1704       assert( call_return > 0, "no call seen for this basic block" );
  1705       while (block->get_node(--j)->is_MachProj()) ;
  1706       assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
  1707 #endif
  1708       // The last instruction is a CatchNode; find its CatchProjNodes
  1709       int nof_succs = block->_num_succs;
  1710       // allocate space
  1711       GrowableArray<intptr_t> handler_bcis(nof_succs);
  1712       GrowableArray<intptr_t> handler_pcos(nof_succs);
  1713       // iterate through all successors
  1714       for (int j = 0; j < nof_succs; j++) {
  1715         Block* s = block->_succs[j];
  1716         bool found_p = false;
  1717         for (uint k = 1; k < s->num_preds(); k++) {
  1718           Node* pk = s->pred(k);
  1719           if (pk->is_CatchProj() && pk->in(0) == n) {
  1720             const CatchProjNode* p = pk->as_CatchProj();
  1721             found_p = true;
  1722             // add the corresponding handler bci & pco information
  1723             if (p->_con != CatchProjNode::fall_through_index) {
  1724               // p leads to an exception handler (and is not fall through)
  1725               assert(s == _cfg->get_block(s->_pre_order), "bad numbering");
  1726               // no duplicates, please
  1727               if (!handler_bcis.contains(p->handler_bci())) {
  1728                 uint block_num = s->non_connector()->_pre_order;
  1729                 handler_bcis.append(p->handler_bci());
  1730                 handler_pcos.append(blk_labels[block_num].loc_pos());
  1735         assert(found_p, "no matching predecessor found");
  1736         // Note:  Due to empty block removal, one block may have
  1737         // several CatchProj inputs, from the same Catch.
  1740       // Set the offset of the return from the call
  1741       _handler_table.add_subtable(call_return, &handler_bcis, NULL, &handler_pcos);
  1742       continue;
  1745     // Handle implicit null exception table updates
  1746     if (n->is_MachNullCheck()) {
  1747       uint block_num = block->non_connector_successor(0)->_pre_order;
  1748       _inc_table.append(inct_starts[inct_cnt++], blk_labels[block_num].loc_pos());
  1749       continue;
  1751     // Handle implicit exception table updates: trap instructions.
  1752     if (n->is_Mach() && n->as_Mach()->is_TrapBasedCheckNode()) {
  1753       uint block_num = block->non_connector_successor(0)->_pre_order;
  1754       _inc_table.append(inct_starts[inct_cnt++], blk_labels[block_num].loc_pos());
  1755       continue;
  1757   } // End of for all blocks fill in exception table entries
  1760 // Static Variables
  1761 #ifndef PRODUCT
  1762 uint Scheduling::_total_nop_size = 0;
  1763 uint Scheduling::_total_method_size = 0;
  1764 uint Scheduling::_total_branches = 0;
  1765 uint Scheduling::_total_unconditional_delays = 0;
  1766 uint Scheduling::_total_instructions_per_bundle[Pipeline::_max_instrs_per_cycle+1];
  1767 #endif
  1769 // Initializer for class Scheduling
  1771 Scheduling::Scheduling(Arena *arena, Compile &compile)
  1772   : _arena(arena),
  1773     _cfg(compile.cfg()),
  1774     _regalloc(compile.regalloc()),
  1775     _reg_node(arena),
  1776     _bundle_instr_count(0),
  1777     _bundle_cycle_number(0),
  1778     _scheduled(arena),
  1779     _available(arena),
  1780     _next_node(NULL),
  1781     _bundle_use(0, 0, resource_count, &_bundle_use_elements[0]),
  1782     _pinch_free_list(arena)
  1783 #ifndef PRODUCT
  1784   , _branches(0)
  1785   , _unconditional_delays(0)
  1786 #endif
  1788   // Create a MachNopNode
  1789   _nop = new (&compile) MachNopNode();
  1791   // Now that the nops are in the array, save the count
  1792   // (but allow entries for the nops)
  1793   _node_bundling_limit = compile.unique();
  1794   uint node_max = _regalloc->node_regs_max_index();
  1796   compile.set_node_bundling_limit(_node_bundling_limit);
  1798   // This one is persistent within the Compile class
  1799   _node_bundling_base = NEW_ARENA_ARRAY(compile.comp_arena(), Bundle, node_max);
  1801   // Allocate space for fixed-size arrays
  1802   _node_latency    = NEW_ARENA_ARRAY(arena, unsigned short, node_max);
  1803   _uses            = NEW_ARENA_ARRAY(arena, short,          node_max);
  1804   _current_latency = NEW_ARENA_ARRAY(arena, unsigned short, node_max);
  1806   // Clear the arrays
  1807   memset(_node_bundling_base, 0, node_max * sizeof(Bundle));
  1808   memset(_node_latency,       0, node_max * sizeof(unsigned short));
  1809   memset(_uses,               0, node_max * sizeof(short));
  1810   memset(_current_latency,    0, node_max * sizeof(unsigned short));
  1812   // Clear the bundling information
  1813   memcpy(_bundle_use_elements, Pipeline_Use::elaborated_elements, sizeof(Pipeline_Use::elaborated_elements));
  1815   // Get the last node
  1816   Block* block = _cfg->get_block(_cfg->number_of_blocks() - 1);
  1818   _next_node = block->get_node(block->number_of_nodes() - 1);
  1821 #ifndef PRODUCT
  1822 // Scheduling destructor
  1823 Scheduling::~Scheduling() {
  1824   _total_branches             += _branches;
  1825   _total_unconditional_delays += _unconditional_delays;
  1827 #endif
  1829 // Step ahead "i" cycles
  1830 void Scheduling::step(uint i) {
  1832   Bundle *bundle = node_bundling(_next_node);
  1833   bundle->set_starts_bundle();
  1835   // Update the bundle record, but leave the flags information alone
  1836   if (_bundle_instr_count > 0) {
  1837     bundle->set_instr_count(_bundle_instr_count);
  1838     bundle->set_resources_used(_bundle_use.resourcesUsed());
  1841   // Update the state information
  1842   _bundle_instr_count = 0;
  1843   _bundle_cycle_number += i;
  1844   _bundle_use.step(i);
  1847 void Scheduling::step_and_clear() {
  1848   Bundle *bundle = node_bundling(_next_node);
  1849   bundle->set_starts_bundle();
  1851   // Update the bundle record
  1852   if (_bundle_instr_count > 0) {
  1853     bundle->set_instr_count(_bundle_instr_count);
  1854     bundle->set_resources_used(_bundle_use.resourcesUsed());
  1856     _bundle_cycle_number += 1;
  1859   // Clear the bundling information
  1860   _bundle_instr_count = 0;
  1861   _bundle_use.reset();
  1863   memcpy(_bundle_use_elements,
  1864     Pipeline_Use::elaborated_elements,
  1865     sizeof(Pipeline_Use::elaborated_elements));
  1868 // Perform instruction scheduling and bundling over the sequence of
  1869 // instructions in backwards order.
  1870 void Compile::ScheduleAndBundle() {
  1872   // Don't optimize this if it isn't a method
  1873   if (!_method)
  1874     return;
  1876   // Don't optimize this if scheduling is disabled
  1877   if (!do_scheduling())
  1878     return;
  1880   // Scheduling code works only with pairs (8 bytes) maximum.
  1881   if (max_vector_size() > 8)
  1882     return;
  1884   NOT_PRODUCT( TracePhase t2("isched", &_t_instrSched, TimeCompiler); )
  1886   // Create a data structure for all the scheduling information
  1887   Scheduling scheduling(Thread::current()->resource_area(), *this);
  1889   // Walk backwards over all the basic blocks, scheduling and bundling
  1890   // the instructions in each one.
  1891   scheduling.DoScheduling();
  1894 // Compute the latency of all the instructions.  This is fairly simple,
  1895 // because we already have a legal ordering.  Walk over the instructions
  1896 // from first to last, and compute the latency of the instruction based
  1897 // on the latency of the preceding instruction(s).
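       // Worked example (illustrative, hypothetical latencies): if a node's two
       // inputs have accumulated latencies 3 and 5 and the corresponding edge
       // latencies are 1 and 2, the node's latency becomes
       // max(1, 3 + 1, 5 + 2) = 7, where 1 is the forced minimum used below.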
  1898 void Scheduling::ComputeLocalLatenciesForward(const Block *bb) {
  1899 #ifndef PRODUCT
  1900   if (_cfg->C->trace_opto_output())
  1901     tty->print("# -> ComputeLocalLatenciesForward\n");
  1902 #endif
  1904   // Walk over all the schedulable instructions
  1905   for( uint j=_bb_start; j < _bb_end; j++ ) {
  1907     // This is a kludge, forcing all latency calculations to start at 1.
  1908     // Used to allow latency 0 to force an instruction to the beginning
  1909     // of the bb
  1910     uint latency = 1;
  1911     Node *use = bb->get_node(j);
  1912     uint nlen = use->len();
  1914     // Walk over all the inputs
  1915     for ( uint k=0; k < nlen; k++ ) {
  1916       Node *def = use->in(k);
  1917       if (!def)
  1918         continue;
  1920       uint l = _node_latency[def->_idx] + use->latency(k);
  1921       if (latency < l)
  1922         latency = l;
  1925     _node_latency[use->_idx] = latency;
  1927 #ifndef PRODUCT
  1928     if (_cfg->C->trace_opto_output()) {
  1929       tty->print("# latency %4d: ", latency);
  1930       use->dump();
  1932 #endif
  1935 #ifndef PRODUCT
  1936   if (_cfg->C->trace_opto_output())
  1937     tty->print("# <- ComputeLocalLatenciesForward\n");
  1938 #endif
  1940 } // end ComputeLocalLatenciesForward
  1942 // See if this node fits into the present instruction bundle
  1943 bool Scheduling::NodeFitsInBundle(Node *n) {
  1944   uint n_idx = n->_idx;
  1946   // If this is the unconditional delay instruction, then it fits
  1947   if (n == _unconditional_delay_slot) {
  1948 #ifndef PRODUCT
  1949     if (_cfg->C->trace_opto_output())
  1950       tty->print("#     NodeFitsInBundle [%4d]: TRUE; is in unconditional delay slot\n", n->_idx);
  1951 #endif
  1952     return (true);
  1955   // If the node cannot be scheduled this cycle, skip it
  1956   if (_current_latency[n_idx] > _bundle_cycle_number) {
  1957 #ifndef PRODUCT
  1958     if (_cfg->C->trace_opto_output())
  1959       tty->print("#     NodeFitsInBundle [%4d]: FALSE; latency %4d > %d\n",
  1960         n->_idx, _current_latency[n_idx], _bundle_cycle_number);
  1961 #endif
  1962     return (false);
  1965   const Pipeline *node_pipeline = n->pipeline();
  1967   uint instruction_count = node_pipeline->instructionCount();
  1968   if (node_pipeline->mayHaveNoCode() && n->size(_regalloc) == 0)
  1969     instruction_count = 0;
  1970   else if (node_pipeline->hasBranchDelay() && !_unconditional_delay_slot)
  1971     instruction_count++;
  1973   if (_bundle_instr_count + instruction_count > Pipeline::_max_instrs_per_cycle) {
  1974 #ifndef PRODUCT
  1975     if (_cfg->C->trace_opto_output())
  1976       tty->print("#     NodeFitsInBundle [%4d]: FALSE; too many instructions: %d > %d\n",
  1977         n->_idx, _bundle_instr_count + instruction_count, Pipeline::_max_instrs_per_cycle);
  1978 #endif
  1979     return (false);
  1982   // Don't allow non-machine nodes to be handled this way
  1983   if (!n->is_Mach() && instruction_count == 0)
  1984     return (false);
  1986   // See if there is any overlap
  1987   uint delay = _bundle_use.full_latency(0, node_pipeline->resourceUse());
  1989   if (delay > 0) {
  1990 #ifndef PRODUCT
  1991     if (_cfg->C->trace_opto_output())
  1992       tty->print("#     NodeFitsInBundle [%4d]: FALSE; functional units overlap\n", n_idx);
  1993 #endif
  1994     return false;
  1997 #ifndef PRODUCT
  1998   if (_cfg->C->trace_opto_output())
  1999     tty->print("#     NodeFitsInBundle [%4d]:  TRUE\n", n_idx);
  2000 #endif
  2002   return true;
  2005 Node * Scheduling::ChooseNodeToBundle() {
  2006   uint siz = _available.size();
  2008   if (siz == 0) {
  2010 #ifndef PRODUCT
  2011     if (_cfg->C->trace_opto_output())
  2012       tty->print("#   ChooseNodeToBundle: NULL\n");
  2013 #endif
  2014     return (NULL);
  2017   // Fast path, if only 1 instruction is available
  2018   if (siz == 1) {
  2019 #ifndef PRODUCT
  2020     if (_cfg->C->trace_opto_output()) {
  2021       tty->print("#   ChooseNodeToBundle (only 1): ");
  2022       _available[0]->dump();
  2024 #endif
  2025     return (_available[0]);
  2028   // Don't bother, if the bundle is already full
  2029   if (_bundle_instr_count < Pipeline::_max_instrs_per_cycle) {
  2030     for ( uint i = 0; i < siz; i++ ) {
  2031       Node *n = _available[i];
  2033       // Skip projections, we'll handle them another way
  2034       if (n->is_Proj())
  2035         continue;
  2037       // This presupposes that instructions are inserted into the
  2038       // available list in legal order; i.e., instructions that
  2039       // must come first are at the head of the list
  2040       if (NodeFitsInBundle(n)) {
  2041 #ifndef PRODUCT
  2042         if (_cfg->C->trace_opto_output()) {
  2043           tty->print("#   ChooseNodeToBundle: ");
  2044           n->dump();
  2046 #endif
  2047         return (n);
  2052   // Nothing fits in this bundle, choose the highest priority
  2053 #ifndef PRODUCT
  2054   if (_cfg->C->trace_opto_output()) {
  2055     tty->print("#   ChooseNodeToBundle: ");
  2056     _available[0]->dump();
  2058 #endif
  2060   return _available[0];
  2063 void Scheduling::AddNodeToAvailableList(Node *n) {
  2064   assert( !n->is_Proj(), "projections never directly made available" );
  2065 #ifndef PRODUCT
  2066   if (_cfg->C->trace_opto_output()) {
  2067     tty->print("#   AddNodeToAvailableList: ");
  2068     n->dump();
  2070 #endif
  2072   int latency = _current_latency[n->_idx];
  2074   // Insert in latency order (insertion sort)
  2075   uint i;
  2076   for ( i=0; i < _available.size(); i++ )
  2077     if (_current_latency[_available[i]->_idx] > latency)
  2078       break;
  2080   // Special Check for compares following branches
  2081   if( n->is_Mach() && _scheduled.size() > 0 ) {
  2082     int op = n->as_Mach()->ideal_Opcode();
  2083     Node *last = _scheduled[0];
  2084     if( last->is_MachIf() && last->in(1) == n &&
  2085         ( op == Op_CmpI ||
  2086           op == Op_CmpU ||
  2087           op == Op_CmpP ||
  2088           op == Op_CmpF ||
  2089           op == Op_CmpD ||
  2090           op == Op_CmpL ) ) {
  2092       // Recalculate position, moving to front of same latency
  2093       for ( i=0 ; i < _available.size(); i++ )
  2094         if (_current_latency[_available[i]->_idx] >= latency)
  2095           break;
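             // Worked example (illustrative, hypothetical latencies): with
             // available-list latencies [2, 4, 4, 7] and a compare of latency 4
             // feeding the just-scheduled MachIf, the normal '>' rule would give
             // index 3, while the '>=' rule above gives index 1, placing the
             // compare at the front of its latency group.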
  2099   // Insert the node in the available list
  2100   _available.insert(i, n);
  2102 #ifndef PRODUCT
  2103   if (_cfg->C->trace_opto_output())
  2104     dump_available();
  2105 #endif
  2108 void Scheduling::DecrementUseCounts(Node *n, const Block *bb) {
  2109   for ( uint i=0; i < n->len(); i++ ) {
  2110     Node *def = n->in(i);
  2111     if (!def) continue;
  2112     if( def->is_Proj() )        // If this is a machine projection, then
  2113       def = def->in(0);         // propagate usage thru to the base instruction
  2115     if(_cfg->get_block_for_node(def) != bb) { // Ignore if not block-local
  2116       continue;
  2119     // Compute the latency
  2120     uint l = _bundle_cycle_number + n->latency(i);
  2121     if (_current_latency[def->_idx] < l)
  2122       _current_latency[def->_idx] = l;
  2124     // If this input now has no remaining uses, make it available
  2125     if ((--_uses[def->_idx]) == 0)
  2126       AddNodeToAvailableList(def);
  2130 void Scheduling::AddNodeToBundle(Node *n, const Block *bb) {
  2131 #ifndef PRODUCT
  2132   if (_cfg->C->trace_opto_output()) {
  2133     tty->print("#   AddNodeToBundle: ");
  2134     n->dump();
  2136 #endif
  2138   // Remove this from the available list
  2139   uint i;
  2140   for (i = 0; i < _available.size(); i++)
  2141     if (_available[i] == n)
  2142       break;
  2143   assert(i < _available.size(), "entry in _available list not found");
  2144   _available.remove(i);
  2146   // See if this fits in the current bundle
  2147   const Pipeline *node_pipeline = n->pipeline();
  2148   const Pipeline_Use& node_usage = node_pipeline->resourceUse();
  2150   // Check for instructions to be placed in the delay slot. We
  2151   // do this before we actually schedule the current instruction,
  2152   // because the delay slot follows the current instruction.
  2153   if (Pipeline::_branch_has_delay_slot &&
  2154       node_pipeline->hasBranchDelay() &&
  2155       !_unconditional_delay_slot) {
  2157     uint siz = _available.size();
  2159     // Conditional branches can support an instruction that
  2160     // is unconditionally executed and not dependent on the
  2161     // branch, OR a conditionally executed instruction if
  2162     // the branch is taken.  In practice, this means that
  2163     // the first instruction at the branch target is
  2164     // copied to the delay slot, and the branch goes to
  2165     // the instruction after that at the branch target
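           // (Illustrative consequence of the search below: a single-unit,
           // non-safepoint Mach instruction from the available list, which by
           // construction does not feed the branch, can be marked
           // used_in_unconditional_delay and is later emitted into the slot
           // by fill_buffer().)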
  2166     if ( n->is_MachBranch() ) {
  2168       assert( !n->is_MachNullCheck(), "should not look for delay slot for Null Check" );
  2169       assert( !n->is_Catch(),         "should not look for delay slot for Catch" );
  2171 #ifndef PRODUCT
  2172       _branches++;
  2173 #endif
  2175       // At least 1 instruction is on the available list
  2176       // that is not dependent on the branch
  2177       for (uint i = 0; i < siz; i++) {
  2178         Node *d = _available[i];
  2179         const Pipeline *avail_pipeline = d->pipeline();
  2181         // Don't allow safepoints in the branch shadow; that would
  2182         // cause a number of difficulties
  2183         if ( avail_pipeline->instructionCount() == 1 &&
  2184             !avail_pipeline->hasMultipleBundles() &&
  2185             !avail_pipeline->hasBranchDelay() &&
  2186             Pipeline::instr_has_unit_size() &&
  2187             d->size(_regalloc) == Pipeline::instr_unit_size() &&
  2188             NodeFitsInBundle(d) &&
  2189             !node_bundling(d)->used_in_delay()) {
  2191           if (d->is_Mach() && !d->is_MachSafePoint()) {
  2192             // A node that fits in the delay slot was found, so we need to
  2193             // set the appropriate bits in the bundle pipeline information so
  2194             // that it correctly indicates resource usage.  Later, when we
  2195             // attempt to add this instruction to the bundle, we will skip
  2196             // setting the resource usage.
  2197             _unconditional_delay_slot = d;
  2198             node_bundling(n)->set_use_unconditional_delay();
  2199             node_bundling(d)->set_used_in_unconditional_delay();
  2200             _bundle_use.add_usage(avail_pipeline->resourceUse());
  2201             _current_latency[d->_idx] = _bundle_cycle_number;
  2202             _next_node = d;
  2203             ++_bundle_instr_count;
  2204 #ifndef PRODUCT
  2205             _unconditional_delays++;
  2206 #endif
  2207             break;
  2213     // No delay slot, add a nop to the usage
  2214     if (!_unconditional_delay_slot) {
  2215       // See if adding an instruction in the delay slot will overflow
  2216       // the bundle.
  2217       if (!NodeFitsInBundle(_nop)) {
  2218 #ifndef PRODUCT
  2219         if (_cfg->C->trace_opto_output())
  2220           tty->print("#  *** STEP(1 instruction for delay slot) ***\n");
  2221 #endif
  2222         step(1);
  2225       _bundle_use.add_usage(_nop->pipeline()->resourceUse());
  2226       _next_node = _nop;
  2227       ++_bundle_instr_count;
  2230     // See if the branch instruction itself requires stepping
  2231     // to a new bundle
  2232     if (!NodeFitsInBundle(n)) {
  2233 #ifndef PRODUCT
  2234         if (_cfg->C->trace_opto_output())
  2235           tty->print("#  *** STEP(branch won't fit) ***\n");
  2236 #endif
  2237         // Update the state information
  2238         _bundle_instr_count = 0;
  2239         _bundle_cycle_number += 1;
  2240         _bundle_use.step(1);
  2244   // Get the number of instructions
  2245   uint instruction_count = node_pipeline->instructionCount();
  2246   if (node_pipeline->mayHaveNoCode() && n->size(_regalloc) == 0)
  2247     instruction_count = 0;
  2249   // Compute the latency information
  2250   uint delay = 0;
  2252   if (instruction_count > 0 || !node_pipeline->mayHaveNoCode()) {
  2253     int relative_latency = _current_latency[n->_idx] - _bundle_cycle_number;
  2254     if (relative_latency < 0)
  2255       relative_latency = 0;
  2257     delay = _bundle_use.full_latency(relative_latency, node_usage);
  2259     // Does not fit in this bundle, start a new one
  2260     if (delay > 0) {
  2261       step(delay);
  2263 #ifndef PRODUCT
  2264       if (_cfg->C->trace_opto_output())
  2265         tty->print("#  *** STEP(%d) ***\n", delay);
  2266 #endif
  2270   // If this was placed in the delay slot, ignore it
  2271   if (n != _unconditional_delay_slot) {
  2273     if (delay == 0) {
  2274       if (node_pipeline->hasMultipleBundles()) {
  2275 #ifndef PRODUCT
  2276         if (_cfg->C->trace_opto_output())
  2277           tty->print("#  *** STEP(multiple instructions) ***\n");
  2278 #endif
  2279         step(1);
  2282       else if (instruction_count + _bundle_instr_count > Pipeline::_max_instrs_per_cycle) {
  2283 #ifndef PRODUCT
  2284         if (_cfg->C->trace_opto_output())
  2285           tty->print("#  *** STEP(%d >= %d instructions) ***\n",
  2286             instruction_count + _bundle_instr_count,
  2287             Pipeline::_max_instrs_per_cycle);
  2288 #endif
  2289         step(1);
  2293     if (node_pipeline->hasBranchDelay() && !_unconditional_delay_slot)
  2294       _bundle_instr_count++;
  2296     // Set the node's latency
  2297     _current_latency[n->_idx] = _bundle_cycle_number;
  2299     // Now merge the functional unit information
  2300     if (instruction_count > 0 || !node_pipeline->mayHaveNoCode())
  2301       _bundle_use.add_usage(node_usage);
  2303     // Increment the number of instructions in this bundle
  2304     _bundle_instr_count += instruction_count;
  2306     // Remember this node for later
  2307     if (n->is_Mach())
  2308       _next_node = n;
  2311   // It's possible to have a BoxLock in the graph and in the _bbs mapping but
  2312   // not in the bb->_nodes array.  This happens for debug-info-only BoxLocks.
  2313   // 'Schedule' them (basically ignore in the schedule) but do not insert them
  2314   // into the block.  All other scheduled nodes get put in the schedule here.
  2315   int op = n->Opcode();
  2316   if( (op == Op_Node && n->req() == 0) || // anti-dependence node OR
  2317       (op != Op_Node &&         // Not an unused antidependence node and
  2318        // not an unallocated boxlock
  2319        (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) {
  2321     // Push any trailing projections
  2322     if( bb->get_node(bb->number_of_nodes()-1) != n ) {
  2323       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  2324         Node *foi = n->fast_out(i);
  2325         if( foi->is_Proj() )
  2326           _scheduled.push(foi);
  2330     // Put the instruction in the schedule list
  2331     _scheduled.push(n);
  2334 #ifndef PRODUCT
  2335   if (_cfg->C->trace_opto_output())
  2336     dump_available();
  2337 #endif
  2339   // Walk all the definitions, decrementing use counts, and
  2340   // if a definition has a 0 use count, place it in the available list.
  2341   DecrementUseCounts(n,bb);
  2344 // This method sets the use count within a basic block.  We will ignore all
  2345 // uses outside the current basic block.  As we are doing a backwards walk,
  2346 // any node we reach that has a use count of 0 may be scheduled.  This also
  2347 // avoids the problem of cyclic references from phi nodes, as long as phi
  2348 // nodes are at the front of the basic block.  This method also initializes
  2349 // the available list to the set of instructions that have no uses within this
  2350 // basic block.
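       // Worked example (illustrative block contents): for a block holding
       //   a = LoadI(...); b = AddI(a, c); StoreI(mem, b)
       // with c defined in another block, the backwards walk sees StoreI first
       // (no later uses, so it becomes available), then b (used by StoreI), then
       // a (used by b), so the initial available list is just { StoreI }.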
  2351 void Scheduling::ComputeUseCount(const Block *bb) {
  2352 #ifndef PRODUCT
  2353   if (_cfg->C->trace_opto_output())
  2354     tty->print("# -> ComputeUseCount\n");
  2355 #endif
  2357   // Clear the list of available and scheduled instructions, just in case
  2358   _available.clear();
  2359   _scheduled.clear();
  2361   // No delay slot specified
  2362   _unconditional_delay_slot = NULL;
  2364 #ifdef ASSERT
  2365   for( uint i=0; i < bb->number_of_nodes(); i++ )
  2366     assert( _uses[bb->get_node(i)->_idx] == 0, "_use array not clean" );
  2367 #endif
  2369   // Force the _uses count to never go to zero for unschedulable pieces
  2370   // of the block
  2371   for( uint k = 0; k < _bb_start; k++ )
  2372     _uses[bb->get_node(k)->_idx] = 1;
  2373   for( uint l = _bb_end; l < bb->number_of_nodes(); l++ )
  2374     _uses[bb->get_node(l)->_idx] = 1;
  2376   // Iterate backwards over the instructions in the block.  Don't count the
  2377   // branch projections at end or the block header instructions.
  2378   for( uint j = _bb_end-1; j >= _bb_start; j-- ) {
  2379     Node *n = bb->get_node(j);
  2380     if( n->is_Proj() ) continue; // Projections handled another way
  2382     // Account for all uses
  2383     for ( uint k = 0; k < n->len(); k++ ) {
  2384       Node *inp = n->in(k);
  2385       if (!inp) continue;
  2386       assert(inp != n, "no cycles allowed" );
  2387       if (_cfg->get_block_for_node(inp) == bb) { // Block-local use?
  2388         if (inp->is_Proj()) { // Skip through Proj's
  2389           inp = inp->in(0);
  2391         ++_uses[inp->_idx];     // Count 1 block-local use
  2395     // If this instruction has a 0 use count, then it is available
  2396     if (!_uses[n->_idx]) {
  2397       _current_latency[n->_idx] = _bundle_cycle_number;
  2398       AddNodeToAvailableList(n);
  2401 #ifndef PRODUCT
  2402     if (_cfg->C->trace_opto_output()) {
  2403       tty->print("#   uses: %3d: ", _uses[n->_idx]);
  2404       n->dump();
  2406 #endif
  2409 #ifndef PRODUCT
  2410   if (_cfg->C->trace_opto_output())
  2411     tty->print("# <- ComputeUseCount\n");
  2412 #endif
  2415 // This routine performs scheduling on each basic block in reverse order,
  2416 // using instruction latencies and taking into account function unit
  2417 // availability.
  2418 void Scheduling::DoScheduling() {
  2419 #ifndef PRODUCT
  2420   if (_cfg->C->trace_opto_output())
  2421     tty->print("# -> DoScheduling\n");
  2422 #endif
  2424   Block *succ_bb = NULL;
  2425   Block *bb;
  2427   // Walk over all the basic blocks in reverse order
  2428   for (int i = _cfg->number_of_blocks() - 1; i >= 0; succ_bb = bb, i--) {
  2429     bb = _cfg->get_block(i);
  2431 #ifndef PRODUCT
  2432     if (_cfg->C->trace_opto_output()) {
  2433       tty->print("#  Schedule BB#%03d (initial)\n", i);
  2434       for (uint j = 0; j < bb->number_of_nodes(); j++) {
  2435         bb->get_node(j)->dump();
  2438 #endif
  2440     // On the head node, skip processing
  2441     if (bb == _cfg->get_root_block()) {
  2442       continue;
  2445     // Skip empty, connector blocks
  2446     if (bb->is_connector())
  2447       continue;
  2449     // If the following block is not the sole successor of
  2450     // this one, then reset the pipeline information
  2451     if (bb->_num_succs != 1 || bb->non_connector_successor(0) != succ_bb) {
  2452 #ifndef PRODUCT
  2453       if (_cfg->C->trace_opto_output()) {
  2454         tty->print("*** bundle start of next BB, node %d, for %d instructions\n",
  2455                    _next_node->_idx, _bundle_instr_count);
  2457 #endif
  2458       step_and_clear();
  2461     // Leave untouched the starting instruction, any Phis, a CreateEx node
  2462     // or Top.  bb->get_node(_bb_start) is the first schedulable instruction.
  2463     _bb_end = bb->number_of_nodes()-1;
  2464     for( _bb_start=1; _bb_start <= _bb_end; _bb_start++ ) {
  2465       Node *n = bb->get_node(_bb_start);
  2466       // Things not matched, like PhiNodes and ProjNodes, don't get scheduled.
  2467       // Also, MachIdealNodes do not get scheduled
  2468       if( !n->is_Mach() ) continue;     // Skip non-machine nodes
  2469       MachNode *mach = n->as_Mach();
  2470       int iop = mach->ideal_Opcode();
  2471       if( iop == Op_CreateEx ) continue; // CreateEx is pinned
  2472       if( iop == Op_Con ) continue;      // Do not schedule Top
  2473       if( iop == Op_Node &&     // Do not schedule PhiNodes, ProjNodes
  2474           mach->pipeline() == MachNode::pipeline_class() &&
  2475           !n->is_SpillCopy() )  // Breakpoints, Prolog, etc
  2476         continue;
  2477       break;                    // Funny loop structure to be sure...
  2479     // Compute last "interesting" instruction in block - last instruction we
  2480     // might schedule.  _bb_end points just after last schedulable inst.  We
  2481     // normally schedule conditional branches (despite them being forced last
  2482     // in the block), because they have delay slots we can fill.  Calls all
  2483     // have their delay slots filled in the template expansions, so we don't
  2484     // bother scheduling them.
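           // (Illustrative case handled below: if the block ends in a Catch, or
           // in a Halt on an unreachable path, _bb_end is walked back over the
           // trailing MachProj results to the MachCall itself, so neither the
           // call nor its projections are rescheduled.)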
  2485     Node *last = bb->get_node(_bb_end);
  2486     // Ignore trailing NOPs.
  2487     while (_bb_end > 0 && last->is_Mach() &&
  2488            last->as_Mach()->ideal_Opcode() == Op_Con) {
  2489       last = bb->get_node(--_bb_end);
  2491     assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, "");
  2492     if( last->is_Catch() ||
  2493        // Exclude unreachable path case when Halt node is in a separate block.
  2494        (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
  2495       // There must be a prior call.  Skip it.
  2496       while( !bb->get_node(--_bb_end)->is_MachCall() ) {
  2497         assert( bb->get_node(_bb_end)->is_MachProj(), "skipping projections after expected call" );
  2499     } else if( last->is_MachNullCheck() ) {
  2500       // Back up so the last null-checked memory instruction is
  2501       // outside the schedulable range. Skip over the nullcheck,
  2502       // projection, and the memory nodes.
  2503       Node *mem = last->in(1);
  2504       do {
  2505         _bb_end--;
  2506       } while (mem != bb->get_node(_bb_end));
  2507     } else {
  2508       // Set _bb_end to point after last schedulable inst.
  2509       _bb_end++;
  2512     assert( _bb_start <= _bb_end, "inverted block ends" );
  2514     // Compute the register antidependencies for the basic block
  2515     ComputeRegisterAntidependencies(bb);
  2516     if (_cfg->C->failing())  return;  // too many D-U pinch points
  2518     // Compute intra-bb latencies for the nodes
  2519     ComputeLocalLatenciesForward(bb);
  2521     // Compute the usage within the block, and set the list of all nodes
  2522     // in the block that have no uses within the block.
  2523     ComputeUseCount(bb);
  2525     // Schedule the remaining instructions in the block
  2526     while ( _available.size() > 0 ) {
  2527       Node *n = ChooseNodeToBundle();
  2528       guarantee(n != NULL, "no nodes available");
  2529       AddNodeToBundle(n,bb);
  2532     assert( _scheduled.size() == _bb_end - _bb_start, "wrong number of instructions" );
  2533 #ifdef ASSERT
  2534     for( uint l = _bb_start; l < _bb_end; l++ ) {
  2535       Node *n = bb->get_node(l);
  2536       uint m;
  2537       for( m = 0; m < _bb_end-_bb_start; m++ )
  2538         if( _scheduled[m] == n )
  2539           break;
  2540       assert( m < _bb_end-_bb_start, "instruction missing in schedule" );
  2542 #endif
  2544     // Now copy the instructions (in reverse order) back to the block
  2545     for ( uint k = _bb_start; k < _bb_end; k++ )
  2546       bb->map_node(_scheduled[_bb_end-k-1], k);
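           // Worked example (illustrative indices): with _bb_start == 2 and
           // _bb_end == 5, _scheduled holds three nodes; _scheduled[2] (chosen
           // last, i.e. earliest in program order) lands at index 2 and
           // _scheduled[0] (chosen first, i.e. latest) lands at index 4.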
  2548 #ifndef PRODUCT
  2549     if (_cfg->C->trace_opto_output()) {
  2550       tty->print("#  Schedule BB#%03d (final)\n", i);
  2551       uint current = 0;
  2552       for (uint j = 0; j < bb->number_of_nodes(); j++) {
  2553         Node *n = bb->get_node(j);
  2554         if( valid_bundle_info(n) ) {
  2555           Bundle *bundle = node_bundling(n);
  2556           if (bundle->instr_count() > 0 || bundle->flags() > 0) {
  2557             tty->print("*** Bundle: ");
  2558             bundle->dump();
  2560           n->dump();
  2564 #endif
  2565 #ifdef ASSERT
  2566   verify_good_schedule(bb,"after block local scheduling");
  2567 #endif
  2570 #ifndef PRODUCT
  2571   if (_cfg->C->trace_opto_output())
  2572     tty->print("# <- DoScheduling\n");
  2573 #endif
  2575   // Record final node-bundling array location
  2576   _regalloc->C->set_node_bundling_base(_node_bundling_base);
  2578 } // end DoScheduling
  2580 // Verify that no live-range used in the block is killed in the block by a
  2581 // wrong DEF.  This doesn't verify live-ranges that span blocks.
  2583 // Check for edge existence.  Used to avoid adding redundant precedence edges.
  2584 static bool edge_from_to( Node *from, Node *to ) {
  2585   for( uint i=0; i<from->len(); i++ )
  2586     if( from->in(i) == to )
  2587       return true;
  2588   return false;
  2591 #ifdef ASSERT
  2592 void Scheduling::verify_do_def( Node *n, OptoReg::Name def, const char *msg ) {
  2593   // Check for bad kills
  2594   if( OptoReg::is_valid(def) ) { // Ignore stores & control flow
  2595     Node *prior_use = _reg_node[def];
  2596     if( prior_use && !edge_from_to(prior_use,n) ) {
  2597       tty->print("%s = ",OptoReg::as_VMReg(def)->name());
  2598       n->dump();
  2599       tty->print_cr("...");
  2600       prior_use->dump();
  2601       assert(edge_from_to(prior_use,n),msg);
  2603     _reg_node.map(def,NULL); // Kill live USEs
  2607 void Scheduling::verify_good_schedule( Block *b, const char *msg ) {
  2609   // Zap to something reasonable for the verify code
  2610   _reg_node.clear();
  2612   // Walk over the block backwards.  Check to make sure each DEF doesn't
  2613   // kill a live value (other than the one it's supposed to).  Add each
  2614   // USE to the live set.
  2615   for( uint i = b->number_of_nodes()-1; i >= _bb_start; i-- ) {
  2616     Node *n = b->get_node(i);
  2617     int n_op = n->Opcode();
  2618     if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
  2619       // Fat-proj kills a slew of registers
  2620       RegMask rm = n->out_RegMask();// Make local copy
  2621       while( rm.is_NotEmpty() ) {
  2622         OptoReg::Name kill = rm.find_first_elem();
  2623         rm.Remove(kill);
  2624         verify_do_def( n, kill, msg );
  2626     } else if( n_op != Op_Node ) { // Avoid brand new antidependence nodes
  2627       // Get DEF'd registers the normal way
  2628       verify_do_def( n, _regalloc->get_reg_first(n), msg );
  2629       verify_do_def( n, _regalloc->get_reg_second(n), msg );
  2632     // Now make all USEs live
  2633     for( uint i=1; i<n->req(); i++ ) {
  2634       Node *def = n->in(i);
  2635       assert(def != 0, "input edge required");
  2636       OptoReg::Name reg_lo = _regalloc->get_reg_first(def);
  2637       OptoReg::Name reg_hi = _regalloc->get_reg_second(def);
  2638       if( OptoReg::is_valid(reg_lo) ) {
  2639         assert(!_reg_node[reg_lo] || edge_from_to(_reg_node[reg_lo],def), msg);
  2640         _reg_node.map(reg_lo,n);
  2642       if( OptoReg::is_valid(reg_hi) ) {
  2643         assert(!_reg_node[reg_hi] || edge_from_to(_reg_node[reg_hi],def), msg);
  2644         _reg_node.map(reg_hi,n);
  2650   // Zap to something reasonable for the Antidependence code
  2651   _reg_node.clear();
  2653 #endif
  2655 // Conditionally add precedence edges.  Avoid putting edges on Projs.
  2656 static void add_prec_edge_from_to( Node *from, Node *to ) {
  2657   if( from->is_Proj() ) {       // Put precedence edge on Proj's input
  2658     assert( from->req() == 1 && (from->len() == 1 || from->in(1)==0), "no precedence edges on projections" );
  2659     from = from->in(0);
  2661   if( from != to &&             // No cycles (for things like LD L0,[L0+4] )
  2662       !edge_from_to( from, to ) ) // Avoid duplicate edge
  2663     from->add_prec(to);
  2666 void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def ) {
  2667   if( !OptoReg::is_valid(def_reg) ) // Ignore stores & control flow
  2668     return;
  2670   Node *pinch = _reg_node[def_reg]; // Get pinch point
  2671   if ((pinch == NULL) || _cfg->get_block_for_node(pinch) != b || // No pinch-point yet?
  2672       is_def ) {    // Check for a true def (not a kill)
  2673     _reg_node.map(def_reg,def); // Record def/kill as the optimistic pinch-point
  2674     return;
  2677   Node *kill = def;             // Rename 'def' to more descriptive 'kill'
  2678   debug_only( def = (Node*)0xdeadbeef; )
  2680   // After some number of kills there _may_ be a later def
  2681   Node *later_def = NULL;
  2683   // Finding a kill requires a real pinch-point.
  2684   // Check for not already having a pinch-point.
  2685   // Pinch points are Op_Node's.
  if( pinch->Opcode() != Op_Node ) { // Or later-def/kill as pinch-point?
    later_def = pinch;            // Must be def/kill as optimistic pinch-point
    if ( _pinch_free_list.size() > 0) {
      pinch = _pinch_free_list.pop();
    } else {
      pinch = new (_cfg->C) Node(1); // Pinch point to-be
    }
    if (pinch->_idx >= _regalloc->node_regs_max_index()) {
      _cfg->C->record_method_not_compilable("too many D-U pinch points");
      return;
    }
    _cfg->map_node_to_block(pinch, b);      // Pretend it's valid in this block (lazy init)
    _reg_node.map(def_reg,pinch); // Record pinch-point
    //_regalloc->set_bad(pinch->_idx); // Already initialized this way.
    if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill
      pinch->init_req(0, _cfg->C->top());     // set not NULL for the next call
      add_prec_edge_from_to(later_def,pinch); // Add edge from kill to pinch
      later_def = NULL;           // and no later def
    }
    pinch->set_req(0,later_def);  // Hook later def so we can find it
  } else {                        // Else have valid pinch point
    if( pinch->in(0) )            // If there is a later-def
      later_def = pinch->in(0);   // Get it
  }

  // Add output-dependence edge from later def to kill
  if( later_def )               // If there is some original def
    add_prec_edge_from_to(later_def,kill); // Add edge from def to kill

  // See if current kill is also a use, and so is forced to be the pinch-point.
  if( pinch->Opcode() == Op_Node ) {
    Node *uses = kill->is_Proj() ? kill->in(0) : kill;
    for( uint i=1; i<uses->req(); i++ ) {
      if( _regalloc->get_reg_first(uses->in(i)) == def_reg ||
          _regalloc->get_reg_second(uses->in(i)) == def_reg ) {
        // Yes, found a use/kill pinch-point
        pinch->set_req(0,NULL);
        pinch->replace_by(kill); // Move anti-dep edges up
        pinch = kill;
        _reg_node.map(def_reg,pinch);
        return;
      }
    }
  }

  // Add edge from kill to pinch-point
  add_prec_edge_from_to(kill,pinch);
}
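
// anti_do_use is called for every register read by an instruction.  If a
// later (already visited) def/kill of that register has been recorded in
// _reg_node, a precedence edge from the pinch point to this use keeps the
// use from being scheduled after the kill.  If the pinch point is a real
// Op_Node that is not yet in the block, it is inserted just after the use.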
void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) {
  if( !OptoReg::is_valid(use_reg) ) // Ignore stores & control flow
    return;
  Node *pinch = _reg_node[use_reg]; // Get pinch point
  // Check for no later def_reg/kill in block
  if ((pinch != NULL) && _cfg->get_block_for_node(pinch) == b &&
      // Use has to be block-local as well
      _cfg->get_block_for_node(use) == b) {
    if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
        pinch->req() == 1 ) {   // pinch not yet in block?
      pinch->del_req(0);        // yank pointer to later-def, also set flag
      // Insert the pinch-point in the block just after the last use
      b->insert_node(pinch, b->find_node(use) + 1);
      _bb_end++;                // Increase size scheduled region in block
    }

    add_prec_edge_from_to(pinch,use);
  }
}

// We insert antidependences between reads of allocated registers and the
// writes that follow them, to prevent illegal code motion.  The number of
// added edges should stay fairly small, especially as we only add edges
// within the current basic block.
void Scheduling::ComputeRegisterAntidependencies(Block *b) {

#ifdef ASSERT
  verify_good_schedule(b,"before block local scheduling");
#endif

  // A valid schedule, for each register independently, is an endless cycle
  // of: a def, then some uses (connected to the def by true dependencies),
  // then some kills (defs with no uses), finally the cycle repeats with a new
  // def.  The uses are allowed to float relative to each other, as are the
  // kills.  No use is allowed to slide past a kill (or def).  This requires
  // antidependencies between all uses of a single def and all kills that
  // follow, up to the next def.  More edges are redundant, because later defs
  // & kills are already serialized with true or antidependencies.  To keep
  // the edge count down, we add a 'pinch point' node if there's more than
  // one use or more than one kill/def.

  // We add dependencies in one bottom-up pass.

  // For each instruction we handle its DEFs/KILLs, then its USEs.

  // For each DEF/KILL, we check to see if there's a prior DEF/KILL for this
  // register.  If not, we record the DEF/KILL in _reg_node, the
  // register-to-def mapping.  If there is a prior DEF/KILL, we insert a
  // "pinch point", a new Node that's in the graph but not in the block.
  // We put edges from the prior and current DEF/KILLs to the pinch point.
  // We put the pinch point in _reg_node.  If there's already a pinch point
  // we merely add an edge from the current DEF/KILL to the pinch point.

  // After doing the DEF/KILLs, we handle USEs.  For each used register, we
  // put an edge from the pinch point to the USE.

  // To be expedient, the _reg_node array is pre-allocated for the whole
  // compilation.  _reg_node is lazily initialized; it either contains a NULL,
  // or a valid def/kill/pinch-point, or a leftover node from some prior
  // block.  A leftover node from a prior block is treated like a NULL (no
  // prior def, so no anti-dependence needed).  A valid def is distinguished
  // by being in the current block.
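  //
  // Sketch (hypothetical register R, shown in program order within a block):
  //     v1 = read(R)        // use
  //     v2 = read(R)        // use
  //     call foo()          // fat projection kills R
  //     call bar()          // fat projection kills R again
  // Walking bottom-up, bar's kill of R is recorded optimistically; foo's kill
  // then forces a pinch point, with an edge from each kill to the pinch.  The
  // two reads, visited later in the walk, each get a single edge from the
  // pinch point, so no individual (read, kill) edge pair is needed.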
  bool fat_proj_seen = false;
  uint last_safept = _bb_end-1;
  Node* end_node         = (_bb_end-1 >= _bb_start) ? b->get_node(last_safept) : NULL;
  Node* last_safept_node = end_node;
  for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
    Node *n = b->get_node(i);
    int is_def = n->outcnt();   // def if some uses prior to adding precedence edges
    if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
      // Fat-proj kills a slew of registers
      // This can add edges to 'n' and obscure whether or not it was a def,
      // hence the is_def flag.
      fat_proj_seen = true;
      RegMask rm = n->out_RegMask();// Make local copy
      while( rm.is_NotEmpty() ) {
        OptoReg::Name kill = rm.find_first_elem();
        rm.Remove(kill);
        anti_do_def( b, n, kill, is_def );
      }
    } else {
      // Get DEF'd registers the normal way
      anti_do_def( b, n, _regalloc->get_reg_first(n), is_def );
      anti_do_def( b, n, _regalloc->get_reg_second(n), is_def );
    }

    // Kill projections on a branch should appear to occur on the
    // branch, not afterwards, so grab the masks from the projections
    // and process them.
    if (n->is_MachBranch() || (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_Jump)) {
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* use = n->fast_out(i);
        if (use->is_Proj()) {
          RegMask rm = use->out_RegMask();// Make local copy
          while( rm.is_NotEmpty() ) {
            OptoReg::Name kill = rm.find_first_elem();
            rm.Remove(kill);
            anti_do_def( b, n, kill, false );
          }
        }
      }
    }

    // Check each register used by this instruction for a following DEF/KILL
    // that must occur afterward and requires an anti-dependence edge.
    for( uint j=0; j<n->req(); j++ ) {
      Node *def = n->in(j);
      if( def ) {
        assert( !def->is_MachProj() || def->ideal_reg() != MachProjNode::fat_proj, "" );
        anti_do_use( b, n, _regalloc->get_reg_first(def) );
        anti_do_use( b, n, _regalloc->get_reg_second(def) );
      }
    }
    // Do not allow defs of new derived values to float above GC
    // points unless the base is definitely available at the GC point.

    Node *m = b->get_node(i);

    // Add precedence edge from following safepoint to use of derived pointer
    if( last_safept_node != end_node &&
        m != last_safept_node) {
      for (uint k = 1; k < m->req(); k++) {
        const Type *t = m->in(k)->bottom_type();
        if( t->isa_oop_ptr() &&
            t->is_ptr()->offset() != 0 ) {
          last_safept_node->add_prec( m );
          break;
        }
      }
    }

    if( n->jvms() ) {           // Precedence edge from derived to safept
      // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
      if( b->get_node(last_safept) != last_safept_node ) {
        last_safept = b->find_node(last_safept_node);
      }
      for( uint j=last_safept; j > i; j-- ) {
        Node *mach = b->get_node(j);
        if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
          mach->add_prec( n );
      }
      last_safept = i;
      last_safept_node = m;
    }
  }

  if (fat_proj_seen) {
    // Garbage collect pinch nodes that were not consumed.
    // They are usually created by a fat kill MachProj for a call.
    garbage_collect_pinch_nodes();
  }
}

// Garbage collect pinch nodes for reuse by other blocks.
//
// The block scheduler's insertion of anti-dependence
// edges creates many pinch nodes when the block contains
// 2 or more Calls.  A pinch node is used to prevent a
// combinatorial explosion of edges.  If a set of kills for a
// register is anti-dependent on a set of uses (or defs), rather
// than adding an edge in the graph between each pair of kill
// and use (or def), a pinch is inserted between them:
//
//            use1   use2  use3
//                \   |   /
//                 \  |  /
//                  pinch
//                 /  |  \
//                /   |   \
//            kill1 kill2 kill3
//
// One pinch node is created per register killed when
// the second call is encountered during a backwards pass
// over the block.  Most of these pinch nodes are never
// wired into the graph because the register is never
// used or def'ed in the block.
//
void Scheduling::garbage_collect_pinch_nodes() {
#ifndef PRODUCT
    if (_cfg->C->trace_opto_output()) tty->print("Reclaimed pinch nodes:");
#endif
    int trace_cnt = 0;
    for (uint k = 0; k < _reg_node.Size(); k++) {
      Node* pinch = _reg_node[k];
      if ((pinch != NULL) && pinch->Opcode() == Op_Node &&
          // no precedence input edges
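          // (precedence edges, if any, live in slots req() .. len()-1 of the
          //  node, so an empty or NULL first precedence slot means this pinch
          //  node was never wired between a kill and its uses)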
          (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) {
        cleanup_pinch(pinch);
        _pinch_free_list.push(pinch);
        _reg_node.map(k, NULL);
#ifndef PRODUCT
        if (_cfg->C->trace_opto_output()) {
          trace_cnt++;
          if (trace_cnt > 40) {
            tty->print("\n");
            trace_cnt = 0;
          }
          tty->print(" %d", pinch->_idx);
        }
#endif
      }
    }
#ifndef PRODUCT
    if (_cfg->C->trace_opto_output()) tty->print("\n");
#endif
}

// Clean up a pinch node for reuse.
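// Strips this pinch node's precedence edges out of every node that still
// lists it as a precedence input, and clears the later-def slot (req 0),
// so the node can be returned to _pinch_free_list.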
void Scheduling::cleanup_pinch( Node *pinch ) {
  assert (pinch && pinch->Opcode() == Op_Node && pinch->req() == 1, "just checking");

  for (DUIterator_Last imin, i = pinch->last_outs(imin); i >= imin; ) {
    Node* use = pinch->last_out(i);
    uint uses_found = 0;
    for (uint j = use->req(); j < use->len(); j++) {
      if (use->in(j) == pinch) {
        use->rm_prec(j);
        uses_found++;
      }
    }
    assert(uses_found > 0, "must be a precedence edge");
    i -= uses_found;    // we deleted 1 or more copies of this edge
  }
  // May have a later_def entry
  pinch->set_req(0, NULL);
}

#ifndef PRODUCT

void Scheduling::dump_available() const {
  tty->print("#Availist  ");
  for (uint i = 0; i < _available.size(); i++)
    tty->print(" N%d/l%d", _available[i]->_idx,_current_latency[_available[i]->_idx]);
  tty->cr();
}

// Print Scheduling Statistics
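//
// The "Average ILP" figure reported below is total_instructions divided by
// total_bundles.  For example, with hypothetical counts of 10 one-instruction
// bundles and 5 two-instruction bundles, it would print
// (10*1 + 5*2) / (10 + 5) = 1.33.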
void Scheduling::print_statistics() {
  // Print the size added by nops for bundling
  tty->print("Nops added %d bytes to total of %d bytes",
    _total_nop_size, _total_method_size);
  if (_total_method_size > 0)
    tty->print(", for %.2f%%",
      ((double)_total_nop_size) / ((double) _total_method_size) * 100.0);
  tty->print("\n");

  // Print the number of branch shadows filled
  if (Pipeline::_branch_has_delay_slot) {
    tty->print("Of %d branches, %d had unconditional delay slots filled",
      _total_branches, _total_unconditional_delays);
    if (_total_branches > 0)
      tty->print(", for %.2f%%",
        ((double)_total_unconditional_delays) / ((double)_total_branches) * 100.0);
    tty->print("\n");
  }

  uint total_instructions = 0, total_bundles = 0;

  for (uint i = 1; i <= Pipeline::_max_instrs_per_cycle; i++) {
    uint bundle_count   = _total_instructions_per_bundle[i];
    total_instructions += bundle_count * i;
    total_bundles      += bundle_count;
  }

  if (total_bundles > 0)
    tty->print("Average ILP (excluding nops) is %.2f\n",
      ((double)total_instructions) / ((double)total_bundles));
}
#endif
