src/share/vm/opto/compile.cpp

changeset: 6312:04d32e7fad07
author:    kvn
date:      Tue, 14 Jan 2014 17:46:48 -0800
parent:    6217:849eb7bfceac
children:  6313:de95063c0e34, 6503:a9becfeecd1b

8002074: Support for AES on SPARC
Summary: Add intrinsics/stub routines support for single-block and multi-block (as used by Cipher Block Chaining mode) AES encryption and decryption operations on the SPARC platform.
Reviewed-by: kvn, roland
Contributed-by: shrinivas.joshi@oracle.com

/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciReplay.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/escape.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/macro.hpp"
#include "opto/matcher.hpp"
#include "opto/mathexactnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/output.hpp"
#include "opto/parse.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/stringopts.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/arguments.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"
#include "trace/tracing.hpp"
#include "utilities/copy.hpp"
#ifdef TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_arm
# include "adfiles/ad_arm.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_ppc
# include "adfiles/ad_ppc.hpp"
#endif

// -------------------- Compile::mach_constant_base_node -----------------------
// Constant table base node singleton.
MachConstantBaseNode* Compile::mach_constant_base_node() {
  if (_mach_constant_base_node == NULL) {
    _mach_constant_base_node = new (C) MachConstantBaseNode();
    _mach_constant_base_node->add_req(C->root());
  }
  return _mach_constant_base_node;
}


/// Support for intrinsics.

// Return the index at which m must be inserted (or already exists).
// The sort order is by the address of the ciMethod, with is_virtual as minor key.
int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual) {
#ifdef ASSERT
  for (int i = 1; i < _intrinsics->length(); i++) {
    CallGenerator* cg1 = _intrinsics->at(i-1);
    CallGenerator* cg2 = _intrinsics->at(i);
    assert(cg1->method() != cg2->method()
           ? cg1->method()     < cg2->method()
           : cg1->is_virtual() < cg2->is_virtual(),
           "compiler intrinsics list must stay sorted");
  }
#endif
  // Binary search sorted list, in decreasing intervals [lo, hi].
  int lo = 0, hi = _intrinsics->length()-1;
  while (lo <= hi) {
    int mid = (uint)(hi + lo) / 2;
    ciMethod* mid_m = _intrinsics->at(mid)->method();
    if (m < mid_m) {
      hi = mid-1;
    } else if (m > mid_m) {
      lo = mid+1;
    } else {
      // look at minor sort key
      bool mid_virt = _intrinsics->at(mid)->is_virtual();
      if (is_virtual < mid_virt) {
        hi = mid-1;
      } else if (is_virtual > mid_virt) {
        lo = mid+1;
      } else {
        return mid;  // exact match
      }
    }
  }
  return lo;  // inexact match
}
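
// Example (editorial illustration; m1 and m2 are hypothetical methods with
// m1 < m2 by ciMethod* address):
//
//   index:    0             1            2
//   entry:  (m1,nonvirt)  (m1,virt)   (m2,nonvirt)
//
//   intrinsic_insertion_index(m1, true)  --> 1  (exact match)
//   intrinsic_insertion_index(m2, true)  --> 3  (insertion point at the end)
//
// Note that (hi + lo) is summed as uint, so the midpoint computation cannot
// overflow int even for a (hypothetically) huge list.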

void Compile::register_intrinsic(CallGenerator* cg) {
  if (_intrinsics == NULL) {
    _intrinsics = new (comp_arena()) GrowableArray<CallGenerator*>(comp_arena(), 60, 0, NULL);
  }
  // This code is stolen from ciObjectFactory::insert.
  // Really, GrowableArray should have methods for
  // insert_at, remove_at, and binary_search.
  int len = _intrinsics->length();
  int index = intrinsic_insertion_index(cg->method(), cg->is_virtual());
  if (index == len) {
    _intrinsics->append(cg);
  } else {
#ifdef ASSERT
    CallGenerator* oldcg = _intrinsics->at(index);
    assert(oldcg->method() != cg->method() || oldcg->is_virtual() != cg->is_virtual(), "don't register twice");
#endif
    _intrinsics->append(_intrinsics->at(len-1));
    int pos;
    for (pos = len-2; pos >= index; pos--) {
      _intrinsics->at_put(pos+1, _intrinsics->at(pos));
    }
    _intrinsics->at_put(index, cg);
  }
  assert(find_intrinsic(cg->method(), cg->is_virtual()) == cg, "registration worked");
}

CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) {
  assert(m->is_loaded(), "don't try this on unloaded methods");
  if (_intrinsics != NULL) {
    int index = intrinsic_insertion_index(m, is_virtual);
    if (index < _intrinsics->length()
        && _intrinsics->at(index)->method() == m
        && _intrinsics->at(index)->is_virtual() == is_virtual) {
      return _intrinsics->at(index);
    }
  }
  // Lazily create intrinsics for intrinsic IDs well-known in the runtime.
  if (m->intrinsic_id() != vmIntrinsics::_none &&
      m->intrinsic_id() <= vmIntrinsics::LAST_COMPILER_INLINE) {
    CallGenerator* cg = make_vm_intrinsic(m, is_virtual);
    if (cg != NULL) {
      // Save it for next time:
      register_intrinsic(cg);
      return cg;
    } else {
      gather_intrinsic_statistics(m->intrinsic_id(), is_virtual, _intrinsic_disabled);
    }
  }
  return NULL;
}

// Compile::register_library_intrinsics and make_vm_intrinsic are defined
// in library_call.cpp.

#ifndef PRODUCT
// statistics gathering...

juint  Compile::_intrinsic_hist_count[vmIntrinsics::ID_LIMIT] = {0};
jubyte Compile::_intrinsic_hist_flags[vmIntrinsics::ID_LIMIT] = {0};

bool Compile::gather_intrinsic_statistics(vmIntrinsics::ID id, bool is_virtual, int flags) {
  assert(id > vmIntrinsics::_none && id < vmIntrinsics::ID_LIMIT, "oob");
  int oflags = _intrinsic_hist_flags[id];
  assert(flags != 0, "what happened?");
  if (is_virtual) {
    flags |= _intrinsic_virtual;
  }
  bool changed = (flags != oflags);
  if ((flags & _intrinsic_worked) != 0) {
    juint count = (_intrinsic_hist_count[id] += 1);
    if (count == 1) {
      changed = true;           // first time
    }
    // increment the overall count also:
    _intrinsic_hist_count[vmIntrinsics::_none] += 1;
  }
  if (changed) {
    if (((oflags ^ flags) & _intrinsic_virtual) != 0) {
      // Something changed about the intrinsic's virtuality.
      if ((flags & _intrinsic_virtual) != 0) {
        // This is the first use of this intrinsic as a virtual call.
        if (oflags != 0) {
          // We already saw it as a non-virtual, so note both cases.
          flags |= _intrinsic_both;
        }
      } else if ((oflags & _intrinsic_both) == 0) {
        // This is the first use of this intrinsic as a non-virtual
        flags |= _intrinsic_both;
      }
    }
    _intrinsic_hist_flags[id] = (jubyte) (oflags | flags);
  }
  // update the overall flags also:
  _intrinsic_hist_flags[vmIntrinsics::_none] |= (jubyte) flags;
  return changed;
}
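
// Example of how the histogram flags evolve (editorial illustration):
//   1st call: gather(id, is_virtual=false, _intrinsic_worked)
//               -> hist flags for id become {worked}
//   2nd call: gather(id, is_virtual=true,  _intrinsic_worked)
//               -> virtuality changed and oflags != 0, so flags gain
//                  {virtual, both}; hist flags become {worked, virtual, both}
// The _intrinsic_both bit therefore records that the intrinsic has been
// used at both virtual and non-virtual call sites.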

static char* format_flags(int flags, char* buf) {
  buf[0] = 0;
  if ((flags & Compile::_intrinsic_worked) != 0)    strcat(buf, ",worked");
  if ((flags & Compile::_intrinsic_failed) != 0)    strcat(buf, ",failed");
  if ((flags & Compile::_intrinsic_disabled) != 0)  strcat(buf, ",disabled");
  if ((flags & Compile::_intrinsic_virtual) != 0)   strcat(buf, ",virtual");
  if ((flags & Compile::_intrinsic_both) != 0)      strcat(buf, ",nonvirtual");
  if (buf[0] == 0)  strcat(buf, ",");
  assert(buf[0] == ',', "must be");
  return &buf[1];
}
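
// Example (editorial illustration): for flags == _intrinsic_worked|_intrinsic_virtual
// the buffer is built up as ",worked,virtual" and &buf[1] returns the
// caller-friendly "worked,virtual"; a zero flags value yields the empty
// string via the lone-comma fallback.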

void Compile::print_intrinsic_statistics() {
  char flagsbuf[100];
  ttyLocker ttyl;
  if (xtty != NULL)  xtty->head("statistics type='intrinsic'");
  tty->print_cr("Compiler intrinsic usage:");
  juint total = _intrinsic_hist_count[vmIntrinsics::_none];
  if (total == 0)  total = 1;  // avoid div0 in case of no successes
  #define PRINT_STAT_LINE(name, c, f) \
    tty->print_cr("  %4d (%4.1f%%) %s (%s)", (int)(c), ((c) * 100.0) / total, name, f);
  for (int index = 1 + (int)vmIntrinsics::_none; index < (int)vmIntrinsics::ID_LIMIT; index++) {
    vmIntrinsics::ID id = (vmIntrinsics::ID) index;
    int   flags = _intrinsic_hist_flags[id];
    juint count = _intrinsic_hist_count[id];
    if ((flags | count) != 0) {
      PRINT_STAT_LINE(vmIntrinsics::name_at(id), count, format_flags(flags, flagsbuf));
    }
  }
  PRINT_STAT_LINE("total", total, format_flags(_intrinsic_hist_flags[vmIntrinsics::_none], flagsbuf));
  if (xtty != NULL)  xtty->tail("statistics");
}

void Compile::print_statistics() {
  { ttyLocker ttyl;
    if (xtty != NULL)  xtty->head("statistics type='opto'");
    Parse::print_statistics();
    PhaseCCP::print_statistics();
    PhaseRegAlloc::print_statistics();
    Scheduling::print_statistics();
    PhasePeephole::print_statistics();
    PhaseIdealLoop::print_statistics();
    if (xtty != NULL)  xtty->tail("statistics");
  }
  if (_intrinsic_hist_flags[vmIntrinsics::_none] != 0) {
    // put this under its own <statistics> element.
    print_intrinsic_statistics();
  }
}
#endif //PRODUCT

// Support for bundling info
Bundle* Compile::node_bundling(const Node *n) {
  assert(valid_bundle_info(n), "oob");
  return &_node_bundling_base[n->_idx];
}

bool Compile::valid_bundle_info(const Node *n) {
  return (_node_bundling_limit > n->_idx);
}


void Compile::gvn_replace_by(Node* n, Node* nn) {
  for (DUIterator_Last imin, i = n->last_outs(imin); i >= imin; ) {
    Node* use = n->last_out(i);
    bool is_in_table = initial_gvn()->hash_delete(use);
    uint uses_found = 0;
    for (uint j = 0; j < use->len(); j++) {
      if (use->in(j) == n) {
        if (j < use->req())
          use->set_req(j, nn);
        else
          use->set_prec(j, nn);
        uses_found++;
      }
    }
    if (is_in_table) {
      // reinsert into table
      initial_gvn()->hash_find_insert(use);
    }
    record_for_igvn(use);
    i -= uses_found;    // we deleted 1 or more copies of this edge
  }
}

static inline bool not_a_node(const Node* n) {
  if (n == NULL)                   return true;
  if (((intptr_t)n & 1) != 0)      return true;  // uninitialized, etc.
  if (*(address*)n == badAddress)  return true;  // killed by Node::destruct
  return false;
}
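
// Editorial note on the sentinels above: a slot in an in/out array may hold
// NULL, an odd (low-bit tagged) value left in uninitialized storage, or a
// node whose first word was overwritten with the badAddress pattern when
// Node::destruct freed it; graph walks such as identify_useful_nodes()
// below must skip all three.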

// Identify all nodes that are reachable from below, useful.
// Use a breadth-first pass that records state in a Unique_Node_List;
// recursive traversal is slower.
void Compile::identify_useful_nodes(Unique_Node_List &useful) {
  int estimated_worklist_size = unique();
  useful.map( estimated_worklist_size, NULL );  // preallocate space

  // Initialize worklist
  if (root() != NULL)     { useful.push(root()); }
  // If 'top' is cached, declare it useful to preserve cached node
  if( cached_top_node() ) { useful.push(cached_top_node()); }

  // Push all useful nodes onto the list, breadth-first
  for( uint next = 0; next < useful.size(); ++next ) {
    assert( next < unique(), "Unique useful nodes < total nodes");
    Node *n  = useful.at(next);
    uint max = n->len();
    for( uint i = 0; i < max; ++i ) {
      Node *m = n->in(i);
      if (not_a_node(m))  continue;
      useful.push(m);
    }
  }
}

// Update dead_node_list with any missing dead nodes using useful
// list. Consider all non-useful nodes to be useless i.e., dead nodes.
void Compile::update_dead_node_list(Unique_Node_List &useful) {
  uint max_idx = unique();
  VectorSet& useful_node_set = useful.member_set();

  for (uint node_idx = 0; node_idx < max_idx; node_idx++) {
    // If node with index node_idx is not in useful set,
    // mark it as dead in dead node list.
    if (! useful_node_set.test(node_idx) ) {
      record_dead_node(node_idx);
    }
  }
}

void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful) {
  int shift = 0;
  for (int i = 0; i < inlines->length(); i++) {
    CallGenerator* cg = inlines->at(i);
    CallNode* call = cg->call_node();
    if (shift > 0) {
      inlines->at_put(i-shift, cg);
    }
    if (!useful.member(call)) {
      shift++;
    }
  }
  inlines->trunc_to(inlines->length()-shift);
}
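
// Example (editorial illustration): for inlines == [cg0, cg1, cg2] where
// only cg1's call node has become dead, the loop copies cg2 down one slot
// and trunc_to() leaves [cg0, cg2] -- a single compacting pass instead of
// repeated remove_at() calls.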

// Disconnect all useless nodes by disconnecting those at the boundary.
void Compile::remove_useless_nodes(Unique_Node_List &useful) {
  uint next = 0;
  while (next < useful.size()) {
    Node *n = useful.at(next++);
    // Use raw traversal of out edges since this code removes out edges
    int max = n->outcnt();
    for (int j = 0; j < max; ++j) {
      Node* child = n->raw_out(j);
      if (! useful.member(child)) {
        assert(!child->is_top() || child != top(),
               "If top is cached in Compile object it is in useful list");
        // Only need to remove this out-edge to the useless node
        n->raw_del_out(j);
        --j;
        --max;
      }
    }
    if (n->outcnt() == 1 && n->has_special_unique_user()) {
      record_for_igvn(n->unique_out());
    }
  }
  // Remove useless macro and predicate opaq nodes
  for (int i = C->macro_count()-1; i >= 0; i--) {
    Node* n = C->macro_node(i);
    if (!useful.member(n)) {
      remove_macro_node(n);
    }
  }
  // Remove useless expensive nodes
  for (int i = C->expensive_count()-1; i >= 0; i--) {
    Node* n = C->expensive_node(i);
    if (!useful.member(n)) {
      remove_expensive_node(n);
    }
  }
  // clean up the late inline lists
  remove_useless_late_inlines(&_string_late_inlines, useful);
  remove_useless_late_inlines(&_boxing_late_inlines, useful);
  remove_useless_late_inlines(&_late_inlines, useful);
  debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
}

//------------------------------frame_size_in_words-----------------------------
// frame_slots in units of words
int Compile::frame_size_in_words() const {
  // shift is 0 in LP32 and 1 in LP64
  const int shift = (LogBytesPerWord - LogBytesPerInt);
  int words = _frame_slots >> shift;
  assert( words << shift == _frame_slots, "frame size must be properly aligned in LP64" );
  return words;
}
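
// Worked example (editorial illustration): frame slots are 32-bit units, so
// on LP64 shift == LogBytesPerWord - LogBytesPerInt == 3 - 2 == 1 and
// _frame_slots == 14 yields 7 words; on LP32 shift == 0 and slots and words
// coincide. The assert catches an odd slot count on LP64.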

// ============================================================================
//------------------------------CompileWrapper---------------------------------
class CompileWrapper : public StackObj {
  Compile *const _compile;
 public:
  CompileWrapper(Compile* compile);

  ~CompileWrapper();
};

CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) {
  // the Compile* pointer is stored in the current ciEnv:
  ciEnv* env = compile->env();
  assert(env == ciEnv::current(), "must already be a ciEnv active");
  assert(env->compiler_data() == NULL, "compile already active?");
  env->set_compiler_data(compile);
  assert(compile == Compile::current(), "sanity");

  compile->set_type_dict(NULL);
  compile->set_type_hwm(NULL);
  compile->set_type_last_size(0);
  compile->set_last_tf(NULL, NULL);
  compile->set_indexSet_arena(NULL);
  compile->set_indexSet_free_block_list(NULL);
  compile->init_type_arena();
  Type::Initialize(compile);
  _compile->set_scratch_buffer_blob(NULL);
  _compile->begin_method();
}

CompileWrapper::~CompileWrapper() {
  _compile->end_method();
  if (_compile->scratch_buffer_blob() != NULL)
    BufferBlob::free(_compile->scratch_buffer_blob());
  _compile->env()->set_compiler_data(NULL);
}

//----------------------------print_compile_messages---------------------------
void Compile::print_compile_messages() {
#ifndef PRODUCT
  // Check if recompiling
  if (_subsume_loads == false && PrintOpto) {
    // Recompiling without allowing machine instructions to subsume loads
    tty->print_cr("*********************************************************");
    tty->print_cr("** Bailout: Recompile without subsuming loads          **");
    tty->print_cr("*********************************************************");
  }
  if (_do_escape_analysis != DoEscapeAnalysis && PrintOpto) {
    // Recompiling without escape analysis
    tty->print_cr("*********************************************************");
    tty->print_cr("** Bailout: Recompile without escape analysis          **");
    tty->print_cr("*********************************************************");
  }
  if (_eliminate_boxing != EliminateAutoBox && PrintOpto) {
    // Recompiling without boxing elimination
    tty->print_cr("*********************************************************");
    tty->print_cr("** Bailout: Recompile without boxing elimination       **");
    tty->print_cr("*********************************************************");
  }
  if (env()->break_at_compile()) {
    // Open the debugger when compiling this method.
    tty->print("### Breaking when compiling: ");
    method()->print_short_name();
    tty->cr();
    BREAKPOINT;
  }

  if( PrintOpto ) {
    if (is_osr_compilation()) {
      tty->print("[OSR]%3d", _compile_id);
    } else {
      tty->print("%3d", _compile_id);
    }
  }
#endif
}

//-----------------------init_scratch_buffer_blob------------------------------
// Construct a temporary BufferBlob and cache it for this compile.
void Compile::init_scratch_buffer_blob(int const_size) {
  // If there is already a scratch buffer blob allocated and the
  // constant section is big enough, use it.  Otherwise free the
  // current and allocate a new one.
  BufferBlob* blob = scratch_buffer_blob();
  if ((blob != NULL) && (const_size <= _scratch_const_size)) {
    // Use the current blob.
  } else {
    if (blob != NULL) {
      BufferBlob::free(blob);
    }

    ResourceMark rm;
    _scratch_const_size = const_size;
    int size = (MAX_inst_size + MAX_stubs_size + _scratch_const_size);
    blob = BufferBlob::create("Compile::scratch_buffer", size);
    // Record the buffer blob for next time.
    set_scratch_buffer_blob(blob);
    // Have we run out of code space?
    if (scratch_buffer_blob() == NULL) {
      // Let CompileBroker disable further compilations.
      record_failure("Not enough space for scratch buffer in CodeCache");
      return;
    }
  }

  // Initialize the relocation buffers
  relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
  set_scratch_locs_memory(locs_buf);
}
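
// Resulting scratch blob layout (editorial sketch of the code above):
//
//   content_begin()                                        content_end()
//   |<---- instruction/stub/const scratch area ---->|<-- MAX_locs_size -->|
//                                                        (relocInfo)
//
// The relocation scratch memory is carved out of the tail of the same
// blob; scratch_emit_size() below treats locs_buf as the end of the
// usable code area.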

//-----------------------scratch_emit_size-------------------------------------
// Helper function that computes size by emitting code
uint Compile::scratch_emit_size(const Node* n) {
  // Start scratch_emit_size section.
  set_in_scratch_emit_size(true);

  // Emit into a trash buffer and count bytes emitted.
  // This is a pretty expensive way to compute a size,
  // but it works well enough if seldom used.
  // All common fixed-size instructions are given a size
  // method by the AD file.
  // Note that the scratch buffer blob and locs memory are
  // allocated at the beginning of the compile task, and
  // may be shared by several calls to scratch_emit_size.
  // The allocation of the scratch buffer blob is particularly
  // expensive, since it has to grab the code cache lock.
  BufferBlob* blob = this->scratch_buffer_blob();
  assert(blob != NULL, "Initialize BufferBlob at start");
  assert(blob->size() > MAX_inst_size, "sanity");
  relocInfo* locs_buf = scratch_locs_memory();
  address blob_begin = blob->content_begin();
  address blob_end   = (address)locs_buf;
  assert(blob->content_contains(blob_end), "sanity");
  CodeBuffer buf(blob_begin, blob_end - blob_begin);
  buf.initialize_consts_size(_scratch_const_size);
  buf.initialize_stubs_size(MAX_stubs_size);
  assert(locs_buf != NULL, "sanity");
  int lsize = MAX_locs_size / 3;
  buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
  buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
  buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);

  // Do the emission.

  Label fakeL; // Fake label for branch instructions.
  Label*   saveL = NULL;
  uint save_bnum = 0;
  bool is_branch = n->is_MachBranch();
  if (is_branch) {
    MacroAssembler masm(&buf);
    masm.bind(fakeL);
    n->as_MachBranch()->save_label(&saveL, &save_bnum);
    n->as_MachBranch()->label_set(&fakeL, 0);
  }
  n->emit(buf, this->regalloc());
  if (is_branch) // Restore label.
    n->as_MachBranch()->label_set(saveL, save_bnum);

  // End scratch_emit_size section.
  set_in_scratch_emit_size(false);

  return buf.insts_size();
}
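
// Typical use (editorial sketch): the output phase calls
// scratch_emit_size(n) for MachNodes whose size is not a compile-time
// constant in the AD file. A branch node is temporarily bound to fakeL so
// that emission never sees an unbound label; afterwards only the emitted
// instruction byte count is kept and the scratch buffer contents are
// discarded.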

// ============================================================================
//------------------------------Compile standard-------------------------------
debug_only( int Compile::_debug_idx = 100000; )

// Compile a method.  entry_bci is -1 for normal compilations and indicates
// the continuation bci for on stack replacement.
Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci,
                  bool subsume_loads, bool do_escape_analysis, bool eliminate_boxing )
                : Phase(Compiler),
                  _env(ci_env),
                  _log(ci_env->log()),
                  _compile_id(ci_env->compile_id()),
                  _save_argument_registers(false),
                  _stub_name(NULL),
                  _stub_function(NULL),
                  _stub_entry_point(NULL),
                  _method(target),
                  _entry_bci(osr_bci),
                  _initial_gvn(NULL),
                  _for_igvn(NULL),
                  _warm_calls(NULL),
                  _subsume_loads(subsume_loads),
                  _do_escape_analysis(do_escape_analysis),
                  _eliminate_boxing(eliminate_boxing),
                  _failure_reason(NULL),
                  _code_buffer("Compile::Fill_buffer"),
                  _orig_pc_slot(0),
                  _orig_pc_slot_offset_in_bytes(0),
                  _has_method_handle_invokes(false),
                  _mach_constant_base_node(NULL),
                  _node_bundling_limit(0),
                  _node_bundling_base(NULL),
                  _java_calls(0),
                  _inner_loops(0),
                  _scratch_const_size(-1),
                  _in_scratch_emit_size(false),
                  _dead_node_list(comp_arena()),
                  _dead_node_count(0),
#ifndef PRODUCT
                  _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
                  _printer(IdealGraphPrinter::printer()),
#endif
                  _congraph(NULL),
                  _replay_inline_data(NULL),
                  _late_inlines(comp_arena(), 2, 0, NULL),
                  _string_late_inlines(comp_arena(), 2, 0, NULL),
                  _boxing_late_inlines(comp_arena(), 2, 0, NULL),
                  _late_inlines_pos(0),
                  _number_of_mh_late_inlines(0),
                  _inlining_progress(false),
                  _inlining_incrementally(false),
                  _print_inlining_list(NULL),
                  _print_inlining_idx(0),
                  _preserve_jvm_state(0) {
  C = this;

  CompileWrapper cw(this);
#ifndef PRODUCT
  if (TimeCompiler2) {
    tty->print(" ");
    target->holder()->name()->print();
    tty->print(".");
    target->print_short_name();
    tty->print("  ");
  }
  TraceTime t1("Total compilation time", &_t_totalCompilation, TimeCompiler, TimeCompiler2);
  TraceTime t2(NULL, &_t_methodCompilation, TimeCompiler, false);
  bool print_opto_assembly = PrintOptoAssembly || _method->has_option("PrintOptoAssembly");
  if (!print_opto_assembly) {
    bool print_assembly = (PrintAssembly || _method->should_print_assembly());
    if (print_assembly && !Disassembler::can_decode()) {
      tty->print_cr("PrintAssembly request changed to PrintOptoAssembly");
      print_opto_assembly = true;
    }
  }
  set_print_assembly(print_opto_assembly);
  set_parsed_irreducible_loop(false);

  if (method()->has_option("ReplayInline")) {
    _replay_inline_data = ciReplay::load_inline_data(method(), entry_bci(), ci_env->comp_level());
  }
#endif
  set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
  set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));

  if (ProfileTraps) {
    // Make sure the method being compiled gets its own MDO,
    // so we can at least track the decompile_count().
    method()->ensure_method_data();
  }

  Init(::AliasLevel);

  print_compile_messages();

  if (UseOldInlining || PrintCompilation NOT_PRODUCT( || PrintOpto) )
    _ilt = InlineTree::build_inline_tree_root();
  else
    _ilt = NULL;

  // Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
  assert(num_alias_types() >= AliasIdxRaw, "");

#define MINIMUM_NODE_HASH  1023
  // Node list that Iterative GVN will start with
  Unique_Node_List for_igvn(comp_arena());
  set_for_igvn(&for_igvn);

  // GVN that will be run immediately on new nodes
  uint estimated_size = method()->code_size()*4+64;
  estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
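  // Worked example (editorial illustration): a method with code_size() of
  // 100 bytecodes gives 100*4+64 == 464, which is below MINIMUM_NODE_HASH
  // (1023) and is clamped up, so even tiny methods get a usefully sized
  // GVN hash table.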
  PhaseGVN gvn(node_arena(), estimated_size);
  set_initial_gvn(&gvn);

  if (print_inlining() || print_intrinsics()) {
    _print_inlining_list = new (comp_arena()) GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
  }
  { // Scope for timing the parser
    TracePhase t3("parse", &_t_parser, true);

    // Put top into the hash table ASAP.
    initial_gvn()->transform_no_reclaim(top());

    // Set up tf(), start(), and find a CallGenerator.
    CallGenerator* cg = NULL;
    if (is_osr_compilation()) {
      const TypeTuple *domain = StartOSRNode::osr_domain();
      const TypeTuple *range = TypeTuple::make_range(method()->signature());
      init_tf(TypeFunc::make(domain, range));
      StartNode* s = new (this) StartOSRNode(root(), domain);
      initial_gvn()->set_type_bottom(s);
      init_start(s);
      cg = CallGenerator::for_osr(method(), entry_bci());
    } else {
      // Normal case.
      init_tf(TypeFunc::make(method()));
      StartNode* s = new (this) StartNode(root(), tf()->domain());
      initial_gvn()->set_type_bottom(s);
      init_start(s);
      if (method()->intrinsic_id() == vmIntrinsics::_Reference_get && UseG1GC) {
        // With java.lang.ref.Reference.get() we must go through the
        // intrinsic when G1 is enabled - even when get() is the root
        // method of the compile - so that, if necessary, the value in
        // the referent field of the reference object gets recorded by
        // the pre-barrier code.
        // Specifically, if G1 is enabled, the value in the referent
        // field is recorded by the G1 SATB pre-barrier. This will
        // result in the referent being marked live and the reference
        // object removed from the list of discovered references during
        // reference processing.
        cg = find_intrinsic(method(), false);
      }
      if (cg == NULL) {
        float past_uses = method()->interpreter_invocation_count();
        float expected_uses = past_uses;
        cg = CallGenerator::for_inline(method(), expected_uses);
      }
    }
    if (failing())  return;
    if (cg == NULL) {
      record_method_not_compilable_all_tiers("cannot parse method");
      return;
    }
    JVMState* jvms = build_start_state(start(), tf());
    if ((jvms = cg->generate(jvms, NULL)) == NULL) {
      record_method_not_compilable("method parse failed");
      return;
    }
    GraphKit kit(jvms);

    if (!kit.stopped()) {
      // Accept return values, and transfer control we know not where.
      // This is done by a special, unique ReturnNode bound to root.
      return_values(kit.jvms());
    }

    if (kit.has_exceptions()) {
      // Any exceptions that escape from this call must be rethrown
      // to whatever caller is dynamically above us on the stack.
      // This is done by a special, unique RethrowNode bound to root.
      rethrow_exceptions(kit.transfer_exceptions_into_jvms());
    }

    assert(IncrementalInline || (_late_inlines.length() == 0 && !has_mh_late_inlines()), "incremental inlining is off");

    if (_late_inlines.length() == 0 && !has_mh_late_inlines() && !failing() && has_stringbuilder()) {
      inline_string_calls(true);
    }

    if (failing())  return;

    print_method(PHASE_BEFORE_REMOVEUSELESS, 3);

    // Remove clutter produced by parsing.
    if (!failing()) {
      ResourceMark rm;
      PhaseRemoveUseless pru(initial_gvn(), &for_igvn);
    }
  }

  // Note:  Large methods are capped off in do_one_bytecode().
  if (failing())  return;

  // After parsing, node notes are no longer automagic.
  // They must be propagated by register_new_node_with_optimizer(),
  // clone(), or the like.
  set_default_node_notes(NULL);

  for (;;) {
    int successes = Inline_Warm();
    if (failing())  return;
    if (successes == 0)  break;
  }

  // Drain the list.
  Finish_Warm();
#ifndef PRODUCT
  if (_printer) {
    _printer->print_inlining(this);
  }
#endif

  if (failing())  return;
  NOT_PRODUCT( verify_graph_edges(); )

  // Now optimize
  Optimize();
  if (failing())  return;
  NOT_PRODUCT( verify_graph_edges(); )

#ifndef PRODUCT
  if (PrintIdeal) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->head("ideal compile_id='%d'%s", compile_id(),
                 is_osr_compilation()    ? " compile_kind='osr'" :
                 "");
    }
    root()->dump(9999);
    if (xtty != NULL) {
      xtty->tail("ideal");
    }
  }
#endif

  NOT_PRODUCT( verify_barriers(); )

  // Dump compilation data to replay it.
  if (method()->has_option("DumpReplay")) {
    env()->dump_replay_data(_compile_id);
  }
  if (method()->has_option("DumpInline") && (ilt() != NULL)) {
    env()->dump_inline_data(_compile_id);
  }

  // Now that we know the size of all the monitors we can add a fixed slot
  // for the original deopt pc.

  _orig_pc_slot =  fixed_slots();
  int next_slot = _orig_pc_slot + (sizeof(address) / VMRegImpl::stack_slot_size);
  set_fixed_slots(next_slot);

  // Now generate code
  Code_Gen();
  if (failing())  return;

  // Check if we want to skip execution of all compiled code.
  {
#ifndef PRODUCT
    if (OptoNoExecute) {
      record_method_not_compilable("+OptoNoExecute");  // Flag as failed
      return;
    }
    TracePhase t2("install_code", &_t_registerMethod, TimeCompiler);
#endif

    if (is_osr_compilation()) {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
    } else {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
    }

    env()->register_method(_method, _entry_bci,
                           &_code_offsets,
                           _orig_pc_slot_offset_in_bytes,
                           code_buffer(),
                           frame_size_in_words(), _oop_map_set,
                           &_handler_table, &_inc_table,
                           compiler,
                           env()->comp_level(),
                           has_unsafe_access(),
                           SharedRuntime::is_wide_vector(max_vector_size())
                           );

    if (log() != NULL) // Print code cache state into compiler log
      log()->code_cache_state();
  }
}

//------------------------------Compile----------------------------------------
// Compile a runtime stub
Compile::Compile( ciEnv* ci_env,
                  TypeFunc_generator generator,
                  address stub_function,
                  const char *stub_name,
                  int is_fancy_jump,
                  bool pass_tls,
                  bool save_arg_registers,
                  bool return_pc )
  : Phase(Compiler),
    _env(ci_env),
    _log(ci_env->log()),
    _compile_id(0),
    _save_argument_registers(save_arg_registers),
    _method(NULL),
    _stub_name(stub_name),
    _stub_function(stub_function),
    _stub_entry_point(NULL),
    _entry_bci(InvocationEntryBci),
    _initial_gvn(NULL),
    _for_igvn(NULL),
    _warm_calls(NULL),
    _orig_pc_slot(0),
    _orig_pc_slot_offset_in_bytes(0),
    _subsume_loads(true),
    _do_escape_analysis(false),
    _eliminate_boxing(false),
    _failure_reason(NULL),
    _code_buffer("Compile::Fill_buffer"),
    _has_method_handle_invokes(false),
    _mach_constant_base_node(NULL),
    _node_bundling_limit(0),
    _node_bundling_base(NULL),
    _java_calls(0),
    _inner_loops(0),
#ifndef PRODUCT
    _trace_opto_output(TraceOptoOutput),
    _printer(NULL),
#endif
    _dead_node_list(comp_arena()),
    _dead_node_count(0),
    _congraph(NULL),
    _replay_inline_data(NULL),
    _number_of_mh_late_inlines(0),
    _inlining_progress(false),
    _inlining_incrementally(false),
    _print_inlining_list(NULL),
    _print_inlining_idx(0),
    _preserve_jvm_state(0) {
  C = this;

#ifndef PRODUCT
  TraceTime t1(NULL, &_t_totalCompilation, TimeCompiler, false);
  TraceTime t2(NULL, &_t_stubCompilation, TimeCompiler, false);
  set_print_assembly(PrintFrameConverterAssembly);
  set_parsed_irreducible_loop(false);
#endif
  CompileWrapper cw(this);
  Init(/*AliasLevel=*/ 0);
  init_tf((*generator)());

  {
    // The following is a dummy for the sake of GraphKit::gen_stub
    Unique_Node_List for_igvn(comp_arena());
    set_for_igvn(&for_igvn);  // not used, but some GraphKit guys push on this
    PhaseGVN gvn(Thread::current()->resource_area(),255);
    set_initial_gvn(&gvn);    // not significant, but GraphKit guys use it pervasively
    gvn.transform_no_reclaim(top());

    GraphKit kit;
    kit.gen_stub(stub_function, stub_name, is_fancy_jump, pass_tls, return_pc);
  }

  NOT_PRODUCT( verify_graph_edges(); )
  Code_Gen();
  if (failing())  return;

  // Entry point will be accessed using compile->stub_entry_point();
  if (code_buffer() == NULL) {
    Matcher::soft_match_failure();
  } else {
    if (PrintAssembly && (WizardMode || Verbose))
      tty->print_cr("### Stub::%s", stub_name);

    if (!failing()) {
      assert(_fixed_slots == 0, "no fixed slots used for runtime stubs");

      // Make the NMethod
      // For now we mark the frame as never safe for profile stackwalking
      RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
                                                      code_buffer(),
                                                      CodeOffsets::frame_never_safe,
                                                      // _code_offsets.value(CodeOffsets::Frame_Complete),
                                                      frame_size_in_words(),
                                                      _oop_map_set,
                                                      save_arg_registers);
      assert(rs != NULL && rs->is_runtime_stub(), "sanity check");

      _stub_entry_point = rs->entry_point();
    }
  }
}

//------------------------------Init-------------------------------------------
// Prepare for a single compilation
void Compile::Init(int aliaslevel) {
  _unique  = 0;
  _regalloc = NULL;

  _tf      = NULL;  // filled in later
  _top     = NULL;  // cached later
  _matcher = NULL;  // filled in later
  _cfg     = NULL;  // filled in later

  set_24_bit_selection_and_mode(Use24BitFP, false);

  _node_note_array = NULL;
  _default_node_notes = NULL;

  _immutable_memory = NULL; // filled in at first inquiry

  // Globally visible Nodes
  // First set TOP to NULL to give safe behavior during creation of RootNode
  set_cached_top_node(NULL);
  set_root(new (this) RootNode());
  // Now that you have a Root to point to, create the real TOP
  set_cached_top_node( new (this) ConNode(Type::TOP) );
  set_recent_alloc(NULL, NULL);

  // Create Debug Information Recorder to record scopes, oopmaps, etc.
  env()->set_oop_recorder(new OopRecorder(env()->arena()));
  env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
  env()->set_dependencies(new Dependencies(env()));

  _fixed_slots = 0;
  set_has_split_ifs(false);
  set_has_loops(has_method() && method()->has_loops()); // first approximation
  set_has_stringbuilder(false);
  set_has_boxed_value(false);
  _trap_can_recompile = false;  // no traps emitted yet
  _major_progress = true; // start out assuming good things will happen
  set_has_unsafe_access(false);
  set_max_vector_size(0);
  Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
  set_decompile_count(0);

  set_do_freq_based_layout(BlockLayoutByFrequency || method_has_option("BlockLayoutByFrequency"));
  set_num_loop_opts(LoopOptsCount);
  set_do_inlining(Inline);
  set_max_inline_size(MaxInlineSize);
  set_freq_inline_size(FreqInlineSize);
  set_do_scheduling(OptoScheduling);
  set_do_count_invocations(false);
  set_do_method_data_update(false);

  if (debug_info()->recording_non_safepoints()) {
    set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
                        (comp_arena(), 8, 0, NULL));
    set_default_node_notes(Node_Notes::make(this));
  }

  // // -- Initialize types before each compile --
  // // Update cached type information
  // if( _method && _method->constants() )
  //   Type::update_loaded_types(_method, _method->constants());

  // Init alias_type map.
  if (!_do_escape_analysis && aliaslevel == 3)
    aliaslevel = 2;  // No unique types without escape analysis
  _AliasLevel = aliaslevel;
  const int grow_ats = 16;
  _max_alias_types = grow_ats;
  _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
  AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
  Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
  for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];

  // Initialize the first few types.
  _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
  _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
  _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
  _num_alias_types = AliasIdxRaw+1;
  // Zero out the alias type cache.
  Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
  // A NULL adr_type hits in the cache right away.  Preload the right answer.
  probe_alias_cache(NULL)->_index = AliasIdxTop;

  _intrinsics = NULL;
  _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
  _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
  _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
  register_library_intrinsics();
}

//---------------------------init_start----------------------------------------
// Install the StartNode on this compile object.
void Compile::init_start(StartNode* s) {
  if (failing())
    return; // already failing
  assert(s == start(), "");
}

StartNode* Compile::start() const {
  assert(!failing(), "");
  for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
    Node* start = root()->fast_out(i);
    if( start->is_Start() )
      return start->as_Start();
  }
  ShouldNotReachHere();
  return NULL;
}

//-------------------------------immutable_memory-------------------------------------
// Access immutable memory
Node* Compile::immutable_memory() {
  if (_immutable_memory != NULL) {
    return _immutable_memory;
  }
  StartNode* s = start();
  for (DUIterator_Fast imax, i = s->fast_outs(imax); true; i++) {
    Node *p = s->fast_out(i);
    if (p != s && p->as_Proj()->_con == TypeFunc::Memory) {
      _immutable_memory = p;
      return _immutable_memory;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

//----------------------set_cached_top_node------------------------------------
// Install the cached top node, and make sure Node::is_top works correctly.
void Compile::set_cached_top_node(Node* tn) {
  if (tn != NULL)  verify_top(tn);
  Node* old_top = _top;
  _top = tn;
  // Calling Node::setup_is_top allows the nodes the chance to adjust
  // their _out arrays.
  if (_top != NULL)     _top->setup_is_top();
  if (old_top != NULL)  old_top->setup_is_top();
  assert(_top == NULL || top()->is_top(), "");
}

#ifdef ASSERT
uint Compile::count_live_nodes_by_graph_walk() {
  Unique_Node_List useful(comp_arena());
  // Get useful node list by walking the graph.
  identify_useful_nodes(useful);
  return useful.size();
}

void Compile::print_missing_nodes() {

  // Return if CompileLog is NULL and PrintIdealNodeCount is false.
  if ((_log == NULL) && (! PrintIdealNodeCount)) {
    return;
  }

  // This is an expensive function. It is executed only when the user
  // specifies the VerifyIdealNodeCount option or otherwise knows the
  // additional work that needs to be done to identify reachable nodes
  // by walking the flow graph and find the missing ones using
  // _dead_node_list.

  Unique_Node_List useful(comp_arena());
  // Get useful node list by walking the graph.
  identify_useful_nodes(useful);

  uint l_nodes = C->live_nodes();
  uint l_nodes_by_walk = useful.size();

  if (l_nodes != l_nodes_by_walk) {
    if (_log != NULL) {
      _log->begin_head("mismatched_nodes count='%d'", abs((int) (l_nodes - l_nodes_by_walk)));
      _log->stamp();
      _log->end_head();
    }
    VectorSet& useful_member_set = useful.member_set();
    int last_idx = l_nodes_by_walk;
    for (int i = 0; i < last_idx; i++) {
      if (useful_member_set.test(i)) {
        if (_dead_node_list.test(i)) {
          if (_log != NULL) {
            _log->elem("mismatched_node_info node_idx='%d' type='both live and dead'", i);
          }
          if (PrintIdealNodeCount) {
            // Print the log message to tty
            tty->print_cr("mismatched_node idx='%d' type='both live and dead'", i);
            useful.at(i)->dump();
          }
        }
      }
      else if (! _dead_node_list.test(i)) {
        if (_log != NULL) {
          _log->elem("mismatched_node_info node_idx='%d' type='neither live nor dead'", i);
        }
        if (PrintIdealNodeCount) {
          // Print the log message to tty
          tty->print_cr("mismatched_node idx='%d' type='neither live nor dead'", i);
        }
      }
    }
    if (_log != NULL) {
      _log->tail("mismatched_nodes");
    }
  }
}
#endif

#ifndef PRODUCT
void Compile::verify_top(Node* tn) const {
  if (tn != NULL) {
    assert(tn->is_Con(), "top node must be a constant");
    assert(((ConNode*)tn)->type() == Type::TOP, "top node must have correct type");
    assert(tn->in(0) != NULL, "must have live top node");
  }
}
#endif

///-------------------Managing Per-Node Debug & Profile Info-------------------

void Compile::grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by) {
  guarantee(arr != NULL, "");
  int num_blocks = arr->length();
  if (grow_by < num_blocks)  grow_by = num_blocks;
  int num_notes = grow_by * _node_notes_block_size;
  Node_Notes* notes = NEW_ARENA_ARRAY(node_arena(), Node_Notes, num_notes);
  Copy::zero_to_bytes(notes, num_notes * sizeof(Node_Notes));
  while (num_notes > 0) {
    arr->append(notes);
    notes     += _node_notes_block_size;
    num_notes -= _node_notes_block_size;
  }
  assert(num_notes == 0, "exact multiple, please");
}

bool Compile::copy_node_notes_to(Node* dest, Node* source) {
  if (source == NULL || dest == NULL)  return false;

  if (dest->is_Con())
    return false;               // Do not push debug info onto constants.

#ifdef ASSERT
  // Leave a bread crumb trail pointing to the original node:
  if (dest != NULL && dest != source && dest->debug_orig() == NULL) {
    dest->set_debug_orig(source);
  }
#endif

  if (node_note_array() == NULL)
    return false;               // Not collecting any notes now.

  // This is a copy onto a pre-existing node, which may already have notes.
  // If both nodes have notes, do not overwrite any pre-existing notes.
  Node_Notes* source_notes = node_notes_at(source->_idx);
  if (source_notes == NULL || source_notes->is_clear())  return false;
  Node_Notes* dest_notes   = node_notes_at(dest->_idx);
  if (dest_notes == NULL || dest_notes->is_clear()) {
    return set_node_notes_at(dest->_idx, source_notes);
  }

  Node_Notes merged_notes = (*source_notes);
  // The order of operations here ensures that dest notes will win...
  merged_notes.update_from(dest_notes);
  return set_node_notes_at(dest->_idx, &merged_notes);
}
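
// Example of the merge rule (editorial illustration): if source carries a
// note and dest already carries its own, update_from() overlays dest's
// non-clear fields onto a copy of source's, so dest's pre-existing notes
// win and source only fills in the gaps.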

//--------------------------allow_range_check_smearing-------------------------
// Gating condition for coalescing similar range checks.
// Sometimes we try 'speculatively' replacing a series of range checks by a
// single covering check that is at least as strong as any of them.
// If the optimization succeeds, the simplified (strengthened) range check
// will always succeed.  If it fails, we will deopt, and then give up
// on the optimization.
bool Compile::allow_range_check_smearing() const {
  // If this method has already thrown a range-check,
  // assume it was because we already tried range smearing
  // and it failed.
  uint already_trapped = trap_count(Deoptimization::Reason_range_check);
  return !already_trapped;
}
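
// Example (editorial illustration): for a loop body doing a[i-1], a[i] and
// a[i+1], range-check smearing lets the compiler keep one covering check
// rather than three. If the covering check ever deopts with
// Reason_range_check, this predicate turns the optimization off on
// recompile.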
  1302 //------------------------------flatten_alias_type-----------------------------
  1303 const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
  1304   int offset = tj->offset();
  1305   TypePtr::PTR ptr = tj->ptr();
  1307   // Known instance (scalarizable allocation) alias only with itself.
  1308   bool is_known_inst = tj->isa_oopptr() != NULL &&
  1309                        tj->is_oopptr()->is_known_instance();
  1311   // Process weird unsafe references.
  1312   if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
  1313     assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops");
  1314     assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
  1315     tj = TypeOopPtr::BOTTOM;
  1316     ptr = tj->ptr();
  1317     offset = tj->offset();
  1320   // Array pointers need some flattening
  1321   const TypeAryPtr *ta = tj->isa_aryptr();
  1322   if (ta && ta->is_stable()) {
  1323     // Erase stability property for alias analysis.
  1324     tj = ta = ta->cast_to_stable(false);
  1326   if( ta && is_known_inst ) {
  1327     if ( offset != Type::OffsetBot &&
  1328          offset > arrayOopDesc::length_offset_in_bytes() ) {
  1329       offset = Type::OffsetBot; // Flatten constant access into array body only
  1330       tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id());
  1332   } else if( ta && _AliasLevel >= 2 ) {
  1333     // For arrays indexed by constant indices, we flatten the alias
  1334     // space to include all of the array body.  Only the header, klass
  1335     // and array length can be accessed un-aliased.
  1336     if( offset != Type::OffsetBot ) {
  1337       if( ta->const_oop() ) { // MethodData* or Method*
  1338         offset = Type::OffsetBot;   // Flatten constant access into array body
  1339         tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
  1340       } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
  1341         // range is OK as-is.
  1342         tj = ta = TypeAryPtr::RANGE;
  1343       } else if( offset == oopDesc::klass_offset_in_bytes() ) {
  1344         tj = TypeInstPtr::KLASS; // all klass loads look alike
  1345         ta = TypeAryPtr::RANGE; // generic ignored junk
  1346         ptr = TypePtr::BotPTR;
  1347       } else if( offset == oopDesc::mark_offset_in_bytes() ) {
  1348         tj = TypeInstPtr::MARK;
  1349         ta = TypeAryPtr::RANGE; // generic ignored junk
  1350         ptr = TypePtr::BotPTR;
  1351       } else {                  // Random constant offset into array body
  1352         offset = Type::OffsetBot;   // Flatten constant access into array body
  1353         tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
  1356     // Arrays of fixed size alias with arrays of unknown size.
  1357     if (ta->size() != TypeInt::POS) {
  1358       const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
  1359       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset);
  1361     // Arrays of known objects become arrays of unknown objects.
  1362     if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
  1363       const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
  1364       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
  1366     if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
  1367       const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
  1368       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
  1370     // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
  1371     // cannot be distinguished by bytecode alone.
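           // Illustrative example (hypothetical): a Phi merging a boolean[]
           // and a byte[] is typed at their common array supertype, and a
           // baload through it could touch either element kind, so both
           // flatten to the T_BYTE alias class below.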
  1372     if (ta->elem() == TypeInt::BOOL) {
  1373       const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
  1374       ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
  1375       tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
  1377     // During the 2nd round of IterGVN, NotNull castings are removed.
  1378     // Make sure the Bottom and NotNull variants alias the same.
  1379     // Also, make sure exact and non-exact variants alias the same.
  1380     if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) {
  1381       tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
  1385   // Oop pointers need some flattening
  1386   const TypeInstPtr *to = tj->isa_instptr();
  1387   if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) {
  1388     ciInstanceKlass *k = to->klass()->as_instance_klass();
  1389     if( ptr == TypePtr::Constant ) {
  1390       if (to->klass() != ciEnv::current()->Class_klass() ||
  1391           offset < k->size_helper() * wordSize) {
  1392         // No constant oop pointers (such as Strings); they alias with
  1393         // unknown strings.
  1394         assert(!is_known_inst, "not scalarizable allocation");
  1395         tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
  1397     } else if( is_known_inst ) {
  1398       tj = to; // Keep NotNull and klass_is_exact for instance type
  1399     } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
  1400       // During the 2nd round of IterGVN, NotNull castings are removed.
  1401       // Make sure the Bottom and NotNull variants alias the same.
  1402       // Also, make sure exact and non-exact variants alias the same.
  1403       tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
  1405     if (to->speculative() != NULL) {
  1406       tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id());
  1408     // Canonicalize the holder of this field
  1409     if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
  1410       // First handle header references such as a LoadKlassNode, even if the
  1411       // object's klass is unloaded at compile time (4965979).
  1412       if (!is_known_inst) { // Do it only for non-instance types
  1413         tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
  1415     } else if (offset < 0 || offset >= k->size_helper() * wordSize) {
  1416       // Static fields are in the space above the normal instance
  1417       // fields in the java.lang.Class instance.
  1418       if (to->klass() != ciEnv::current()->Class_klass()) {
  1419         to = NULL;
  1420         tj = TypeOopPtr::BOTTOM;
  1421         offset = tj->offset();
  1423     } else {
  1424       ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
  1425       if (!k->equals(canonical_holder) || tj->offset() != offset) {
  1426         if( is_known_inst ) {
  1427           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
  1428         } else {
  1429           tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
  1435   // Klass pointers to object array klasses need some flattening
  1436   const TypeKlassPtr *tk = tj->isa_klassptr();
  1437   if( tk ) {
  1438     // If we are referencing a field within a Klass, we need
  1439     // to assume the worst case of an Object.  Both exact and
  1440     // inexact types must flatten to the same alias class so
  1441     // use NotNull as the PTR.
  1442     if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
  1444       tj = tk = TypeKlassPtr::make(TypePtr::NotNull,
  1445                                    TypeKlassPtr::OBJECT->klass(),
  1446                                    offset);
  1449     ciKlass* klass = tk->klass();
  1450     if( klass->is_obj_array_klass() ) {
  1451       ciKlass* k = TypeAryPtr::OOPS->klass();
  1452       if( !k || !k->is_loaded() )                  // Only fails for some -Xcomp runs
  1453         k = TypeInstPtr::BOTTOM->klass();
  1454       tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset );
  1457     // Check for precise loads from the primary supertype array and force them
  1458     // to the supertype cache alias index.  Check for generic array loads from
  1459     // the primary supertype array and also force them to the supertype cache
  1460     // alias index.  Since the same load can reach both, we need to merge
  1461     // these 2 disparate memories into the same alias class.  Since the
  1462     // primary supertype array is read-only, there's no chance of confusion
  1463     // where we bypass an array load and an array store.
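           // Sketch (hypothetical): a precise load of _primary_supers[2]
           // emitted for a checkcast and a variable-index load over the same
           // array in a generic subtype check can reach the same memory
           // state, so both are forced into the secondary-super-cache alias
           // index computed below.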
  1464     int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
  1465     if (offset == Type::OffsetBot ||
  1466         (offset >= primary_supers_offset &&
  1467          offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
  1468         offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
  1469       offset = in_bytes(Klass::secondary_super_cache_offset());
  1470       tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset );
  1474   // Flatten all Raw pointers together.
  1475   if (tj->base() == Type::RawPtr)
  1476     tj = TypeRawPtr::BOTTOM;
  1478   if (tj->base() == Type::AnyPtr)
  1479     tj = TypePtr::BOTTOM;      // An error, which the caller must check for.
  1481   // Flatten all to bottom for now
  1482   switch( _AliasLevel ) {
  1483   case 0:
  1484     tj = TypePtr::BOTTOM;
  1485     break;
  1486   case 1:                       // Flatten to: oop, static, field or array
  1487     switch (tj->base()) {
  1488     //case Type::AryPtr: tj = TypeAryPtr::RANGE;    break;
  1489     case Type::RawPtr:   tj = TypeRawPtr::BOTTOM;   break;
  1490     case Type::AryPtr:   // do not distinguish arrays at all
  1491     case Type::InstPtr:  tj = TypeInstPtr::BOTTOM;  break;
  1492     case Type::KlassPtr: tj = TypeKlassPtr::OBJECT; break;
  1493     case Type::AnyPtr:   tj = TypePtr::BOTTOM;      break;  // caller checks it
  1494     default: ShouldNotReachHere();
  1496     break;
  1497   case 2:                       // No collapsing at level 2; keep all splits
  1498   case 3:                       // No collapsing at level 3; keep all splits
  1499     break;
  1500   default:
  1501     Unimplemented();
  1504   offset = tj->offset();
  1505   assert( offset != Type::OffsetTop, "Offset has fallen from constant" );
  1507   assert( (offset != Type::OffsetBot && tj->base() != Type::AryPtr) ||
  1508           (offset == Type::OffsetBot && tj->base() == Type::AryPtr) ||
  1509           (offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) ||
  1510           (offset == Type::OffsetBot && tj == TypePtr::BOTTOM) ||
  1511           (offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) ||
  1512           (offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) ||
  1513           (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr)  ,
  1514           "For oops, klasses, raw offset must be constant; for arrays the offset is never known" );
  1515   assert( tj->ptr() != TypePtr::TopPTR &&
  1516           tj->ptr() != TypePtr::AnyNull &&
  1517           tj->ptr() != TypePtr::Null, "No imprecise addresses" );
  1518 //    assert( tj->ptr() != TypePtr::Constant ||
  1519 //            tj->base() == Type::RawPtr ||
  1520 //            tj->base() == Type::KlassPtr, "No constant oop addresses" );
  1522   return tj;
  1525 void Compile::AliasType::Init(int i, const TypePtr* at) {
  1526   _index = i;
  1527   _adr_type = at;
  1528   _field = NULL;
  1529   _element = NULL;
  1530   _is_rewritable = true; // default
  1531   const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL;
  1532   if (atoop != NULL && atoop->is_known_instance()) {
  1533     const TypeOopPtr *gt = atoop->cast_to_instance_id(TypeOopPtr::InstanceBot);
  1534     _general_index = Compile::current()->get_alias_index(gt);
  1535   } else {
  1536     _general_index = 0;
  1540 //---------------------------------print_on------------------------------------
  1541 #ifndef PRODUCT
  1542 void Compile::AliasType::print_on(outputStream* st) {
  1543   if (index() < 10)
  1544         st->print("@ <%d> ", index());
  1545   else  st->print("@ <%d>",  index());
  1546   st->print(is_rewritable() ? "   " : " RO");
  1547   int offset = adr_type()->offset();
  1548   if (offset == Type::OffsetBot)
  1549         st->print(" +any");
  1550   else  st->print(" +%-3d", offset);
  1551   st->print(" in ");
  1552   adr_type()->dump_on(st);
  1553   const TypeOopPtr* tjp = adr_type()->isa_oopptr();
  1554   if (field() != NULL && tjp) {
  1555     if (tjp->klass()  != field()->holder() ||
  1556         tjp->offset() != field()->offset_in_bytes()) {
  1557       st->print(" != ");
  1558       field()->print();
  1559       st->print(" ***");
  1564 void print_alias_types() {
  1565   Compile* C = Compile::current();
  1566   tty->print_cr("--- Alias types, AliasIdxBot .. %d", C->num_alias_types()-1);
  1567   for (int idx = Compile::AliasIdxBot; idx < C->num_alias_types(); idx++) {
  1568     C->alias_type(idx)->print_on(tty);
  1569     tty->cr();
  1572 #endif
  1575 //----------------------------probe_alias_cache--------------------------------
  1576 Compile::AliasCacheEntry* Compile::probe_alias_cache(const TypePtr* adr_type) {
  1577   intptr_t key = (intptr_t) adr_type;
  1578   key ^= key >> logAliasCacheSize;
  1579   return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
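       // Minimal sketch of the probe above, assuming logAliasCacheSize == 6
       // (see compile.hpp for the actual value): the pointer's high bits are
       // folded into its low bits, which index a small direct-mapped cache:
       //   key = (intptr_t)adr_type;  key ^= key >> 6;  slot = key & 0x3f;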
  1583 //-----------------------------grow_alias_types--------------------------------
  1584 void Compile::grow_alias_types() {
  1585   const int old_ats  = _max_alias_types; // how many before?
  1586   const int new_ats  = old_ats;          // how many more?
  1587   const int grow_ats = old_ats+new_ats;  // how many now?
  1588   _max_alias_types = grow_ats;
  1589   _alias_types =  REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
  1590   AliasType* ats =    NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
  1591   Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
  1592   for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
  1596 //--------------------------------find_alias_type------------------------------
  1597 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
  1598   if (_AliasLevel == 0)
  1599     return alias_type(AliasIdxBot);
  1601   AliasCacheEntry* ace = probe_alias_cache(adr_type);
  1602   if (ace->_adr_type == adr_type) {
  1603     return alias_type(ace->_index);
  1606   // Handle special cases.
  1607   if (adr_type == NULL)             return alias_type(AliasIdxTop);
  1608   if (adr_type == TypePtr::BOTTOM)  return alias_type(AliasIdxBot);
  1610   // Do it the slow way.
  1611   const TypePtr* flat = flatten_alias_type(adr_type);
  1613 #ifdef ASSERT
  1614   assert(flat == flatten_alias_type(flat), "idempotent");
  1615   assert(flat != TypePtr::BOTTOM,     "cannot alias-analyze an untyped ptr");
  1616   if (flat->isa_oopptr() && !flat->isa_klassptr()) {
  1617     const TypeOopPtr* foop = flat->is_oopptr();
  1618     // Scalarizable allocations always have an exact klass.
  1619     bool exact = !foop->klass_is_exact() || foop->is_known_instance();
  1620     const TypePtr* xoop = foop->cast_to_exactness(exact)->is_ptr();
  1621     assert(foop == flatten_alias_type(xoop), "exactness must not affect alias type");
  1623   assert(flat == flatten_alias_type(flat), "exact bit doesn't matter");
  1624 #endif
  1626   int idx = AliasIdxTop;
  1627   for (int i = 0; i < num_alias_types(); i++) {
  1628     if (alias_type(i)->adr_type() == flat) {
  1629       idx = i;
  1630       break;
  1634   if (idx == AliasIdxTop) {
  1635     if (no_create)  return NULL;
  1636     // Grow the array if necessary.
  1637     if (_num_alias_types == _max_alias_types)  grow_alias_types();
  1638     // Add a new alias type.
  1639     idx = _num_alias_types++;
  1640     _alias_types[idx]->Init(idx, flat);
  1641     if (flat == TypeInstPtr::KLASS)  alias_type(idx)->set_rewritable(false);
  1642     if (flat == TypeAryPtr::RANGE)   alias_type(idx)->set_rewritable(false);
  1643     if (flat->isa_instptr()) {
  1644       if (flat->offset() == java_lang_Class::klass_offset_in_bytes()
  1645           && flat->is_instptr()->klass() == env()->Class_klass())
  1646         alias_type(idx)->set_rewritable(false);
  1648     if (flat->isa_aryptr()) {
  1649 #ifdef ASSERT
  1650       const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
  1651       // (T_BYTE has the weakest alignment and size restrictions...)
  1652       assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
  1653 #endif
  1654       if (flat->offset() == TypePtr::OffsetBot) {
  1655         alias_type(idx)->set_element(flat->is_aryptr()->elem());
  1658     if (flat->isa_klassptr()) {
  1659       if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
  1660         alias_type(idx)->set_rewritable(false);
  1661       if (flat->offset() == in_bytes(Klass::modifier_flags_offset()))
  1662         alias_type(idx)->set_rewritable(false);
  1663       if (flat->offset() == in_bytes(Klass::access_flags_offset()))
  1664         alias_type(idx)->set_rewritable(false);
  1665       if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
  1666         alias_type(idx)->set_rewritable(false);
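             // Note (an inference, not stated here): these Klass fields are
             // effectively immutable once the class is loaded, so their alias
             // classes are marked non-rewritable and loads from them can be
             // commoned and hoisted freely.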
  1668     // %%% (We would like to finalize JavaThread::threadObj_offset(),
  1669     // but the base pointer type is not distinctive enough to identify
  1670     // references into JavaThread.)
  1672     // Check for final fields.
  1673     const TypeInstPtr* tinst = flat->isa_instptr();
  1674     if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
  1675       ciField* field;
  1676       if (tinst->const_oop() != NULL &&
  1677           tinst->klass() == ciEnv::current()->Class_klass() &&
  1678           tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) {
  1679         // static field
  1680         ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
  1681         field = k->get_field_by_offset(tinst->offset(), true);
  1682       } else {
  1683         ciInstanceKlass *k = tinst->klass()->as_instance_klass();
  1684         field = k->get_field_by_offset(tinst->offset(), false);
  1686       assert(field == NULL ||
  1687              original_field == NULL ||
  1688              (field->holder() == original_field->holder() &&
  1689               field->offset() == original_field->offset() &&
  1690               field->is_static() == original_field->is_static()), "wrong field?");
  1691       // Set field() and is_rewritable() attributes.
  1692       if (field != NULL)  alias_type(idx)->set_field(field);
  1696   // Fill the cache for next time.
  1697   ace->_adr_type = adr_type;
  1698   ace->_index    = idx;
  1699   assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");
  1701   // Might as well try to fill the cache for the flattened version, too.
  1702   AliasCacheEntry* face = probe_alias_cache(flat);
  1703   if (face->_adr_type == NULL) {
  1704     face->_adr_type = flat;
  1705     face->_index    = idx;
  1706     assert(alias_type(flat) == alias_type(idx), "flat type must work too");
  1709   return alias_type(idx);
  1713 Compile::AliasType* Compile::alias_type(ciField* field) {
  1714   const TypeOopPtr* t;
  1715   if (field->is_static())
  1716     t = TypeInstPtr::make(field->holder()->java_mirror());
  1717   else
  1718     t = TypeOopPtr::make_from_klass_raw(field->holder());
  1719   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
  1720   assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
  1721   return atp;
  1725 //------------------------------have_alias_type--------------------------------
  1726 bool Compile::have_alias_type(const TypePtr* adr_type) {
  1727   AliasCacheEntry* ace = probe_alias_cache(adr_type);
  1728   if (ace->_adr_type == adr_type) {
  1729     return true;
  1732   // Handle special cases.
  1733   if (adr_type == NULL)             return true;
  1734   if (adr_type == TypePtr::BOTTOM)  return true;
  1736   return find_alias_type(adr_type, true, NULL) != NULL;
  1739 //-----------------------------must_alias--------------------------------------
  1740 // True if all values of the given address type are in the given alias category.
  1741 bool Compile::must_alias(const TypePtr* adr_type, int alias_idx) {
  1742   if (alias_idx == AliasIdxBot)         return true;  // the universal category
  1743   if (adr_type == NULL)                 return true;  // NULL serves as TypePtr::TOP
  1744   if (alias_idx == AliasIdxTop)         return false; // the empty category
  1745   if (adr_type->base() == Type::AnyPtr) return false; // TypePtr::BOTTOM or its twins
  1747   // the only remaining possible overlap is identity
  1748   int adr_idx = get_alias_index(adr_type);
  1749   assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
  1750   assert(adr_idx == alias_idx ||
  1751          (alias_type(alias_idx)->adr_type() != TypeOopPtr::BOTTOM
  1752           && adr_type                       != TypeOopPtr::BOTTOM),
  1753          "should not be testing for overlap with an unsafe pointer");
  1754   return adr_idx == alias_idx;
  1757 //------------------------------can_alias--------------------------------------
  1758 // True if any values of the given address type are in the given alias category.
  1759 bool Compile::can_alias(const TypePtr* adr_type, int alias_idx) {
  1760   if (alias_idx == AliasIdxTop)         return false; // the empty category
  1761   if (adr_type == NULL)                 return false; // NULL serves as TypePtr::TOP
  1762   if (alias_idx == AliasIdxBot)         return true;  // the universal category
  1763   if (adr_type->base() == Type::AnyPtr) return true;  // TypePtr::BOTTOM or its twins
  1765   // the only remaining possible overlap is identity
  1766   int adr_idx = get_alias_index(adr_type);
  1767   assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
  1768   return adr_idx == alias_idx;
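       // Worked contrast (illustrative): for an alias index i created for a
       // concrete address type t, must_alias(t, i) and can_alias(t, i) both
       // hold; every type can_alias AliasIdxBot (the universal category),
       // while nothing can_alias AliasIdxTop (the empty category).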
  1773 //---------------------------pop_warm_call-------------------------------------
  1774 WarmCallInfo* Compile::pop_warm_call() {
  1775   WarmCallInfo* wci = _warm_calls;
  1776   if (wci != NULL)  _warm_calls = wci->remove_from(wci);
  1777   return wci;
  1780 //----------------------------Inline_Warm--------------------------------------
  1781 int Compile::Inline_Warm() {
  1782   // If there is room, try to inline some more warm call sites.
  1783   // %%% Do a graph index compaction pass when we think we're out of space?
  1784   if (!InlineWarmCalls)  return 0;
  1786   int calls_made_hot = 0;
  1787   int room_to_grow   = NodeCountInliningCutoff - unique();
  1788   int amount_to_grow = MIN2(room_to_grow, (int)NodeCountInliningStep);
  1789   int amount_grown   = 0;
  1790   WarmCallInfo* call;
  1791   while (amount_to_grow > 0 && (call = pop_warm_call()) != NULL) {
  1792     int est_size = (int)call->size();
  1793     if (est_size > (room_to_grow - amount_grown)) {
  1794       // This one won't fit anyway.  Get rid of it.
  1795       call->make_cold();
  1796       continue;
  1798     call->make_hot();
  1799     calls_made_hot++;
  1800     amount_grown   += est_size;
  1801     amount_to_grow -= est_size;
  1804   if (calls_made_hot > 0)  set_major_progress();
  1805   return calls_made_hot;
  1809 //----------------------------Finish_Warm--------------------------------------
  1810 void Compile::Finish_Warm() {
  1811   if (!InlineWarmCalls)  return;
  1812   if (failing())  return;
  1813   if (warm_calls() == NULL)  return;
  1815   // Clean up loose ends, if we are out of space for inlining.
  1816   WarmCallInfo* call;
  1817   while ((call = pop_warm_call()) != NULL) {
  1818     call->make_cold();
  1822 //---------------------cleanup_loop_predicates-----------------------
  1823 // Remove the opaque nodes that protect the predicates so that all unused
  1824 // checks and uncommon_traps will be eliminated from the ideal graph
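       // Sketch of the shape involved (hypothetical): a hoisted predicate is
       // guarded as If(Opaque1(cond)), where the Opaque1 keeps IGVN from
       // folding cond during loop opts; replacing each Opaque1 with its input
       // below re-exposes the test so unused checks and traps can collapse.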
  1825 void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) {
  1826   if (predicate_count()==0) return;
  1827   for (int i = predicate_count(); i > 0; i--) {
  1828     Node * n = predicate_opaque1_node(i-1);
  1829     assert(n->Opcode() == Op_Opaque1, "must be");
  1830     igvn.replace_node(n, n->in(1));
  1832   assert(predicate_count()==0, "should be clean!");
  1835 // StringOpts and late inlining of string methods
  1836 void Compile::inline_string_calls(bool parse_time) {
  1838     // remove useless nodes to make the usage analysis simpler
  1839     ResourceMark rm;
  1840     PhaseRemoveUseless pru(initial_gvn(), for_igvn());
  1844     ResourceMark rm;
  1845     print_method(PHASE_BEFORE_STRINGOPTS, 3);
  1846     PhaseStringOpts pso(initial_gvn(), for_igvn());
  1847     print_method(PHASE_AFTER_STRINGOPTS, 3);
  1850   // now inline anything that we skipped the first time around
  1851   if (!parse_time) {
  1852     _late_inlines_pos = _late_inlines.length();
  1855   while (_string_late_inlines.length() > 0) {
  1856     CallGenerator* cg = _string_late_inlines.pop();
  1857     cg->do_late_inline();
  1858     if (failing())  return;
  1860   _string_late_inlines.trunc_to(0);
  1863 // Late inlining of boxing methods
  1864 void Compile::inline_boxing_calls(PhaseIterGVN& igvn) {
  1865   if (_boxing_late_inlines.length() > 0) {
  1866     assert(has_boxed_value(), "inconsistent");
  1868     PhaseGVN* gvn = initial_gvn();
  1869     set_inlining_incrementally(true);
  1871     assert( igvn._worklist.size() == 0, "should be done with igvn" );
  1872     for_igvn()->clear();
  1873     gvn->replace_with(&igvn);
  1875     while (_boxing_late_inlines.length() > 0) {
  1876       CallGenerator* cg = _boxing_late_inlines.pop();
  1877       cg->do_late_inline();
  1878       if (failing())  return;
  1880     _boxing_late_inlines.trunc_to(0);
  1883       ResourceMark rm;
  1884       PhaseRemoveUseless pru(gvn, for_igvn());
  1887     igvn = PhaseIterGVN(gvn);
  1888     igvn.optimize();
  1890     set_inlining_progress(false);
  1891     set_inlining_incrementally(false);
  1895 void Compile::inline_incrementally_one(PhaseIterGVN& igvn) {
  1896   assert(IncrementalInline, "incremental inlining should be on");
  1897   PhaseGVN* gvn = initial_gvn();
  1899   set_inlining_progress(false);
  1900   for_igvn()->clear();
  1901   gvn->replace_with(&igvn);
  1903   int i = 0;
  1905   for (; i <_late_inlines.length() && !inlining_progress(); i++) {
  1906     CallGenerator* cg = _late_inlines.at(i);
  1907     _late_inlines_pos = i+1;
  1908     cg->do_late_inline();
  1909     if (failing())  return;
  1911   int j = 0;
  1912   for (; i < _late_inlines.length(); i++, j++) {
  1913     _late_inlines.at_put(j, _late_inlines.at(i));
  1915   _late_inlines.trunc_to(j);
  1918     ResourceMark rm;
  1919     PhaseRemoveUseless pru(gvn, for_igvn());
  1922   igvn = PhaseIterGVN(gvn);
  1925 // Perform incremental inlining until bound on number of live nodes is reached
  1926 void Compile::inline_incrementally(PhaseIterGVN& igvn) {
  1927   PhaseGVN* gvn = initial_gvn();
  1929   set_inlining_incrementally(true);
  1930   set_inlining_progress(true);
  1931   uint low_live_nodes = 0;
  1933   while(inlining_progress() && _late_inlines.length() > 0) {
  1935     if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
  1936       if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) {
  1937         // PhaseIdealLoop is expensive, so we only run it once we are
  1938         // out of live-node budget, and we only run it again if the
  1939         // previous pass helped get the number of nodes down significantly.
  1940         PhaseIdealLoop ideal_loop( igvn, false, true );
  1941         if (failing())  return;
  1942         low_live_nodes = live_nodes();
  1943         _major_progress = true;
  1946       if (live_nodes() > (uint)LiveNodeCountInliningCutoff) {
  1947         break;
  1951     inline_incrementally_one(igvn);
  1953     if (failing())  return;
  1955     igvn.optimize();
  1957     if (failing())  return;
  1960   assert( igvn._worklist.size() == 0, "should be done with igvn" );
  1962   if (_string_late_inlines.length() > 0) {
  1963     assert(has_stringbuilder(), "inconsistent");
  1964     for_igvn()->clear();
  1965     initial_gvn()->replace_with(&igvn);
  1967     inline_string_calls(false);
  1969     if (failing())  return;
  1972       ResourceMark rm;
  1973       PhaseRemoveUseless pru(initial_gvn(), for_igvn());
  1976     igvn = PhaseIterGVN(gvn);
  1978     igvn.optimize();
  1981   set_inlining_incrementally(false);
  1985 //------------------------------Optimize---------------------------------------
  1986 // Given a graph, optimize it.
  1987 void Compile::Optimize() {
  1988   TracePhase t1("optimizer", &_t_optimizer, true);
  1990 #ifndef PRODUCT
  1991   if (env()->break_at_compile()) {
  1992     BREAKPOINT;
  1995 #endif
  1997   ResourceMark rm;
  1998   int          loop_opts_cnt;
  2000   NOT_PRODUCT( verify_graph_edges(); )
  2002   print_method(PHASE_AFTER_PARSING);
  2005   // Iterative Global Value Numbering, including ideal transforms
  2006   // Initialize IterGVN with types and values from parse-time GVN
  2007   PhaseIterGVN igvn(initial_gvn());
  2009     NOT_PRODUCT( TracePhase t2("iterGVN", &_t_iterGVN, TimeCompiler); )
  2010     igvn.optimize();
  2013   print_method(PHASE_ITER_GVN1, 2);
  2015   if (failing())  return;
  2018     NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
  2019     inline_incrementally(igvn);
  2022   print_method(PHASE_INCREMENTAL_INLINE, 2);
  2024   if (failing())  return;
  2026   if (eliminate_boxing()) {
  2027     NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); )
  2028     // Inline valueOf() methods now.
  2029     inline_boxing_calls(igvn);
  2031     print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
  2033     if (failing())  return;
  2036   // Remove the speculative part of types and clean up the graph from
  2037   // the extra CastPP nodes whose only purpose is to carry them. Do
  2038   // that early so that optimizations are not disrupted by the extra
  2039   // CastPP nodes.
  2040   remove_speculative_types(igvn);
  2042   // No more new expensive nodes will be added to the list from here
  2043   // so keep only the actual candidates for optimizations.
  2044   cleanup_expensive_nodes(igvn);
  2046   // Perform escape analysis
  2047   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
  2048     if (has_loops()) {
  2049       // Cleanup graph (remove dead nodes).
  2050       TracePhase t2("idealLoop", &_t_idealLoop, true);
  2051       PhaseIdealLoop ideal_loop( igvn, false, true );
  2052       if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
  2053       if (failing())  return;
  2055     ConnectionGraph::do_analysis(this, &igvn);
  2057     if (failing())  return;
  2059     // Optimize out field loads from scalar replaceable allocations.
  2060     igvn.optimize();
  2061     print_method(PHASE_ITER_GVN_AFTER_EA, 2);
  2063     if (failing())  return;
  2065     if (congraph() != NULL && macro_count() > 0) {
  2066       NOT_PRODUCT( TracePhase t2("macroEliminate", &_t_macroEliminate, TimeCompiler); )
  2067       PhaseMacroExpand mexp(igvn);
  2068       mexp.eliminate_macro_nodes();
  2069       igvn.set_delay_transform(false);
  2071       igvn.optimize();
  2072       print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
  2074       if (failing())  return;
  2078   // Loop transforms on the ideal graph.  Range Check Elimination,
  2079   // peeling, unrolling, etc.
  2081   // Set loop opts counter
  2082   loop_opts_cnt = num_loop_opts();
  2083   if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
  2085       TracePhase t2("idealLoop", &_t_idealLoop, true);
  2086       PhaseIdealLoop ideal_loop( igvn, true );
  2087       loop_opts_cnt--;
  2088       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
  2089       if (failing())  return;
  2091     // Loop opts pass if partial peeling occurred in previous pass
  2092     if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
  2093       TracePhase t3("idealLoop", &_t_idealLoop, true);
  2094       PhaseIdealLoop ideal_loop( igvn, false );
  2095       loop_opts_cnt--;
  2096       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
  2097       if (failing())  return;
  2099     // Loop opts pass for loop-unrolling before CCP
  2100     if(major_progress() && (loop_opts_cnt > 0)) {
  2101       TracePhase t4("idealLoop", &_t_idealLoop, true);
  2102       PhaseIdealLoop ideal_loop( igvn, false );
  2103       loop_opts_cnt--;
  2104       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
  2106     if (!failing()) {
  2107       // Verify that last round of loop opts produced a valid graph
  2108       NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
  2109       PhaseIdealLoop::verify(igvn);
  2112   if (failing())  return;
  2114   // Conditional Constant Propagation.
  2115   PhaseCCP ccp( &igvn );
  2116   assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
  2118     TracePhase t2("ccp", &_t_ccp, true);
  2119     ccp.do_transform();
  2121   print_method(PHASE_CPP1, 2);
  2123   assert( true, "Break here to ccp.dump_old2new_map()");
  2125   // Iterative Global Value Numbering, including ideal transforms
  2127     NOT_PRODUCT( TracePhase t2("iterGVN2", &_t_iterGVN2, TimeCompiler); )
  2128     igvn = ccp;
  2129     igvn.optimize();
  2132   print_method(PHASE_ITER_GVN2, 2);
  2134   if (failing())  return;
  2136   // Loop transforms on the ideal graph.  Range Check Elimination,
  2137   // peeling, unrolling, etc.
  2138   if(loop_opts_cnt > 0) {
  2139     debug_only( int cnt = 0; );
  2140     while(major_progress() && (loop_opts_cnt > 0)) {
  2141       TracePhase t2("idealLoop", &_t_idealLoop, true);
  2142       assert( cnt++ < 40, "infinite cycle in loop optimization" );
  2143       PhaseIdealLoop ideal_loop( igvn, true);
  2144       loop_opts_cnt--;
  2145       if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
  2146       if (failing())  return;
  2151     // Verify that all previous optimizations produced a valid graph
  2152     // at least to this point, even if no loop optimizations were done.
  2153     NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
  2154     PhaseIdealLoop::verify(igvn);
  2158     NOT_PRODUCT( TracePhase t2("macroExpand", &_t_macroExpand, TimeCompiler); )
  2159     PhaseMacroExpand  mex(igvn);
  2160     if (mex.expand_macro_nodes()) {
  2161       assert(failing(), "must bail out w/ explicit message");
  2162       return;
  2166  } // (End scope of igvn; run destructor if necessary for asserts.)
  2168   dump_inlining();
  2169   // A method with only infinite loops has no edges entering loops from root
  2171     NOT_PRODUCT( TracePhase t2("graphReshape", &_t_graphReshaping, TimeCompiler); )
  2172     if (final_graph_reshaping()) {
  2173       assert(failing(), "must bail out w/ explicit message");
  2174       return;
  2178   print_method(PHASE_OPTIMIZE_FINISHED, 2);
  2182 //------------------------------Code_Gen---------------------------------------
  2183 // Given a graph, generate code for it
  2184 void Compile::Code_Gen() {
  2185   if (failing()) {
  2186     return;
  2189   // Perform instruction selection.  You might think we could reclaim Matcher
  2190   // memory PDQ, but actually the Matcher is used in generating spill code.
  2191   // Internals of the Matcher (including some VectorSets) must remain live
  2192   // for awhile - thus I cannot reclaim Matcher memory lest a VectorSet usage
  2193   // set a bit in reclaimed memory.
  2195   // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
  2196   // nodes.  Mapping is only valid at the root of each matched subtree.
  2197   NOT_PRODUCT( verify_graph_edges(); )
  2199   Matcher matcher;
  2200   _matcher = &matcher;
  2202     TracePhase t2("matcher", &_t_matcher, true);
  2203     matcher.match();
  2205   // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
  2206   // nodes.  Mapping is only valid at the root of each matched subtree.
  2207   NOT_PRODUCT( verify_graph_edges(); )
  2209   // If you have too many nodes, or if matching has failed, bail out
  2210   check_node_count(0, "out of nodes matching instructions");
  2211   if (failing()) {
  2212     return;
  2215   // Build a proper-looking CFG
  2216   PhaseCFG cfg(node_arena(), root(), matcher);
  2217   _cfg = &cfg;
  2219     NOT_PRODUCT( TracePhase t2("scheduler", &_t_scheduler, TimeCompiler); )
  2220     bool success = cfg.do_global_code_motion();
  2221     if (!success) {
  2222       return;
  2225     print_method(PHASE_GLOBAL_CODE_MOTION, 2);
  2226     NOT_PRODUCT( verify_graph_edges(); )
  2227     debug_only( cfg.verify(); )
  2230   PhaseChaitin regalloc(unique(), cfg, matcher);
  2231   _regalloc = &regalloc;
  2233     TracePhase t2("regalloc", &_t_registerAllocation, true);
  2234     // Perform register allocation.  After Chaitin, use-def chains are
  2235     // no longer accurate (at spill code) and so must be ignored.
  2236     // Node->LRG->reg mappings are still accurate.
  2237     _regalloc->Register_Allocate();
  2239     // Bail out if the allocator builds too many nodes
  2240     if (failing()) {
  2241       return;
  2245   // Prior to register allocation we kept empty basic blocks in case the
  2246   // allocator needed a place to spill.  After register allocation we
  2247   // are not adding any new instructions.  If any basic block is empty, we
  2248   // can now safely remove it.
  2250     NOT_PRODUCT( TracePhase t2("blockOrdering", &_t_blockOrdering, TimeCompiler); )
  2251     cfg.remove_empty_blocks();
  2252     if (do_freq_based_layout()) {
  2253       PhaseBlockLayout layout(cfg);
  2254     } else {
  2255       cfg.set_loop_alignment();
  2257     cfg.fixup_flow();
  2260   // Apply peephole optimizations
  2261   if( OptoPeephole ) {
  2262     NOT_PRODUCT( TracePhase t2("peephole", &_t_peephole, TimeCompiler); )
  2263     PhasePeephole peep( _regalloc, cfg);
  2264     peep.do_transform();
  2267   // Convert Nodes to instruction bits in a buffer
  2269     // %%%% workspace merge brought two timers together for one job
  2270     TracePhase t2a("output", &_t_output, true);
  2271     NOT_PRODUCT( TraceTime t2b(NULL, &_t_codeGeneration, TimeCompiler, false); )
  2272     Output();
  2275   print_method(PHASE_FINAL_CODE);
  2277   // He's dead, Jim.
  2278   _cfg     = (PhaseCFG*)0xdeadbeef;
  2279   _regalloc = (PhaseChaitin*)0xdeadbeef;
  2283 //------------------------------dump_asm---------------------------------------
  2284 // Dump formatted assembly
  2285 #ifndef PRODUCT
  2286 void Compile::dump_asm(int *pcs, uint pc_limit) {
  2287   bool cut_short = false;
  2288   tty->print_cr("#");
  2289   tty->print("#  ");  _tf->dump();  tty->cr();
  2290   tty->print_cr("#");
  2292   // For all blocks
  2293   int pc = 0x0;                 // Program counter
  2294   char starts_bundle = ' ';
  2295   _regalloc->dump_frame();
  2297   Node *n = NULL;
  2298   for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
  2299     if (VMThread::should_terminate()) {
  2300       cut_short = true;
  2301       break;
  2303     Block* block = _cfg->get_block(i);
  2304     if (block->is_connector() && !Verbose) {
  2305       continue;
  2307     n = block->head();
  2308     if (pcs && n->_idx < pc_limit) {
  2309       tty->print("%3.3x   ", pcs[n->_idx]);
  2310     } else {
  2311       tty->print("      ");
  2313     block->dump_head(_cfg);
  2314     if (block->is_connector()) {
  2315       tty->print_cr("        # Empty connector block");
  2316     } else if (block->num_preds() == 2 && block->pred(1)->is_CatchProj() && block->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
  2317       tty->print_cr("        # Block is sole successor of call");
  2320     // For all instructions
  2321     Node *delay = NULL;
  2322     for (uint j = 0; j < block->number_of_nodes(); j++) {
  2323       if (VMThread::should_terminate()) {
  2324         cut_short = true;
  2325         break;
  2327       n = block->get_node(j);
  2328       if (valid_bundle_info(n)) {
  2329         Bundle* bundle = node_bundling(n);
  2330         if (bundle->used_in_unconditional_delay()) {
  2331           delay = n;
  2332           continue;
  2334         if (bundle->starts_bundle()) {
  2335           starts_bundle = '+';
  2339       if (WizardMode) {
  2340         n->dump();
  2343       if( !n->is_Region() &&    // Don't print in the Assembly
  2344           !n->is_Phi() &&       // a few noisily useless nodes
  2345           !n->is_Proj() &&
  2346           !n->is_MachTemp() &&
  2347           !n->is_SafePointScalarObject() &&
  2348           !n->is_Catch() &&     // Would be nice to print exception table targets
  2349           !n->is_MergeMem() &&  // Not very interesting
  2350           !n->is_top() &&       // Debug info table constants
  2351           !(n->is_Con() && !n->is_Mach())// Debug info table constants
  2352           ) {
  2353         if (pcs && n->_idx < pc_limit)
  2354           tty->print("%3.3x", pcs[n->_idx]);
  2355         else
  2356           tty->print("   ");
  2357         tty->print(" %c ", starts_bundle);
  2358         starts_bundle = ' ';
  2359         tty->print("\t");
  2360         n->format(_regalloc, tty);
  2361         tty->cr();
  2364       // If we have an instruction with a delay slot, and have seen a delay,
  2365       // then back up and print it
  2366       if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
  2367         assert(delay != NULL, "no unconditional delay instruction");
  2368         if (WizardMode) delay->dump();
  2370         if (node_bundling(delay)->starts_bundle())
  2371           starts_bundle = '+';
  2372         if (pcs && n->_idx < pc_limit)
  2373           tty->print("%3.3x", pcs[n->_idx]);
  2374         else
  2375           tty->print("   ");
  2376         tty->print(" %c ", starts_bundle);
  2377         starts_bundle = ' ';
  2378         tty->print("\t");
  2379         delay->format(_regalloc, tty);
  2380         tty->print_cr("");
  2381         delay = NULL;
  2384       // Dump the exception table as well
  2385       if( n->is_Catch() && (Verbose || WizardMode) ) {
  2386         // Print the exception table for this offset
  2387         _handler_table.print_subtable_for(pc);
  2391     if (pcs && n->_idx < pc_limit)
  2392       tty->print_cr("%3.3x", pcs[n->_idx]);
  2393     else
  2394       tty->print_cr("");
  2396     assert(cut_short || delay == NULL, "no unconditional delay branch");
  2398   } // End of per-block dump
  2399   tty->print_cr("");
  2401   if (cut_short)  tty->print_cr("*** disassembly is cut short ***");
  2403 #endif
  2405 //------------------------------Final_Reshape_Counts---------------------------
  2406 // This class defines counters to help identify when a method
  2407 // may/must be executed using hardware with only 24-bit precision.
  2408 struct Final_Reshape_Counts : public StackObj {
  2409   int  _call_count;             // count non-inlined 'common' calls
  2410   int  _float_count;            // count float ops requiring 24-bit precision
  2411   int  _double_count;           // count double ops requiring more precision
  2412   int  _java_call_count;        // count non-inlined 'java' calls
  2413   int  _inner_loop_count;       // count loops which need alignment
  2414   VectorSet _visited;           // Visitation flags
  2415   Node_List _tests;             // Set of IfNodes & PCTableNodes
  2417   Final_Reshape_Counts() :
  2418     _call_count(0), _float_count(0), _double_count(0),
  2419     _java_call_count(0), _inner_loop_count(0),
  2420     _visited( Thread::current()->resource_area() ) { }
  2422   void inc_call_count  () { _call_count  ++; }
  2423   void inc_float_count () { _float_count ++; }
  2424   void inc_double_count() { _double_count++; }
  2425   void inc_java_call_count() { _java_call_count++; }
  2426   void inc_inner_loop_count() { _inner_loop_count++; }
  2428   int  get_call_count  () const { return _call_count  ; }
  2429   int  get_float_count () const { return _float_count ; }
  2430   int  get_double_count() const { return _double_count; }
  2431   int  get_java_call_count() const { return _java_call_count; }
  2432   int  get_inner_loop_count() const { return _inner_loop_count; }
  2433 };
  2435 #ifdef ASSERT
  2436 static bool oop_offset_is_sane(const TypeInstPtr* tp) {
  2437   ciInstanceKlass *k = tp->klass()->as_instance_klass();
  2438   // Make sure the offset goes inside the instance layout.
  2439   return k->contains_field_offset(tp->offset());
  2440   // Note that OffsetBot and OffsetTop are very negative.
  2442 #endif
  2444 // Eliminate trivially redundant StoreCMs and accumulate their
  2445 // precedence edges.
  2446 void Compile::eliminate_redundant_card_marks(Node* n) {
  2447   assert(n->Opcode() == Op_StoreCM, "expected StoreCM");
  2448   if (n->in(MemNode::Address)->outcnt() > 1) {
  2449     // There are multiple users of the same address so it might be
  2450     // possible to eliminate some of the StoreCMs
  2451     Node* mem = n->in(MemNode::Memory);
  2452     Node* adr = n->in(MemNode::Address);
  2453     Node* val = n->in(MemNode::ValueIn);
  2454     Node* prev = n;
  2455     bool done = false;
  2456     // Walk the chain of StoreCMs eliminating ones that match.  As
  2457     // long as it's a chain of single users, the optimization is
  2458     // safe.  Eliminating partially redundant StoreCMs would require
  2459     // cloning copies down the other paths.
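           // Sketch (hypothetical chain) for card marks of the same card:
           //   n: StoreCM(mem: m,  adr: card_adr, val: card_val)
           //   m: StoreCM(mem: m0, adr: card_adr, val: card_val)
           // m is redundant: n absorbs m's OopStore input (or its accumulated
           // precedence edges), n is rewired past m to m0, and m dies.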
  2460     while (mem->Opcode() == Op_StoreCM && mem->outcnt() == 1 && !done) {
  2461       if (adr == mem->in(MemNode::Address) &&
  2462           val == mem->in(MemNode::ValueIn)) {
  2463         // redundant StoreCM
  2464         if (mem->req() > MemNode::OopStore) {
  2465           // Hasn't been processed by this code yet.
  2466           n->add_prec(mem->in(MemNode::OopStore));
  2467         } else {
  2468           // Already converted to precedence edge
  2469           for (uint i = mem->req(); i < mem->len(); i++) {
  2470             // Accumulate any precedence edges
  2471             if (mem->in(i) != NULL) {
  2472               n->add_prec(mem->in(i));
  2475           // Everything above this point has been processed.
  2476           done = true;
  2478         // Eliminate the previous StoreCM
  2479         prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
  2480         assert(mem->outcnt() == 0, "should be dead");
  2481         mem->disconnect_inputs(NULL, this);
  2482       } else {
  2483         prev = mem;
  2485       mem = prev->in(MemNode::Memory);
  2490 //------------------------------final_graph_reshaping_impl----------------------
  2491 // Implement items 1-5 from final_graph_reshaping below.
  2492 void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
  2494   if ( n->outcnt() == 0 ) return; // dead node
  2495   uint nop = n->Opcode();
  2497   // Check for 2-input instruction with "last use" on right input.
  2498   // Swap to left input.  Implements item (2).
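         // Illustrative case (hypothetical): for AddI(x, y) where x has other
         // uses but this is y's last use, swapping to AddI(y, x) lets a
         // two-address machine overwrite y's register with the result instead
         // of first copying x.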
  2499   if( n->req() == 3 &&          // two-input instruction
  2500       n->in(1)->outcnt() > 1 && // left use is NOT a last use
  2501       (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop
  2502       n->in(2)->outcnt() == 1 &&// right use IS a last use
  2503       !n->in(2)->is_Con() ) {   // right use is not a constant
  2504     // Check for commutative opcode
  2505     switch( nop ) {
  2506     case Op_AddI:  case Op_AddF:  case Op_AddD:  case Op_AddL:
  2507     case Op_MaxI:  case Op_MinI:
  2508     case Op_MulI:  case Op_MulF:  case Op_MulD:  case Op_MulL:
  2509     case Op_AndL:  case Op_XorL:  case Op_OrL:
  2510     case Op_AndI:  case Op_XorI:  case Op_OrI: {
  2511       // Move "last use" input to left by swapping inputs
  2512       n->swap_edges(1, 2);
  2513       break;
  2515     default:
  2516       break;
  2520 #ifdef ASSERT
  2521   if( n->is_Mem() ) {
  2522     int alias_idx = get_alias_index(n->as_Mem()->adr_type());
  2523     assert( n->in(0) != NULL || alias_idx != Compile::AliasIdxRaw ||
  2524             // oop will be recorded in oop map if load crosses safepoint
  2525             n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() ||
  2526                              LoadNode::is_immutable_value(n->in(MemNode::Address))),
  2527             "raw memory operations should have control edge");
  2529 #endif
  2530   // Count FPU ops and common calls, implements item (3)
  2531   switch( nop ) {
  2532   // Count all float operations that may use FPU
  2533   case Op_AddF:
  2534   case Op_SubF:
  2535   case Op_MulF:
  2536   case Op_DivF:
  2537   case Op_NegF:
  2538   case Op_ModF:
  2539   case Op_ConvI2F:
  2540   case Op_ConF:
  2541   case Op_CmpF:
  2542   case Op_CmpF3:
  2543   // case Op_ConvL2F: // longs are split into 32-bit halves
  2544     frc.inc_float_count();
  2545     break;
  2547   case Op_ConvF2D:
  2548   case Op_ConvD2F:
  2549     frc.inc_float_count();
  2550     frc.inc_double_count();
  2551     break;
  2553   // Count all double operations that may use FPU
  2554   case Op_AddD:
  2555   case Op_SubD:
  2556   case Op_MulD:
  2557   case Op_DivD:
  2558   case Op_NegD:
  2559   case Op_ModD:
  2560   case Op_ConvI2D:
  2561   case Op_ConvD2I:
  2562   // case Op_ConvL2D: // handled by leaf call
  2563   // case Op_ConvD2L: // handled by leaf call
  2564   case Op_ConD:
  2565   case Op_CmpD:
  2566   case Op_CmpD3:
  2567     frc.inc_double_count();
  2568     break;
  2569   case Op_Opaque1:              // Remove Opaque Nodes before matching
  2570   case Op_Opaque2:              // Remove Opaque Nodes before matching
  2571     n->subsume_by(n->in(1), this);
  2572     break;
  2573   case Op_CallStaticJava:
  2574   case Op_CallJava:
  2575   case Op_CallDynamicJava:
  2576     frc.inc_java_call_count(); // Count java call site;
  2577   case Op_CallRuntime:
  2578   case Op_CallLeaf:
  2579   case Op_CallLeafNoFP: {
  2580     assert( n->is_Call(), "" );
  2581     CallNode *call = n->as_Call();
  2582     // Count call sites where the FP mode bit would have to be flipped.
  2583     // Do not count uncommon runtime calls:
  2584     // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
  2585     // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
  2586     if( !call->is_CallStaticJava() || !call->as_CallStaticJava()->_name ) {
  2587       frc.inc_call_count();   // Count the call site
  2588     } else {                  // See if uncommon argument is shared
  2589       Node *n = call->in(TypeFunc::Parms);
  2590       int nop = n->Opcode();
  2591       // Clone shared simple arguments to uncommon calls, item (1).
  2592       if( n->outcnt() > 1 &&
  2593           !n->is_Proj() &&
  2594           nop != Op_CreateEx &&
  2595           nop != Op_CheckCastPP &&
  2596           nop != Op_DecodeN &&
  2597           nop != Op_DecodeNKlass &&
  2598           !n->is_Mem() ) {
  2599         Node *x = n->clone();
  2600         call->set_req( TypeFunc::Parms, x );
  2603     break;
  2606   case Op_StoreD:
  2607   case Op_LoadD:
  2608   case Op_LoadD_unaligned:
  2609     frc.inc_double_count();
  2610     goto handle_mem;
  2611   case Op_StoreF:
  2612   case Op_LoadF:
  2613     frc.inc_float_count();
  2614     goto handle_mem;
  2616   case Op_StoreCM:
  2618       // Convert OopStore dependence into precedence edge
  2619       Node* prec = n->in(MemNode::OopStore);
  2620       n->del_req(MemNode::OopStore);
  2621       n->add_prec(prec);
  2622       eliminate_redundant_card_marks(n);
  2625     // fall through
  2627   case Op_StoreB:
  2628   case Op_StoreC:
  2629   case Op_StorePConditional:
  2630   case Op_StoreI:
  2631   case Op_StoreL:
  2632   case Op_StoreIConditional:
  2633   case Op_StoreLConditional:
  2634   case Op_CompareAndSwapI:
  2635   case Op_CompareAndSwapL:
  2636   case Op_CompareAndSwapP:
  2637   case Op_CompareAndSwapN:
  2638   case Op_GetAndAddI:
  2639   case Op_GetAndAddL:
  2640   case Op_GetAndSetI:
  2641   case Op_GetAndSetL:
  2642   case Op_GetAndSetP:
  2643   case Op_GetAndSetN:
  2644   case Op_StoreP:
  2645   case Op_StoreN:
  2646   case Op_StoreNKlass:
  2647   case Op_LoadB:
  2648   case Op_LoadUB:
  2649   case Op_LoadUS:
  2650   case Op_LoadI:
  2651   case Op_LoadKlass:
  2652   case Op_LoadNKlass:
  2653   case Op_LoadL:
  2654   case Op_LoadL_unaligned:
  2655   case Op_LoadPLocked:
  2656   case Op_LoadP:
  2657   case Op_LoadN:
  2658   case Op_LoadRange:
  2659   case Op_LoadS: {
  2660   handle_mem:
  2661 #ifdef ASSERT
  2662     if( VerifyOptoOopOffsets ) {
  2663       assert( n->is_Mem(), "" );
  2664       MemNode *mem  = (MemNode*)n;
  2665       // Check to see if address types have grounded out somehow.
  2666       const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
  2667       assert( !tp || oop_offset_is_sane(tp), "" );
  2669 #endif
  2670     break;
  2673   case Op_AddP: {               // Assert sane base pointers
  2674     Node *addp = n->in(AddPNode::Address);
  2675     assert( !addp->is_AddP() ||
  2676             addp->in(AddPNode::Base)->is_top() || // Top OK for allocation
  2677             addp->in(AddPNode::Base) == n->in(AddPNode::Base),
  2678             "Base pointers must match" );
  2679 #ifdef _LP64
  2680     if ((UseCompressedOops || UseCompressedClassPointers) &&
  2681         addp->Opcode() == Op_ConP &&
  2682         addp == n->in(AddPNode::Base) &&
  2683         n->in(AddPNode::Offset)->is_Con()) {
  2684       // Use addressing with narrow klass to load with offset on x86.
  2685       // On SPARC, loading a 32-bit constant and decoding it takes fewer
  2686       // instructions (4) than loading a 64-bit constant (7).
  2687       // Do this transformation here since IGVN will convert ConN back to ConP.
  2688       const Type* t = addp->bottom_type();
  2689       if (t->isa_oopptr() || t->isa_klassptr()) {
  2690         Node* nn = NULL;
  2692         int op = t->isa_oopptr() ? Op_ConN : Op_ConNKlass;
  2694         // Look for existing ConN node of the same exact type.
  2695         Node* r  = root();
  2696         uint cnt = r->outcnt();
  2697         for (uint i = 0; i < cnt; i++) {
  2698           Node* m = r->raw_out(i);
  2699           if (m!= NULL && m->Opcode() == op &&
  2700               m->bottom_type()->make_ptr() == t) {
  2701             nn = m;
  2702             break;
  2705         if (nn != NULL) {
  2706           // Decode a narrow oop to match address
  2707           // [R12 + narrow_oop_reg<<3 + offset]
  2708           if (t->isa_oopptr()) {
  2709             nn = new (this) DecodeNNode(nn, t);
  2710           } else {
  2711             nn = new (this) DecodeNKlassNode(nn, t);
  2713           n->set_req(AddPNode::Base, nn);
  2714           n->set_req(AddPNode::Address, nn);
  2715           if (addp->outcnt() == 0) {
  2716             addp->disconnect_inputs(NULL, this);
  2721 #endif
  2722     break;
  2725 #ifdef _LP64
  2726   case Op_CastPP:
  2727     if (n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
  2728       Node* in1 = n->in(1);
  2729       const Type* t = n->bottom_type();
  2730       Node* new_in1 = in1->clone();
  2731       new_in1->as_DecodeN()->set_type(t);
  2733       if (!Matcher::narrow_oop_use_complex_address()) {
  2734         //
  2735         // x86, ARM and friends can handle 2 adds in addressing mode
  2736         // and Matcher can fold a DecodeN node into address by using
  2737         // a narrow oop directly and do implicit NULL check in address:
  2738         //
  2739         // [R12 + narrow_oop_reg<<3 + offset]
  2740         // NullCheck narrow_oop_reg
  2741         //
  2742         // On other platforms (SPARC) we have to keep the new DecodeN node
  2743         // and use it to do the implicit NULL check in the address:
  2744         //
  2745         // decode_not_null narrow_oop_reg, base_reg
  2746         // [base_reg + offset]
  2747         // NullCheck base_reg
  2748         //
  2749         // Pin the new DecodeN node to the non-null path on these platforms
  2750         // (SPARC) to record which NULL check the new DecodeN node corresponds
  2751         // to, so it can be used as the value in implicit_null_check().
  2752         //
  2753         new_in1->set_req(0, n->in(0));
  2756       n->subsume_by(new_in1, this);
  2757       if (in1->outcnt() == 0) {
  2758         in1->disconnect_inputs(NULL, this);
  2761     break;
  2763   case Op_CmpP:
  2764     // Do this transformation here to preserve CmpPNode::sub() and
  2765     // other TypePtr related Ideal optimizations (for example, ptr nullness).
  2766     if (n->in(1)->is_DecodeNarrowPtr() || n->in(2)->is_DecodeNarrowPtr()) {
  2767       Node* in1 = n->in(1);
  2768       Node* in2 = n->in(2);
  2769       if (!in1->is_DecodeNarrowPtr()) {
  2770         in2 = in1;
  2771         in1 = n->in(2);
  2773       assert(in1->is_DecodeNarrowPtr(), "sanity");
  2775       Node* new_in2 = NULL;
  2776       if (in2->is_DecodeNarrowPtr()) {
  2777         assert(in2->Opcode() == in1->Opcode(), "must be same node type");
  2778         new_in2 = in2->in(1);
  2779       } else if (in2->Opcode() == Op_ConP) {
  2780         const Type* t = in2->bottom_type();
  2781         if (t == TypePtr::NULL_PTR) {
  2782           assert(in1->is_DecodeN(), "compare klass to null?");
  2783           // Don't convert the CmpP null check into a CmpN if the compressed
  2784           // oops implicit null check is not generated.
  2785           // This allows generating a normal oop implicit null check instead.
  2786           if (Matcher::gen_narrow_oop_implicit_null_checks())
  2787             new_in2 = ConNode::make(this, TypeNarrowOop::NULL_PTR);
  2788           //
  2789           // This transformation, together with the CastPP transformation
  2790           // above, will generate code for implicit NULL checks for compressed oops.
  2791           //
  2792           // The original code after Optimize()
  2793           //
  2794           //    LoadN memory, narrow_oop_reg
  2795           //    decode narrow_oop_reg, base_reg
  2796           //    CmpP base_reg, NULL
  2797           //    CastPP base_reg // NotNull
  2798           //    Load [base_reg + offset], val_reg
  2799           //
  2800           // after these transformations will be
  2801           //
  2802           //    LoadN memory, narrow_oop_reg
  2803           //    CmpN narrow_oop_reg, NULL
  2804           //    decode_not_null narrow_oop_reg, base_reg
  2805           //    Load [base_reg + offset], val_reg
  2806           //
  2807           // and the uncommon path (== NULL) will use narrow_oop_reg directly
  2808           // since narrow oops can be used in debug info now (see the code in
  2809           // final_graph_reshaping_walk()).
  2810           //
  2811           // At the end the code will be matched to
  2812           // on x86:
  2813           //
  2814           //    Load_narrow_oop memory, narrow_oop_reg
  2815           //    Load [R12 + narrow_oop_reg<<3 + offset], val_reg
  2816           //    NullCheck narrow_oop_reg
  2817           //
  2818           // and on sparc:
  2819           //
  2820           //    Load_narrow_oop memory, narrow_oop_reg
  2821           //    decode_not_null narrow_oop_reg, base_reg
  2822           //    Load [base_reg + offset], val_reg
  2823           //    NullCheck base_reg
  2824           //
  2825         } else if (t->isa_oopptr()) {
  2826           new_in2 = ConNode::make(this, t->make_narrowoop());
  2827         } else if (t->isa_klassptr()) {
  2828           new_in2 = ConNode::make(this, t->make_narrowklass());
  2829         }
  2830       }
  2831       if (new_in2 != NULL) {
  2832         Node* cmpN = new (this) CmpNNode(in1->in(1), new_in2);
  2833         n->subsume_by(cmpN, this);
  2834         if (in1->outcnt() == 0) {
  2835           in1->disconnect_inputs(NULL, this);
  2836         }
  2837         if (in2->outcnt() == 0) {
  2838           in2->disconnect_inputs(NULL, this);
  2839         }
  2840       }
  2841     }
  2842     break;
  2844   case Op_DecodeN:
  2845   case Op_DecodeNKlass:
  2846     assert(!n->in(1)->is_EncodeNarrowPtr(), "should be optimized out");
  2847     // DecodeN could be pinned when it can't be folded into
  2848     // an address expression; see the code for Op_CastPP above.
  2849     assert(n->in(0) == NULL || (UseCompressedOops && !Matcher::narrow_oop_use_complex_address()), "no control");
  2850     break;
  2852   case Op_EncodeP:
  2853   case Op_EncodePKlass: {
  2854     Node* in1 = n->in(1);
  2855     if (in1->is_DecodeNarrowPtr()) {
  2856       n->subsume_by(in1->in(1), this);
  2857     } else if (in1->Opcode() == Op_ConP) {
  2858       const Type* t = in1->bottom_type();
  2859       if (t == TypePtr::NULL_PTR) {
  2860         assert(t->isa_oopptr(), "null klass?");
  2861         n->subsume_by(ConNode::make(this, TypeNarrowOop::NULL_PTR), this);
  2862       } else if (t->isa_oopptr()) {
  2863         n->subsume_by(ConNode::make(this, t->make_narrowoop()), this);
  2864       } else if (t->isa_klassptr()) {
  2865         n->subsume_by(ConNode::make(this, t->make_narrowklass()), this);
  2866       }
  2867     }
  2868     if (in1->outcnt() == 0) {
  2869       in1->disconnect_inputs(NULL, this);
  2870     }
  2871     break;
  2872   }
  2874   case Op_Proj: {
  2875     if (OptimizeStringConcat) {
  2876       ProjNode* p = n->as_Proj();
  2877       if (p->_is_io_use) {
  2878         // Separate projections were used for the exception path; these
  2879         // are normally removed by a late inline.  If the call wasn't
  2880         // inlined then they hang around and should just be replaced with
  2881         // the original projection.
  2882         Node* proj = NULL;
  2883         // Replace with just one
  2884         for (SimpleDUIterator i(p->in(0)); i.has_next(); i.next()) {
  2885           Node *use = i.get();
  2886           if (use->is_Proj() && p != use && use->as_Proj()->_con == p->_con) {
  2887             proj = use;
  2888             break;
  2889           }
  2890         }
  2891         assert(proj != NULL, "must be found");
  2892         p->subsume_by(proj, this);
  2893       }
  2894     }
  2895     break;
  2896   }
  2898   case Op_Phi:
  2899     if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) {
  2900       // The EncodeP optimization may create a Phi with the same edges
  2901       // for all paths. Such a Phi is not handled well by the register allocator.
  2902       Node* unique_in = n->in(1);
  2903       assert(unique_in != NULL, "");
  2904       uint cnt = n->req();
  2905       for (uint i = 2; i < cnt; i++) {
  2906         Node* m = n->in(i);
  2907         assert(m != NULL, "");
  2908         if (unique_in != m)
  2909           unique_in = NULL;
  2910       }
  2911       if (unique_in != NULL) {
  2912         n->subsume_by(unique_in, this);
  2913       }
  2914     }
  2915     break;
  2917 #endif
  2919   case Op_ModI:
  2920     if (UseDivMod) {
  2921       // Check if a%b and a/b both exist
  2922       Node* d = n->find_similar(Op_DivI);
  2923       if (d) {
  2924         // Replace them with a fused divmod if supported
  2925         if (Matcher::has_match_rule(Op_DivModI)) {
  2926           DivModINode* divmod = DivModINode::make(this, n);
  2927           d->subsume_by(divmod->div_proj(), this);
  2928           n->subsume_by(divmod->mod_proj(), this);
  2929         } else {
  2930           // replace a%b with a-((a/b)*b)
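                 // (e.g., 7 % 3 == 7 - (7/3)*3 == 7 - 6 == 1; with Java's
                 // truncate-toward-zero division this identity also holds for
                 // negative operands: -7 % 3 == -7 - (-7/3)*3 == -1. The same
                 // rewrite is applied in the long case below.)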
  2931           Node* mult = new (this) MulINode(d, d->in(2));
  2932           Node* sub  = new (this) SubINode(d->in(1), mult);
  2933           n->subsume_by(sub, this);
  2934         }
  2935       }
  2936     }
  2937     break;
  2939   case Op_ModL:
  2940     if (UseDivMod) {
  2941       // Check if a%b and a/b both exist
  2942       Node* d = n->find_similar(Op_DivL);
  2943       if (d) {
  2944         // Replace them with a fused divmod if supported
  2945         if (Matcher::has_match_rule(Op_DivModL)) {
  2946           DivModLNode* divmod = DivModLNode::make(this, n);
  2947           d->subsume_by(divmod->div_proj(), this);
  2948           n->subsume_by(divmod->mod_proj(), this);
  2949         } else {
  2950           // replace a%b with a-((a/b)*b)
  2951           Node* mult = new (this) MulLNode(d, d->in(2));
  2952           Node* sub  = new (this) SubLNode(d->in(1), mult);
  2953           n->subsume_by(sub, this);
  2954         }
  2955       }
  2956     }
  2957     break;
  2959   case Op_LoadVector:
  2960   case Op_StoreVector:
  2961     break;
  2963   case Op_PackB:
  2964   case Op_PackS:
  2965   case Op_PackI:
  2966   case Op_PackF:
  2967   case Op_PackL:
  2968   case Op_PackD:
  2969     if (n->req()-1 > 2) {
  2970       // Replace many operand PackNodes with a binary tree for matching
  2971       PackNode* p = (PackNode*) n;
  2972       Node* btp = p->binary_tree_pack(this, 1, n->req());
  2973       n->subsume_by(btp, this);
  2974     }
  2975     break;
  2976   case Op_Loop:
  2977   case Op_CountedLoop:
  2978     if (n->as_Loop()->is_inner_loop()) {
  2979       frc.inc_inner_loop_count();
  2980     }
  2981     break;
  2982   case Op_LShiftI:
  2983   case Op_RShiftI:
  2984   case Op_URShiftI:
  2985   case Op_LShiftL:
  2986   case Op_RShiftL:
  2987   case Op_URShiftL:
  2988     if (Matcher::need_masked_shift_count) {
  2989       // The cpu's shift instructions don't restrict the count to the
  2990       // lower 5/6 bits. We need to do the masking ourselves.
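             // (Java semantics require that, e.g., an int shift by 33 behave
             // like a shift by 33 & 31 == 1: only the low 5 bits of an int
             // shift count are significant, 6 bits for a long.)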
  2991       Node* in2 = n->in(2);
  2992       juint mask = (n->bottom_type() == TypeInt::INT) ? (BitsPerInt - 1) : (BitsPerLong - 1);
  2993       const TypeInt* t = in2->find_int_type();
  2994       if (t != NULL && t->is_con()) {
  2995         juint shift = t->get_con();
  2996         if (shift > mask) { // Unsigned cmp
  2997           n->set_req(2, ConNode::make(this, TypeInt::make(shift & mask)));
  2998         }
  2999       } else {
  3000         if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
  3001           Node* shift = new (this) AndINode(in2, ConNode::make(this, TypeInt::make(mask)));
  3002           n->set_req(2, shift);
  3003         }
  3004       }
  3005       if (in2->outcnt() == 0) { // Remove dead node
  3006         in2->disconnect_inputs(NULL, this);
  3007       }
  3008     }
  3009     break;
  3010   case Op_MemBarStoreStore:
  3011   case Op_MemBarRelease:
  3012     // Break the link with AllocateNode: it is no longer useful and
  3013     // confuses register allocation.
  3014     if (n->req() > MemBarNode::Precedent) {
  3015       n->set_req(MemBarNode::Precedent, top());
  3016     }
  3017     break;
  3018     // Must set a control edge on all nodes that produce a FlagsProj
  3019     // so they can't escape the block that consumes the flags.
  3020     // Must also set the non-throwing branch as the control
  3021     // for all nodes that depend on the result, unless the node
  3022     // already has a control that isn't the control of the
  3023     // flag producer.
  3024   case Op_FlagsProj:
  3025     {
  3026       MathExactNode* math = (MathExactNode*)  n->in(0);
  3027       Node* ctrl = math->control_node();
  3028       Node* non_throwing = math->non_throwing_branch();
  3029       math->set_req(0, ctrl);
  3031       Node* result = math->result_node();
  3032       if (result != NULL) {
  3033         for (DUIterator_Fast jmax, j = result->fast_outs(jmax); j < jmax; j++) {
  3034           Node* out = result->fast_out(j);
  3035           // Phi nodes shouldn't be moved. They would only match below if they
  3036           // had the same control as the MathExactNode. The only time that
  3037           // would happen is if the Phi is also an input to the MathExact
  3038           //
  3039           // Cmp nodes shouldn't have control set at all.
  3040           if (out->is_Phi() ||
  3041               out->is_Cmp()) {
  3042             continue;
  3043           }
  3045           if (out->in(0) == NULL) {
  3046             out->set_req(0, non_throwing);
  3047           } else if (out->in(0) == ctrl) {
  3048             out->set_req(0, non_throwing);
  3049           }
  3050         }
  3051       }
  3052     }
  3053     break;
  3054   default:
  3055     assert( !n->is_Call(), "" );
  3056     assert( !n->is_Mem(), "" );
  3057     break;
  3058   }
  3060   // Collect CFG split points
  3061   if (n->is_MultiBranch())
  3062     frc._tests.push(n);
  3063 }
  3065 //------------------------------final_graph_reshaping_walk---------------------
  3066 // Replacing Opaque nodes with their input in final_graph_reshaping_impl()
  3067 // requires that the walk visits a node's inputs before visiting the node.
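       // (The walk below is an iterative post-order DFS; an explicit Node_Stack
       // replaces recursion so deep graphs cannot overflow the native stack.)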
  3068 void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
  3069   ResourceArea *area = Thread::current()->resource_area();
  3070   Unique_Node_List sfpt(area);
  3072   frc._visited.set(root->_idx); // first, mark node as visited
  3073   uint cnt = root->req();
  3074   Node *n = root;
  3075   uint  i = 0;
  3076   while (true) {
  3077     if (i < cnt) {
  3078       // Place all non-visited non-null inputs onto stack
  3079       Node* m = n->in(i);
  3080       ++i;
  3081       if (m != NULL && !frc._visited.test_set(m->_idx)) {
  3082         if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL)
  3083           sfpt.push(m);
  3084         cnt = m->req();
  3085         nstack.push(n, i); // put on stack parent and next input's index
  3086         n = m;
  3087         i = 0;
  3088       }
  3089     } else {
  3090       // Now do post-visit work
  3091       final_graph_reshaping_impl( n, frc );
  3092       if (nstack.is_empty())
  3093         break;             // finished
  3094       n = nstack.node();   // Get node from stack
  3095       cnt = n->req();
  3096       i = nstack.index();
  3097       nstack.pop();        // Shift to the next node on stack
  3098     }
  3099   }
  3101   // Skip next transformation if compressed oops are not used.
  3102   if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) ||
  3103       (!UseCompressedOops && !UseCompressedClassPointers))
  3104     return;
  3106   // Go over safepoints nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
  3107   // It can be done for uncommon traps or for any safepoints/calls
  3108   // if the DecodeN/DecodeNKlass node is referenced only in debug info.
  3109   while (sfpt.size() > 0) {
  3110     n = sfpt.pop();
  3111     JVMState *jvms = n->as_SafePoint()->jvms();
  3112     assert(jvms != NULL, "sanity");
  3113     int start = jvms->debug_start();
  3114     int end   = n->req();
  3115     bool is_uncommon = (n->is_CallStaticJava() &&
  3116                         n->as_CallStaticJava()->uncommon_trap_request() != 0);
  3117     for (int j = start; j < end; j++) {
  3118       Node* in = n->in(j);
  3119       if (in->is_DecodeNarrowPtr()) {
  3120         bool safe_to_skip = true;
  3121         if (!is_uncommon) {
  3122           // Is it safe to skip?
  3123           for (uint i = 0; i < in->outcnt(); i++) {
  3124             Node* u = in->raw_out(i);
  3125             if (!u->is_SafePoint() ||
  3126                  u->is_Call() && u->as_Call()->has_non_debug_use(n)) {
  3127               safe_to_skip = false;
  3128             }
  3129           }
  3130         }
  3131         if (safe_to_skip) {
  3132           n->set_req(j, in->in(1));
  3133         }
  3134         if (in->outcnt() == 0) {
  3135           in->disconnect_inputs(NULL, this);
  3136         }
  3137       }
  3138     }
  3139   }
  3140 }
  3142 //------------------------------final_graph_reshaping--------------------------
  3143 // Final Graph Reshaping.
  3144 //
  3145 // (1) Clone simple inputs to uncommon calls, so they can be scheduled late
  3146 //     and not commoned up and forced early.  Must come after regular
  3147 //     optimizations to avoid GVN undoing the cloning.  Clone constant
  3148 //     inputs to Loop Phis; these will be split by the allocator anyways.
  3149 //     Remove Opaque nodes.
  3150 // (2) Move last-uses by commutative operations to the left input to encourage
  3151 //     Intel update-in-place two-address operations and better register usage
  3152 //     on RISCs.  Must come after regular optimizations to avoid GVN Ideal
  3153 //     calls canonicalizing them back.
  3154 // (3) Count the number of double-precision FP ops, single-precision FP ops
  3155 //     and call sites.  On Intel, we can get correct rounding either by
  3156 //     forcing singles to memory (requires extra stores and loads after each
  3157 //     FP bytecode) or we can set a rounding mode bit (requires setting and
  3158 //     clearing the mode bit around call sites).  The mode bit is only used
  3159 //     if the relative frequency of single FP ops to calls is low enough.
  3160 //     This is a key transform for SPEC mpeg_audio.
  3161 // (4) Detect infinite loops; blobs of code reachable from above but not
  3162 //     below.  Several of the Code_Gen algorithms fail on such code shapes,
  3163 //     so we simply bail out.  Happens a lot in ZKM.jar, but also happens
  3164 //     from time to time in other code (such as -Xcomp finalizer loops, etc).
  3165 //     Detection is by looking for IfNodes where only 1 projection is
  3166 //     reachable from below or CatchNodes missing some targets.
  3167 // (5) Assert for insane oop offsets in debug mode.
  3169 bool Compile::final_graph_reshaping() {
  3170   // an infinite loop may have been eliminated by the optimizer,
  3171   // in which case the graph will be empty.
  3172   if (root()->req() == 1) {
  3173     record_method_not_compilable("trivial infinite loop");
  3174     return true;
  3175   }
  3177   // Expensive nodes have their control input set to prevent the GVN
  3178   // from freely commoning them. There's no GVN beyond this point so
  3179   // no need to keep the control input. We want the expensive nodes to
  3180   // be freely moved to the least frequent code path by gcm.
  3181   assert(OptimizeExpensiveOps || expensive_count() == 0, "optimization off but list non empty?");
  3182   for (int i = 0; i < expensive_count(); i++) {
  3183     _expensive_nodes->at(i)->set_req(0, NULL);
  3184   }
  3186   Final_Reshape_Counts frc;
  3188   // Visit everybody reachable!
  3189   // Allocate stack of size C->unique()/2 to avoid frequent realloc
  3190   Node_Stack nstack(unique() >> 1);
  3191   final_graph_reshaping_walk(nstack, root(), frc);
  3193   // Check for unreachable (from below) code (i.e., infinite loops).
  3194   for( uint i = 0; i < frc._tests.size(); i++ ) {
  3195     MultiBranchNode *n = frc._tests[i]->as_MultiBranch();
  3196     // Get number of CFG targets.
  3197     // Note that PCTables include exception targets after calls.
  3198     uint required_outcnt = n->required_outcnt();
  3199     if (n->outcnt() != required_outcnt) {
  3200       // Check for a few special cases.  Rethrow Nodes never take the
  3201       // 'fall-thru' path, so expected kids is 1 less.
  3202       if (n->is_PCTable() && n->in(0) && n->in(0)->in(0)) {
  3203         if (n->in(0)->in(0)->is_Call()) {
  3204           CallNode *call = n->in(0)->in(0)->as_Call();
  3205           if (call->entry_point() == OptoRuntime::rethrow_stub()) {
  3206             required_outcnt--;      // Rethrow always has 1 less kid
  3207           } else if (call->req() > TypeFunc::Parms &&
  3208                      call->is_CallDynamicJava()) {
  3209             // Check for null receiver. In such case, the optimizer has
  3210             // detected that the virtual call will always result in a null
  3211             // pointer exception. The fall-through projection of this CatchNode
  3212             // will not be populated.
  3213             Node *arg0 = call->in(TypeFunc::Parms);
  3214             if (arg0->is_Type() &&
  3215                 arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) {
  3216               required_outcnt--;
  3217             }
  3218           } else if (call->entry_point() == OptoRuntime::new_array_Java() &&
  3219                      call->req() > TypeFunc::Parms+1 &&
  3220                      call->is_CallStaticJava()) {
  3221             // Check for negative array length. In such case, the optimizer has
  3222             // detected that the allocation attempt will always result in an
  3223             // exception. There is no fall-through projection of this CatchNode.
  3224             Node *arg1 = call->in(TypeFunc::Parms+1);
  3225             if (arg1->is_Type() &&
  3226                 arg1->as_Type()->type()->join(TypeInt::POS)->empty()) {
  3227               required_outcnt--;
  3228             }
  3229           }
  3230         }
  3231       }
  3232       // Recheck with a better notion of 'required_outcnt'
  3233       if (n->outcnt() != required_outcnt) {
  3234         record_method_not_compilable("malformed control flow");
  3235         record_method_not_compilable("malformed control flow");
  3236         return true;            // Not all targets reachable!
  3237       }
  3238     }
  3238     // Check that I actually visited all kids.  Unreached kids
  3239     // must be infinite loops.
  3240     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++)
  3241       if (!frc._visited.test(n->fast_out(j)->_idx)) {
  3242         record_method_not_compilable("infinite loop");
  3243         return true;            // Found unvisited kid; must be unreach
  3244       }
  3245   }
  3247   // If the original bytecodes contained a mixture of floats and doubles,
  3248   // check if the optimizer has made it homogeneous, item (3).
  3249   if( Use24BitFPMode && Use24BitFP && UseSSE == 0 &&
  3250       frc.get_float_count() > 32 &&
  3251       frc.get_double_count() == 0 &&
  3252       (10 * frc.get_call_count() < frc.get_float_count()) ) {
  3253     set_24_bit_selection_and_mode( false,  true );
  3254   }
  3256   set_java_calls(frc.get_java_call_count());
  3257   set_inner_loops(frc.get_inner_loop_count());
  3259   // No infinite loops, no reason to bail out.
  3260   return false;
  3261 }
  3263 //-----------------------------too_many_traps----------------------------------
  3264 // Report if there are too many traps at the current method and bci.
  3265 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
  3266 bool Compile::too_many_traps(ciMethod* method,
  3267                              int bci,
  3268                              Deoptimization::DeoptReason reason) {
  3269   ciMethodData* md = method->method_data();
  3270   if (md->is_empty()) {
  3271     // Assume the trap has not occurred, or that it occurred only
  3272     // because of a transient condition during start-up in the interpreter.
  3273     return false;
  3274   }
  3275   if (md->has_trap_at(bci, reason) != 0) {
  3276     // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
  3277     // Also, if there are multiple reasons, or if there is no per-BCI record,
  3278     // assume the worst.
  3279     if (log())
  3280       log()->elem("observe trap='%s' count='%d'",
  3281                   Deoptimization::trap_reason_name(reason),
  3282                   md->trap_count(reason));
  3283     return true;
  3284   } else {
  3285     // Ignore method/bci and see if there have been too many globally.
  3286     return too_many_traps(reason, md);
  3287   }
  3288 }
  3290 // Less-accurate variant which does not require a method and bci.
  3291 bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
  3292                              ciMethodData* logmd) {
  3293   if (trap_count(reason) >= (uint)PerMethodTrapLimit) {
  3294     // Too many traps globally.
  3295     // Note that we use cumulative trap_count, not just md->trap_count.
  3296     if (log()) {
  3297       int mcount = (logmd == NULL)? -1: (int)logmd->trap_count(reason);
  3298       log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
  3299                   Deoptimization::trap_reason_name(reason),
  3300                   mcount, trap_count(reason));
  3301     }
  3302     return true;
  3303   } else {
  3304     // The coast is clear.
  3305     return false;
  3306   }
  3307 }
  3309 //--------------------------too_many_recompiles--------------------------------
  3310 // Report if there are too many recompiles at the current method and bci.
  3311 // Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
  3312 // Is not eager to return true, since this will cause the compiler to use
  3313 // Action_none for a trap point, to avoid too many recompilations.
  3314 bool Compile::too_many_recompiles(ciMethod* method,
  3315                                   int bci,
  3316                                   Deoptimization::DeoptReason reason) {
  3317   ciMethodData* md = method->method_data();
  3318   if (md->is_empty()) {
  3319     // Assume the trap has not occurred, or that it occurred only
  3320     // because of a transient condition during start-up in the interpreter.
  3321     return false;
  3322   }
  3323   // Pick a cutoff point well within PerBytecodeRecompilationCutoff.
  3324   uint bc_cutoff = (uint) PerBytecodeRecompilationCutoff / 8;
  3325   uint m_cutoff  = (uint) PerMethodRecompilationCutoff / 2 + 1;  // not zero
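         // (Assuming the usual default flag values of 200 and 400, this gives
         // a per-bytecode cutoff of 25 and a per-method cutoff of 201.)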
  3326   Deoptimization::DeoptReason per_bc_reason
  3327     = Deoptimization::reason_recorded_per_bytecode_if_any(reason);
  3328   if ((per_bc_reason == Deoptimization::Reason_none
  3329        || md->has_trap_at(bci, reason) != 0)
  3330       // The trap frequency measure we care about is the recompile count:
  3331       && md->trap_recompiled_at(bci)
  3332       && md->overflow_recompile_count() >= bc_cutoff) {
  3333     // Do not emit a trap here if it has already caused recompilations.
  3334     // Also, if there are multiple reasons, or if there is no per-BCI record,
  3335     // assume the worst.
  3336     if (log())
  3337       log()->elem("observe trap='%s recompiled' count='%d' recompiles2='%d'",
  3338                   Deoptimization::trap_reason_name(reason),
  3339                   md->trap_count(reason),
  3340                   md->overflow_recompile_count());
  3341     return true;
  3342   } else if (trap_count(reason) != 0
  3343              && decompile_count() >= m_cutoff) {
  3344     // Too many recompiles globally, and we have seen this sort of trap.
  3345     // Use cumulative decompile_count, not just md->decompile_count.
  3346     if (log())
  3347       log()->elem("observe trap='%s' count='%d' mcount='%d' decompiles='%d' mdecompiles='%d'",
  3348                   Deoptimization::trap_reason_name(reason),
  3349                   md->trap_count(reason), trap_count(reason),
  3350                   md->decompile_count(), decompile_count());
  3351     return true;
  3352   } else {
  3353     // The coast is clear.
  3354     return false;
  3355   }
  3356 }
  3359 #ifndef PRODUCT
  3360 //------------------------------verify_graph_edges---------------------------
  3361 // Walk the Graph and verify that there is a one-to-one correspondence
  3362 // between Use-Def edges and Def-Use edges in the graph.
  3363 void Compile::verify_graph_edges(bool no_dead_code) {
  3364   if (VerifyGraphEdges) {
  3365     ResourceArea *area = Thread::current()->resource_area();
  3366     Unique_Node_List visited(area);
  3367     // Call recursive graph walk to check edges
  3368     _root->verify_edges(visited);
  3369     if (no_dead_code) {
  3370       // Now make sure that no visited node is used by an unvisited node.
  3371       int dead_nodes = 0;
  3372       Unique_Node_List checked(area);
  3373       while (visited.size() > 0) {
  3374         Node* n = visited.pop();
  3375         checked.push(n);
  3376         for (uint i = 0; i < n->outcnt(); i++) {
  3377           Node* use = n->raw_out(i);
  3378           if (checked.member(use))  continue;  // already checked
  3379           if (visited.member(use))  continue;  // already in the graph
  3380           if (use->is_Con())        continue;  // a dead ConNode is OK
  3381           // At this point, we have found a dead node which is DU-reachable.
  3382           if (dead_nodes++ == 0)
  3383             tty->print_cr("*** Dead nodes reachable via DU edges:");
  3384           use->dump(2);
  3385           tty->print_cr("---");
  3386           checked.push(use);  // No repeats; pretend it is now checked.
  3387         }
  3388       }
  3389       assert(dead_nodes == 0, "using nodes must be reachable from root");
  3390     }
  3391   }
  3392 }
  3394 // Verify GC barriers consistency
  3395 // Currently supported:
  3396 // - G1 pre-barriers (see GraphKit::g1_write_barrier_pre())
  3397 void Compile::verify_barriers() {
  3398   if (UseG1GC) {
  3399     // Verify G1 pre-barriers
  3400     const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active());
  3402     ResourceArea *area = Thread::current()->resource_area();
  3403     Unique_Node_List visited(area);
  3404     Node_List worklist(area);
  3405     // We're going to walk control flow backwards starting from the Root
  3406     worklist.push(_root);
  3407     while (worklist.size() > 0) {
  3408       Node* x = worklist.pop();
  3409       if (x == NULL || x == top()) continue;
  3410       if (visited.member(x)) {
  3411         continue;
  3412       } else {
  3413         visited.push(x);
  3414       }
  3416       if (x->is_Region()) {
  3417         for (uint i = 1; i < x->req(); i++) {
  3418           worklist.push(x->in(i));
  3419         }
  3420       } else {
  3421         worklist.push(x->in(0));
  3422         // We are looking for the pattern:
  3423         //                            /->ThreadLocal
  3424         // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
  3425         //              \->ConI(0)
  3426         // We want to verify that the If and the LoadB have the same control
  3427         // See GraphKit::g1_write_barrier_pre()
  3428         if (x->is_If()) {
  3429           IfNode *iff = x->as_If();
  3430           if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
  3431             CmpNode *cmp = iff->in(1)->in(1)->as_Cmp();
  3432             if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
  3433                 && cmp->in(1)->is_Load()) {
  3434               LoadNode* load = cmp->in(1)->as_Load();
  3435               if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
  3436                   && load->in(2)->in(3)->is_Con()
  3437                   && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {
  3439                 Node* if_ctrl = iff->in(0);
  3440                 Node* load_ctrl = load->in(0);
  3442                 if (if_ctrl != load_ctrl) {
  3443                   // Skip possible CProj->NeverBranch in infinite loops
  3444                   if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
  3445                       && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) {
  3446                     if_ctrl = if_ctrl->in(0)->in(0);
  3447                   }
  3448                 }
  3449                 assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
  3450               }
  3451             }
  3452           }
  3453         }
  3454       }
  3455     }
  3456   }
  3457 }
  3459 #endif
  3461 // The Compile object keeps track of failure reasons separately from the ciEnv.
  3462 // This is required because there is not quite a 1-1 relation between the
  3463 // ciEnv and its compilation task and the Compile object.  Note that one
  3464 // ciEnv might use two Compile objects, if C2Compiler::compile_method decides
  3465 // to backtrack and retry without subsuming loads.  Other than this backtracking
  3466 // behavior, the Compile's failure reason is quietly copied up to the ciEnv
  3467 // by the logic in C2Compiler.
  3468 void Compile::record_failure(const char* reason) {
  3469   if (log() != NULL) {
  3470     log()->elem("failure reason='%s' phase='compile'", reason);
  3471   }
  3472   if (_failure_reason == NULL) {
  3473     // Record the first failure reason.
  3474     _failure_reason = reason;
  3475   }
  3477   EventCompilerFailure event;
  3478   if (event.should_commit()) {
  3479     event.set_compileID(Compile::compile_id());
  3480     event.set_failure(reason);
  3481     event.commit();
  3482   }
  3484   if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
  3485     C->print_method(PHASE_FAILURE);
  3486   }
  3487   _root = NULL;  // flush the graph, too
  3488 }
  3490 Compile::TracePhase::TracePhase(const char* name, elapsedTimer* accumulator, bool dolog)
  3491   : TraceTime(NULL, accumulator, false NOT_PRODUCT( || TimeCompiler ), false),
  3492     _phase_name(name), _dolog(dolog)
  3493 {
  3494   if (dolog) {
  3495     C = Compile::current();
  3496     _log = C->log();
  3497   } else {
  3498     C = NULL;
  3499     _log = NULL;
  3500   }
  3501   if (_log != NULL) {
  3502     _log->begin_head("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
  3503     _log->stamp();
  3504     _log->end_head();
  3505   }
  3506 }
  3508 Compile::TracePhase::~TracePhase() {
  3510   C = Compile::current();
  3511   if (_dolog) {
  3512     _log = C->log();
  3513   } else {
  3514     _log = NULL;
  3515   }
  3517 #ifdef ASSERT
  3518   if (PrintIdealNodeCount) {
  3519     tty->print_cr("phase name='%s' nodes='%d' live='%d' live_graph_walk='%d'",
  3520                   _phase_name, C->unique(), C->live_nodes(), C->count_live_nodes_by_graph_walk());
  3521   }
  3523   if (VerifyIdealNodeCount) {
  3524     Compile::current()->print_missing_nodes();
  3525   }
  3526 #endif
  3528   if (_log != NULL) {
  3529     _log->done("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes());
  3530   }
  3531 }
  3533 //=============================================================================
  3534 // Two Constants are equal when the type and the value are equal.
  3535 bool Compile::Constant::operator==(const Constant& other) {
  3536   if (type()          != other.type()         )  return false;
  3537   if (can_be_reused() != other.can_be_reused())  return false;
  3538   // For floating point values we compare the bit pattern.
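         // (Comparing bits means +0.0f and -0.0f are distinct here, and two
         // NaNs with identical bit patterns are equal; both are the desired
         // behavior when deciding whether a constant-table slot can be shared.)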
  3539   switch (type()) {
  3540   case T_FLOAT:   return (_v._value.i == other._v._value.i);
  3541   case T_LONG:
  3542   case T_DOUBLE:  return (_v._value.j == other._v._value.j);
  3543   case T_OBJECT:
  3544   case T_ADDRESS: return (_v._value.l == other._v._value.l);
  3545   case T_VOID:    return (_v._value.l == other._v._value.l);  // jump-table entries
  3546   case T_METADATA: return (_v._metadata == other._v._metadata);
  3547   default: ShouldNotReachHere();
  3548   }
  3549   return false;
  3550 }
  3552 static int type_to_size_in_bytes(BasicType t) {
  3553   switch (t) {
  3554   case T_LONG:    return sizeof(jlong  );
  3555   case T_FLOAT:   return sizeof(jfloat );
  3556   case T_DOUBLE:  return sizeof(jdouble);
  3557   case T_METADATA: return sizeof(Metadata*);
  3558     // We use T_VOID as marker for jump-table entries (labels) which
  3559     // need an internal word relocation.
  3560   case T_VOID:
  3561   case T_ADDRESS:
  3562   case T_OBJECT:  return sizeof(jobject);
  3563   }
  3565   ShouldNotReachHere();
  3566   return -1;
  3567 }
  3569 int Compile::ConstantTable::qsort_comparator(Constant* a, Constant* b) {
  3570   // sort descending
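         // (so the most frequently used constants end up with the smallest
         // offsets from the constant-table base)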
  3571   if (a->freq() > b->freq())  return -1;
  3572   if (a->freq() < b->freq())  return  1;
  3573   return 0;
  3574 }
  3576 void Compile::ConstantTable::calculate_offsets_and_size() {
  3577   // First, sort the array by frequencies.
  3578   _constants.sort(qsort_comparator);
  3580 #ifdef ASSERT
  3581   // Make sure all jump-table entries were sorted to the end of the
  3582   // array (they have a negative frequency).
  3583   bool found_void = false;
  3584   for (int i = 0; i < _constants.length(); i++) {
  3585     Constant con = _constants.at(i);
  3586     if (con.type() == T_VOID)
  3587       found_void = true;  // jump-tables
  3588     else
  3589       assert(!found_void, "wrong sorting");
  3590   }
  3591 #endif
  3593   int offset = 0;
  3594   for (int i = 0; i < _constants.length(); i++) {
  3595     Constant* con = _constants.adr_at(i);
  3597     // Align offset for type.
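           // (align_size_up rounds offset up to the next multiple of typesize,
           // e.g. align_size_up(4, sizeof(jdouble)) == 8)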
  3598     int typesize = type_to_size_in_bytes(con->type());
  3599     offset = align_size_up(offset, typesize);
  3600     con->set_offset(offset);   // set constant's offset
  3602     if (con->type() == T_VOID) {
  3603       MachConstantNode* n = (MachConstantNode*) con->get_jobject();
  3604       offset = offset + typesize * n->outcnt();  // expand jump-table
  3605     } else {
  3606       offset = offset + typesize;
  3607     }
  3608   }
  3610   // Align size up to the next section start (which is insts; see
  3611   // CodeBuffer::align_at_start).
  3612   assert(_size == -1, "already set?");
  3613   _size = align_size_up(offset, CodeEntryAlignment);
  3614 }
  3616 void Compile::ConstantTable::emit(CodeBuffer& cb) {
  3617   MacroAssembler _masm(&cb);
  3618   for (int i = 0; i < _constants.length(); i++) {
  3619     Constant con = _constants.at(i);
  3620     address constant_addr;
  3621     switch (con.type()) {
  3622     case T_LONG:   constant_addr = _masm.long_constant(  con.get_jlong()  ); break;
  3623     case T_FLOAT:  constant_addr = _masm.float_constant( con.get_jfloat() ); break;
  3624     case T_DOUBLE: constant_addr = _masm.double_constant(con.get_jdouble()); break;
  3625     case T_OBJECT: {
  3626       jobject obj = con.get_jobject();
  3627       int oop_index = _masm.oop_recorder()->find_index(obj);
  3628       constant_addr = _masm.address_constant((address) obj, oop_Relocation::spec(oop_index));
  3629       break;
  3630     }
  3631     case T_ADDRESS: {
  3632       address addr = (address) con.get_jobject();
  3633       constant_addr = _masm.address_constant(addr);
  3634       break;
  3635     }
  3636     // We use T_VOID as marker for jump-table entries (labels) which
  3637     // need an internal word relocation.
  3638     case T_VOID: {
  3639       MachConstantNode* n = (MachConstantNode*) con.get_jobject();
  3640       // Fill the jump-table with a dummy word.  The real value is
  3641       // filled in later in fill_jump_table.
  3642       address dummy = (address) n;
  3643       constant_addr = _masm.address_constant(dummy);
  3644       // Expand jump-table
  3645       for (uint i = 1; i < n->outcnt(); i++) {
  3646         address temp_addr = _masm.address_constant(dummy + i);
  3647         assert(temp_addr, "consts section too small");
  3648       }
  3649       break;
  3650     }
  3651     case T_METADATA: {
  3652       Metadata* obj = con.get_metadata();
  3653       int metadata_index = _masm.oop_recorder()->find_index(obj);
  3654       constant_addr = _masm.address_constant((address) obj, metadata_Relocation::spec(metadata_index));
  3655       break;
  3656     }
  3657     default: ShouldNotReachHere();
  3658     }
  3659     assert(constant_addr, "consts section too small");
  3660     assert((constant_addr - _masm.code()->consts()->start()) == con.offset(), err_msg_res("must be: %d == %d", constant_addr - _masm.code()->consts()->start(), con.offset()));
  3661   }
  3662 }
  3664 int Compile::ConstantTable::find_offset(Constant& con) const {
  3665   int idx = _constants.find(con);
  3666   assert(idx != -1, "constant must be in constant table");
  3667   int offset = _constants.at(idx).offset();
  3668   assert(offset != -1, "constant table not emitted yet?");
  3669   return offset;
  3670 }
  3672 void Compile::ConstantTable::add(Constant& con) {
  3673   if (con.can_be_reused()) {
  3674     int idx = _constants.find(con);
  3675     if (idx != -1 && _constants.at(idx).can_be_reused()) {
  3676       _constants.adr_at(idx)->inc_freq(con.freq());  // increase the frequency by the current value
  3677       return;
  3678     }
  3679   }
  3680   (void) _constants.append(con);
  3681 }
  3683 Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, BasicType type, jvalue value) {
  3684   Block* b = Compile::current()->cfg()->get_block_for_node(n);
  3685   Constant con(type, value, b->_freq);
  3686   add(con);
  3687   return con;
  3688 }
  3690 Compile::Constant Compile::ConstantTable::add(Metadata* metadata) {
  3691   Constant con(metadata);
  3692   add(con);
  3693   return con;
  3694 }
  3696 Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, MachOper* oper) {
  3697   jvalue value;
  3698   BasicType type = oper->type()->basic_type();
  3699   switch (type) {
  3700   case T_LONG:    value.j = oper->constantL(); break;
  3701   case T_FLOAT:   value.f = oper->constantF(); break;
  3702   case T_DOUBLE:  value.d = oper->constantD(); break;
  3703   case T_OBJECT:
  3704   case T_ADDRESS: value.l = (jobject) oper->constant(); break;
  3705   case T_METADATA: return add((Metadata*)oper->constant()); break;
  3706   default: guarantee(false, err_msg_res("unhandled type: %s", type2name(type)));
  3707   }
  3708   return add(n, type, value);
  3709 }
  3711 Compile::Constant Compile::ConstantTable::add_jump_table(MachConstantNode* n) {
  3712   jvalue value;
  3713   // We can use the node pointer here to identify the right jump-table
  3714   // as this method is called from Compile::Fill_buffer right before
  3715 // the MachNodes are emitted and the jump-table is filled (meaning the
  3716   // MachNode pointers do not change anymore).
  3717   value.l = (jobject) n;
  3718   Constant con(T_VOID, value, next_jump_table_freq(), false);  // Labels of a jump-table cannot be reused.
  3719   add(con);
  3720   return con;
  3721 }
  3723 void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const {
  3724   // If called from Compile::scratch_emit_size do nothing.
  3725   if (Compile::current()->in_scratch_emit_size())  return;
  3727   assert(labels.is_nonempty(), "must be");
  3728   assert((uint) labels.length() == n->outcnt(), err_msg_res("must be equal: %d == %d", labels.length(), n->outcnt()));
  3730   // Since MachConstantNode::constant_offset() also contains
  3731   // table_base_offset() we need to subtract the table_base_offset()
  3732   // to get the plain offset into the constant table.
  3733   int offset = n->constant_offset() - table_base_offset();
  3735   MacroAssembler _masm(&cb);
  3736   address* jump_table_base = (address*) (_masm.code()->consts()->start() + offset);
  3738   for (uint i = 0; i < n->outcnt(); i++) {
  3739     address* constant_addr = &jump_table_base[i];
  3740     assert(*constant_addr == (((address) n) + i), err_msg_res("all jump-table entries must contain adjusted node pointer: " INTPTR_FORMAT " == " INTPTR_FORMAT, *constant_addr, (((address) n) + i)));
  3741     *constant_addr = cb.consts()->target(*labels.at(i), (address) constant_addr);
  3742     cb.consts()->relocate((address) constant_addr, relocInfo::internal_word_type);
  3743   }
  3744 }
  3746 void Compile::dump_inlining() {
  3747   if (print_inlining() || print_intrinsics()) {
  3748     // Print inlining message for candidates that we couldn't inline
  3749     // for lack of space or a non-constant receiver
  3750     for (int i = 0; i < _late_inlines.length(); i++) {
  3751       CallGenerator* cg = _late_inlines.at(i);
  3752       cg->print_inlining_late("live nodes > LiveNodeCountInliningCutoff");
  3753     }
  3754     Unique_Node_List useful;
  3755     useful.push(root());
  3756     for (uint next = 0; next < useful.size(); ++next) {
  3757       Node* n  = useful.at(next);
  3758       if (n->is_Call() && n->as_Call()->generator() != NULL && n->as_Call()->generator()->call_node() == n) {
  3759         CallNode* call = n->as_Call();
  3760         CallGenerator* cg = call->generator();
  3761         cg->print_inlining_late("receiver not constant");
  3762       }
  3763       uint max = n->len();
  3764       for ( uint i = 0; i < max; ++i ) {
  3765         Node *m = n->in(i);
  3766         if ( m == NULL ) continue;
  3767         useful.push(m);
  3768       }
  3769     }
  3770     for (int i = 0; i < _print_inlining_list->length(); i++) {
  3771       tty->print(_print_inlining_list->adr_at(i)->ss()->as_string());
  3772     }
  3773   }
  3774 }
  3776 // Dump inlining replay data to the stream.
  3777 // Doesn't change thread state or acquire any locks.
  3778 void Compile::dump_inline_data(outputStream* out) {
  3779   InlineTree* inl_tree = ilt();
  3780   if (inl_tree != NULL) {
  3781     out->print(" inline %d", inl_tree->count());
  3782     inl_tree->dump_replay_data(out);
  3783   }
  3784 }
  3786 int Compile::cmp_expensive_nodes(Node* n1, Node* n2) {
  3787   if (n1->Opcode() < n2->Opcode())      return -1;
  3788   else if (n1->Opcode() > n2->Opcode()) return 1;
  3790   assert(n1->req() == n2->req(), err_msg_res("can't compare %s nodes: n1->req() = %d, n2->req() = %d", NodeClassNames[n1->Opcode()], n1->req(), n2->req()));
  3791   for (uint i = 1; i < n1->req(); i++) {
  3792     if (n1->in(i) < n2->in(i))      return -1;
  3793     else if (n1->in(i) > n2->in(i)) return 1;
  3794   }
  3796   return 0;
  3797 }
  3799 int Compile::cmp_expensive_nodes(Node** n1p, Node** n2p) {
  3800   Node* n1 = *n1p;
  3801   Node* n2 = *n2p;
  3803   return cmp_expensive_nodes(n1, n2);
  3804 }
  3806 void Compile::sort_expensive_nodes() {
  3807   if (!expensive_nodes_sorted()) {
  3808     _expensive_nodes->sort(cmp_expensive_nodes);
  3809   }
  3810 }
  3812 bool Compile::expensive_nodes_sorted() const {
  3813   for (int i = 1; i < _expensive_nodes->length(); i++) {
  3814     if (cmp_expensive_nodes(_expensive_nodes->adr_at(i), _expensive_nodes->adr_at(i-1)) < 0) {
  3815       return false;
  3816     }
  3817   }
  3818   return true;
  3819 }
  3821 bool Compile::should_optimize_expensive_nodes(PhaseIterGVN &igvn) {
  3822   if (_expensive_nodes->length() == 0) {
  3823     return false;
  3824   }
  3826   assert(OptimizeExpensiveOps, "optimization off?");
  3828   // Take this opportunity to remove dead nodes from the list
  3829   int j = 0;
  3830   for (int i = 0; i < _expensive_nodes->length(); i++) {
  3831     Node* n = _expensive_nodes->at(i);
  3832     if (!n->is_unreachable(igvn)) {
  3833       assert(n->is_expensive(), "should be expensive");
  3834       _expensive_nodes->at_put(j, n);
  3835       j++;
  3836     }
  3837   }
  3838   _expensive_nodes->trunc_to(j);
  3840   // Then sort the list so that similar nodes are next to each other
  3841   // and check for at least two nodes of identical kind with same data
  3842   // inputs.
  3843   sort_expensive_nodes();
  3845   for (int i = 0; i < _expensive_nodes->length()-1; i++) {
  3846     if (cmp_expensive_nodes(_expensive_nodes->adr_at(i), _expensive_nodes->adr_at(i+1)) == 0) {
  3847       return true;
  3848     }
  3849   }
  3851   return false;
  3852 }
  3854 void Compile::cleanup_expensive_nodes(PhaseIterGVN &igvn) {
  3855   if (_expensive_nodes->length() == 0) {
  3856     return;
  3857   }
  3859   assert(OptimizeExpensiveOps, "optimization off?");
  3861   // Sort to bring similar nodes next to each other and clear the
  3862   // control input of nodes for which there's only a single copy.
  3863   sort_expensive_nodes();
  3865   int j = 0;
  3866   int identical = 0;
  3867   int i = 0;
  3868   for (; i < _expensive_nodes->length()-1; i++) {
  3869     assert(j <= i, "can't write beyond current index");
  3870     if (_expensive_nodes->at(i)->Opcode() == _expensive_nodes->at(i+1)->Opcode()) {
  3871       identical++;
  3872       _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
  3873       continue;
  3875     if (identical > 0) {
  3876       _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
  3877       identical = 0;
  3878     } else {
  3879       Node* n = _expensive_nodes->at(i);
  3880       igvn.hash_delete(n);
  3881       n->set_req(0, NULL);
  3882       igvn.hash_insert(n);
  3883     }
  3884   }
  3885   if (identical > 0) {
  3886     _expensive_nodes->at_put(j++, _expensive_nodes->at(i));
  3887   } else if (_expensive_nodes->length() >= 1) {
  3888     Node* n = _expensive_nodes->at(i);
  3889     igvn.hash_delete(n);
  3890     n->set_req(0, NULL);
  3891     igvn.hash_insert(n);
  3892   }
  3893   _expensive_nodes->trunc_to(j);
  3894 }
  3896 void Compile::add_expensive_node(Node * n) {
  3897   assert(!_expensive_nodes->contains(n), "duplicate entry in expensive list");
  3898   assert(n->is_expensive(), "expensive nodes with non-null control here only");
  3899   assert(!n->is_CFG() && !n->is_Mem(), "no cfg or memory nodes here");
  3900   if (OptimizeExpensiveOps) {
  3901     _expensive_nodes->append(n);
  3902   } else {
  3903     // Clear control input and let IGVN optimize expensive nodes if
  3904     // OptimizeExpensiveOps is off.
  3905     n->set_req(0, NULL);
  3906   }
  3907 }
  3909 /**
  3910  * Remove the speculative part of types and clean up the graph
  3911  */
  3912 void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
  3913   if (UseTypeSpeculation) {
  3914     Unique_Node_List worklist;
  3915     worklist.push(root());
  3916     int modified = 0;
  3917     // Go over all type nodes that carry a speculative type, drop the
  3918     // speculative part of the type and enqueue the node for an igvn
  3919     // which may optimize it out.
  3920     for (uint next = 0; next < worklist.size(); ++next) {
  3921       Node *n  = worklist.at(next);
  3922       if (n->is_Type() && n->as_Type()->type()->isa_oopptr() != NULL &&
  3923           n->as_Type()->type()->is_oopptr()->speculative() != NULL) {
  3924         TypeNode* tn = n->as_Type();
  3925         const TypeOopPtr* t = tn->type()->is_oopptr();
  3926         bool in_hash = igvn.hash_delete(n);
  3927         assert(in_hash, "node should be in igvn hash table");
  3928         tn->set_type(t->remove_speculative());
  3929         igvn.hash_insert(n);
  3930         igvn._worklist.push(n); // give it a chance to go away
  3931         modified++;
  3932       }
  3933       uint max = n->len();
  3934       for( uint i = 0; i < max; ++i ) {
  3935         Node *m = n->in(i);
  3936         if (not_a_node(m))  continue;
  3937         worklist.push(m);
  3938       }
  3939     }
  3940     // Drop the speculative part of all types in the igvn's type table
  3941     igvn.remove_speculative_types();
  3942     if (modified > 0) {
  3943       igvn.optimize();
  3944     }
  3945   }
  3946 }
  3948 // Auxiliary method to support randomized stressing/fuzzing.
  3949 //
  3950 // This method can be called an arbitrary number of times, with the current count
  3951 // as the argument. The logic allows selecting a single candidate from the
  3952 // running list of candidates as follows:
  3953 //    int count = 0;
  3954 //    Cand* selected = null;
  3955 //    while(cand = cand->next()) {
  3956 //      if (randomized_select(++count)) {
  3957 //        selected = cand;
  3958 //      }
  3959 //    }
  3960 //
  3961 // Including count equalizes the chances any candidate is "selected".
  3962 // This is useful when we don't have the complete list of candidates to choose
  3963 // from uniformly. In this case, we need to adjust the randomness of the
  3964 // selection, or else we will end up biasing the selection towards the later
  3965 // candidates.
  3966 //
  3967 // A quick back-of-the-envelope calculation shows that for a list of n candidates
  3968 // the equal probability for the candidate to persist as "best" can be
  3969 // achieved by replacing it with "next" k-th candidate with the probability
  3970 // of 1/k. It can be easily shown that by the end of the run, the
  3971 // probability for any candidate is converged to 1/n, thus giving the
  3972 // uniform distribution among all the candidates.
  3973 //
  3974 // We don't care about the domain size as long as (RANDOMIZED_DOMAIN / count) is large.
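       // (This is the classic size-one reservoir-sampling scheme: the k-th
       // candidate replaces the current choice with probability proportional
       // to 1/k.)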
  3975 #define RANDOMIZED_DOMAIN_POW 29
  3976 #define RANDOMIZED_DOMAIN (1 << RANDOMIZED_DOMAIN_POW)
  3977 #define RANDOMIZED_DOMAIN_MASK ((1 << (RANDOMIZED_DOMAIN_POW + 1)) - 1)
  3978 bool Compile::randomized_select(int count) {
  3979   assert(count > 0, "only positive");
  3980   return (os::random() & RANDOMIZED_DOMAIN_MASK) < (RANDOMIZED_DOMAIN / count);
  3981 }
