src/share/vm/opto/compile.cpp

changeset:   2350:2f644f85485d
parent:      2349:5ddfcf4b079e
child:       2414:51bd2d261853
author:      twisti
date:        Fri, 03 Dec 2010 01:34:31 -0800
permissions: -rw-r--r--

6961690: load oops from constant table on SPARC
Summary: oops should be loaded from the constant table of an nmethod instead of materializing them with a long code sequence.
Reviewed-by: never, kvn
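
As a hedged illustration of what the change buys (not code from this file, and not the actual SPARC ad-file implementation): an oop constant that previously had to be materialized inline with several instructions can instead become a single load relative to the constant table base that Compile::mach_constant_base_node() below provides. Roughly, in illustrative pseudo-assembly:

    // Before: materialize the 64-bit oop inline (several instructions)
    //   sethi/or/shift/or ... building the oop bit pattern in Rdst
    // After: one load from the nmethod's constant table
    //   ld [Rconst_table_base + #oop_offset], Rdst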

/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/oopMap.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/compile.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/escape.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/macro.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/output.hpp"
#include "opto/parse.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/stringopts.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/arguments.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"
#include "utilities/copy.hpp"
#ifdef TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#endif

// -------------------- Compile::mach_constant_base_node -----------------------
// Constant table base node singleton.
MachConstantBaseNode* Compile::mach_constant_base_node() {
  if (_mach_constant_base_node == NULL) {
    _mach_constant_base_node = new (C) MachConstantBaseNode();
    _mach_constant_base_node->add_req(C->root());
  }
  return _mach_constant_base_node;
}
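
// Illustrative note (not in the original file): machine nodes that read the
// constant table fetch this lazily created, per-compile singleton, which has
// root() as its only input, e.g.
//   Node* base = C->mach_constant_base_node();  // shared by all users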

/// Support for intrinsics.

// Return the index at which m must be inserted (or already exists).
// The sort order is by the address of the ciMethod, with is_virtual as minor key.
int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual) {
#ifdef ASSERT
  for (int i = 1; i < _intrinsics->length(); i++) {
    CallGenerator* cg1 = _intrinsics->at(i-1);
    CallGenerator* cg2 = _intrinsics->at(i);
    assert(cg1->method() != cg2->method()
           ? cg1->method()     < cg2->method()
           : cg1->is_virtual() < cg2->is_virtual(),
           "compiler intrinsics list must stay sorted");
  }
#endif
  // Binary search sorted list, in decreasing intervals [lo, hi].
  int lo = 0, hi = _intrinsics->length()-1;
  while (lo <= hi) {
    int mid = (uint)(hi + lo) / 2;
    ciMethod* mid_m = _intrinsics->at(mid)->method();
    if (m < mid_m) {
      hi = mid-1;
    } else if (m > mid_m) {
      lo = mid+1;
    } else {
      // look at minor sort key
      bool mid_virt = _intrinsics->at(mid)->is_virtual();
      if (is_virtual < mid_virt) {
        hi = mid-1;
      } else if (is_virtual > mid_virt) {
        lo = mid+1;
      } else {
        return mid;  // exact match
      }
    }
  }
  return lo;  // inexact match
}
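
// Illustrative sketch (not in the original file): the ordering the sorted
// _intrinsics list maintains, written as a standalone predicate over the
// (method address, is_virtual) pair that the asserts and the search both use.
static bool intrinsic_comes_before(CallGenerator* a, CallGenerator* b) {
  return a->method() != b->method()
         ? a->method()     < b->method()      // major key: ciMethod address
         : a->is_virtual() < b->is_virtual(); // minor key: non-virtual first
}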

void Compile::register_intrinsic(CallGenerator* cg) {
  if (_intrinsics == NULL) {
    _intrinsics = new GrowableArray<CallGenerator*>(60);
  }
  // This code is stolen from ciObjectFactory::insert.
  // Really, GrowableArray should have methods for
  // insert_at, remove_at, and binary_search.
  int len = _intrinsics->length();
  int index = intrinsic_insertion_index(cg->method(), cg->is_virtual());
  if (index == len) {
    _intrinsics->append(cg);
  } else {
#ifdef ASSERT
    CallGenerator* oldcg = _intrinsics->at(index);
    assert(oldcg->method() != cg->method() || oldcg->is_virtual() != cg->is_virtual(), "don't register twice");
#endif
    _intrinsics->append(_intrinsics->at(len-1));
    int pos;
    for (pos = len-2; pos >= index; pos--) {
      _intrinsics->at_put(pos+1,_intrinsics->at(pos));
    }
    _intrinsics->at_put(index, cg);
  }
  assert(find_intrinsic(cg->method(), cg->is_virtual()) == cg, "registration worked");
}

CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) {
  assert(m->is_loaded(), "don't try this on unloaded methods");
  if (_intrinsics != NULL) {
    int index = intrinsic_insertion_index(m, is_virtual);
    if (index < _intrinsics->length()
        && _intrinsics->at(index)->method() == m
        && _intrinsics->at(index)->is_virtual() == is_virtual) {
      return _intrinsics->at(index);
    }
  }
  // Lazily create intrinsics for intrinsic IDs well-known in the runtime.
  if (m->intrinsic_id() != vmIntrinsics::_none &&
      m->intrinsic_id() <= vmIntrinsics::LAST_COMPILER_INLINE) {
    CallGenerator* cg = make_vm_intrinsic(m, is_virtual);
    if (cg != NULL) {
      // Save it for next time:
      register_intrinsic(cg);
      return cg;
    } else {
      gather_intrinsic_statistics(m->intrinsic_id(), is_virtual, _intrinsic_disabled);
    }
  }
  return NULL;
}
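
// Illustrative usage (hypothetical call site, not from this file): a caller
// asks for an intrinsic replacement before falling back to normal handling.
//   CallGenerator* cg = C->find_intrinsic(callee, /*is_virtual=*/ false);
//   if (cg != NULL) { /* use cg instead of parsing the callee's bytecodes */ }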

// Compile:: register_library_intrinsics and make_vm_intrinsic are defined
// in library_call.cpp.


#ifndef PRODUCT
// statistics gathering...

juint  Compile::_intrinsic_hist_count[vmIntrinsics::ID_LIMIT] = {0};
jubyte Compile::_intrinsic_hist_flags[vmIntrinsics::ID_LIMIT] = {0};

bool Compile::gather_intrinsic_statistics(vmIntrinsics::ID id, bool is_virtual, int flags) {
  assert(id > vmIntrinsics::_none && id < vmIntrinsics::ID_LIMIT, "oob");
  int oflags = _intrinsic_hist_flags[id];
  assert(flags != 0, "what happened?");
  if (is_virtual) {
    flags |= _intrinsic_virtual;
  }
  bool changed = (flags != oflags);
  if ((flags & _intrinsic_worked) != 0) {
    juint count = (_intrinsic_hist_count[id] += 1);
    if (count == 1) {
      changed = true;           // first time
    }
    // increment the overall count also:
    _intrinsic_hist_count[vmIntrinsics::_none] += 1;
  }
  if (changed) {
    if (((oflags ^ flags) & _intrinsic_virtual) != 0) {
      // Something changed about the intrinsic's virtuality.
      if ((flags & _intrinsic_virtual) != 0) {
        // This is the first use of this intrinsic as a virtual call.
        if (oflags != 0) {
          // We already saw it as a non-virtual, so note both cases.
          flags |= _intrinsic_both;
        }
      } else if ((oflags & _intrinsic_both) == 0) {
        // This is the first use of this intrinsic as a non-virtual
        flags |= _intrinsic_both;
      }
    }
    _intrinsic_hist_flags[id] = (jubyte) (oflags | flags);
  }
  // update the overall flags also:
  _intrinsic_hist_flags[vmIntrinsics::_none] |= (jubyte) flags;
  return changed;
}

static char* format_flags(int flags, char* buf) {
  buf[0] = 0;
  if ((flags & Compile::_intrinsic_worked) != 0)    strcat(buf, ",worked");
  if ((flags & Compile::_intrinsic_failed) != 0)    strcat(buf, ",failed");
  if ((flags & Compile::_intrinsic_disabled) != 0)  strcat(buf, ",disabled");
  if ((flags & Compile::_intrinsic_virtual) != 0)   strcat(buf, ",virtual");
  if ((flags & Compile::_intrinsic_both) != 0)      strcat(buf, ",nonvirtual");
  if (buf[0] == 0)  strcat(buf, ",");
  assert(buf[0] == ',', "must be");
  return &buf[1];
}
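
// For example, flags == (_intrinsic_worked | _intrinsic_virtual) fills buf
// with ",worked,virtual" and the returned &buf[1] reads "worked,virtual";
// flags == 0 yields an empty string via the lone-comma fallback above.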

void Compile::print_intrinsic_statistics() {
  char flagsbuf[100];
  ttyLocker ttyl;
  if (xtty != NULL)  xtty->head("statistics type='intrinsic'");
  tty->print_cr("Compiler intrinsic usage:");
  juint total = _intrinsic_hist_count[vmIntrinsics::_none];
  if (total == 0)  total = 1;  // avoid div0 in case of no successes
  #define PRINT_STAT_LINE(name, c, f) \
    tty->print_cr("  %4d (%4.1f%%) %s (%s)", (int)(c), ((c) * 100.0) / total, name, f);
  for (int index = 1 + (int)vmIntrinsics::_none; index < (int)vmIntrinsics::ID_LIMIT; index++) {
    vmIntrinsics::ID id = (vmIntrinsics::ID) index;
    int   flags = _intrinsic_hist_flags[id];
    juint count = _intrinsic_hist_count[id];
    if ((flags | count) != 0) {
      PRINT_STAT_LINE(vmIntrinsics::name_at(id), count, format_flags(flags, flagsbuf));
    }
  }
  PRINT_STAT_LINE("total", total, format_flags(_intrinsic_hist_flags[vmIntrinsics::_none], flagsbuf));
  if (xtty != NULL)  xtty->tail("statistics");
}

void Compile::print_statistics() {
  { ttyLocker ttyl;
    if (xtty != NULL)  xtty->head("statistics type='opto'");
    Parse::print_statistics();
    PhaseCCP::print_statistics();
    PhaseRegAlloc::print_statistics();
    Scheduling::print_statistics();
    PhasePeephole::print_statistics();
    PhaseIdealLoop::print_statistics();
    if (xtty != NULL)  xtty->tail("statistics");
  }
  if (_intrinsic_hist_flags[vmIntrinsics::_none] != 0) {
    // put this under its own <statistics> element.
    print_intrinsic_statistics();
  }
}
#endif //PRODUCT

// Support for bundling info
Bundle* Compile::node_bundling(const Node *n) {
  assert(valid_bundle_info(n), "oob");
  return &_node_bundling_base[n->_idx];
}

bool Compile::valid_bundle_info(const Node *n) {
  return (_node_bundling_limit > n->_idx);
}


void Compile::gvn_replace_by(Node* n, Node* nn) {
  for (DUIterator_Last imin, i = n->last_outs(imin); i >= imin; ) {
    Node* use = n->last_out(i);
    bool is_in_table = initial_gvn()->hash_delete(use);
    uint uses_found = 0;
    for (uint j = 0; j < use->len(); j++) {
      if (use->in(j) == n) {
        if (j < use->req())
          use->set_req(j, nn);
        else
          use->set_prec(j, nn);
        uses_found++;
      }
    }
    if (is_in_table) {
      // reinsert into table
      initial_gvn()->hash_find_insert(use);
    }
    record_for_igvn(use);
    i -= uses_found;    // we deleted 1 or more copies of this edge
  }
}
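
// Illustrative usage (hypothetical, not from this file): replace n by nn at
// every use while keeping the initial GVN hash table consistent, e.g. after
// an optimization builds a replacement subgraph:
//   C->gvn_replace_by(old_result, new_result);
//   // old_result is now disconnected from all of its former users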

// Identify all nodes that are reachable from below, useful.
// Use a breadth-first pass that records state in a Unique_Node_List;
// recursive traversal is slower.
void Compile::identify_useful_nodes(Unique_Node_List &useful) {
  int estimated_worklist_size = unique();
  useful.map( estimated_worklist_size, NULL );  // preallocate space

  // Initialize worklist
  if (root() != NULL)     { useful.push(root()); }
  // If 'top' is cached, declare it useful to preserve cached node
  if( cached_top_node() ) { useful.push(cached_top_node()); }

  // Push all useful nodes onto the list, breadth-first
  for( uint next = 0; next < useful.size(); ++next ) {
    assert( next < unique(), "Unique useful nodes < total nodes");
    Node *n  = useful.at(next);
    uint max = n->len();
    for( uint i = 0; i < max; ++i ) {
      Node *m = n->in(i);
      if( m == NULL ) continue;
      useful.push(m);
    }
  }
}

// Disconnect all useless nodes by disconnecting those at the boundary.
void Compile::remove_useless_nodes(Unique_Node_List &useful) {
  uint next = 0;
  while( next < useful.size() ) {
    Node *n = useful.at(next++);
    // Use raw traversal of out edges since this code removes out edges
    int max = n->outcnt();
    for (int j = 0; j < max; ++j ) {
      Node* child = n->raw_out(j);
      if( ! useful.member(child) ) {
        assert( !child->is_top() || child != top(),
                "If top is cached in Compile object it is in useful list");
        // Only need to remove this out-edge to the useless node
        n->raw_del_out(j);
        --j;
        --max;
      }
    }
    if (n->outcnt() == 1 && n->has_special_unique_user()) {
      record_for_igvn( n->unique_out() );
    }
  }
  debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
}

//------------------------------frame_size_in_words-----------------------------
// frame_slots in units of words
int Compile::frame_size_in_words() const {
  // shift is 0 in LP32 and 1 in LP64
  const int shift = (LogBytesPerWord - LogBytesPerInt);
  int words = _frame_slots >> shift;
  assert( words << shift == _frame_slots, "frame size must be properly aligned in LP64" );
  return words;
}
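
// Example: on LP64 (shift == 1) a frame of 16 stack slots (4-byte units) is
// 8 words; on LP32 (shift == 0) slots and words coincide, so the assert can
// only fire for an odd _frame_slots value on LP64.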

// ============================================================================
//------------------------------CompileWrapper---------------------------------
class CompileWrapper : public StackObj {
  Compile *const _compile;
 public:
  CompileWrapper(Compile* compile);

  ~CompileWrapper();
};

CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) {
  // the Compile* pointer is stored in the current ciEnv:
  ciEnv* env = compile->env();
  assert(env == ciEnv::current(), "must already be a ciEnv active");
  assert(env->compiler_data() == NULL, "compile already active?");
  env->set_compiler_data(compile);
  assert(compile == Compile::current(), "sanity");

  compile->set_type_dict(NULL);
  compile->set_type_hwm(NULL);
  compile->set_type_last_size(0);
  compile->set_last_tf(NULL, NULL);
  compile->set_indexSet_arena(NULL);
  compile->set_indexSet_free_block_list(NULL);
  compile->init_type_arena();
  Type::Initialize(compile);
  _compile->set_scratch_buffer_blob(NULL);
  _compile->begin_method();
}
CompileWrapper::~CompileWrapper() {
  _compile->end_method();
  if (_compile->scratch_buffer_blob() != NULL)
    BufferBlob::free(_compile->scratch_buffer_blob());
  _compile->env()->set_compiler_data(NULL);
}

//----------------------------print_compile_messages---------------------------
void Compile::print_compile_messages() {
#ifndef PRODUCT
  // Check if recompiling
  if (_subsume_loads == false && PrintOpto) {
    // Recompiling without allowing machine instructions to subsume loads
    tty->print_cr("*********************************************************");
    tty->print_cr("** Bailout: Recompile without subsuming loads          **");
    tty->print_cr("*********************************************************");
  }
  if (_do_escape_analysis != DoEscapeAnalysis && PrintOpto) {
    // Recompiling without escape analysis
    tty->print_cr("*********************************************************");
    tty->print_cr("** Bailout: Recompile without escape analysis          **");
    tty->print_cr("*********************************************************");
  }
  if (env()->break_at_compile()) {
    // Open the debugger when compiling this method.
    tty->print("### Breaking when compiling: ");
    method()->print_short_name();
    tty->cr();
    BREAKPOINT;
  }

  if( PrintOpto ) {
    if (is_osr_compilation()) {
      tty->print("[OSR]%3d", _compile_id);
    } else {
      tty->print("%3d", _compile_id);
    }
  }
#endif
}

void Compile::init_scratch_buffer_blob(int const_size) {
  if (scratch_buffer_blob() != NULL)  return;

  // Construct a temporary CodeBuffer to have it construct a BufferBlob.
  // Cache this BufferBlob for this compile.
  ResourceMark rm;
  _scratch_const_size = const_size;
  int size = (MAX_inst_size + MAX_stubs_size + _scratch_const_size);
  BufferBlob* blob = BufferBlob::create("Compile::scratch_buffer", size);
  // Record the buffer blob for next time.
  set_scratch_buffer_blob(blob);
  // Have we run out of code space?
  if (scratch_buffer_blob() == NULL) {
    // Let CompileBroker disable further compilations.
    record_failure("Not enough space for scratch buffer in CodeCache");
    return;
  }

  // Initialize the relocation buffers
  relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
  set_scratch_locs_memory(locs_buf);
}


void Compile::clear_scratch_buffer_blob() {
  assert(scratch_buffer_blob(), "no BufferBlob set");
  set_scratch_buffer_blob(NULL);
  set_scratch_locs_memory(NULL);
}

//-----------------------scratch_emit_size-------------------------------------
// Helper function that computes size by emitting code
uint Compile::scratch_emit_size(const Node* n) {
  // Start scratch_emit_size section.
  set_in_scratch_emit_size(true);

  // Emit into a trash buffer and count bytes emitted.
  // This is a pretty expensive way to compute a size,
  // but it works well enough if seldom used.
  // All common fixed-size instructions are given a size
  // method by the AD file.
  // Note that the scratch buffer blob and locs memory are
  // allocated at the beginning of the compile task, and
  // may be shared by several calls to scratch_emit_size.
  // The allocation of the scratch buffer blob is particularly
  // expensive, since it has to grab the code cache lock.
  BufferBlob* blob = this->scratch_buffer_blob();
  assert(blob != NULL, "Initialize BufferBlob at start");
  assert(blob->size() > MAX_inst_size, "sanity");
  relocInfo* locs_buf = scratch_locs_memory();
  address blob_begin = blob->content_begin();
  address blob_end   = (address)locs_buf;
  assert(blob->content_contains(blob_end), "sanity");
  CodeBuffer buf(blob_begin, blob_end - blob_begin);
  buf.initialize_consts_size(_scratch_const_size);
  buf.initialize_stubs_size(MAX_stubs_size);
  assert(locs_buf != NULL, "sanity");
  int lsize = MAX_locs_size / 3;
  buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
  buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
  buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);

  // Do the emission.
  n->emit(buf, this->regalloc());

  // End scratch_emit_size section.
  set_in_scratch_emit_size(false);

  return buf.insts_size();
}
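
// Illustrative usage (hypothetical; the real callers live elsewhere, e.g. in
// the output phase): size a machine node whose AD file provides no fixed
// size method, by trial emission into the scratch blob only:
//   uint sz = C->scratch_emit_size(mach);  // nothing is installed anywhere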

// ============================================================================
//------------------------------Compile standard-------------------------------
debug_only( int Compile::_debug_idx = 100000; )

// Compile a method.  entry_bci is -1 for normal compilations and indicates
// the continuation bci for on stack replacement.


Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci, bool subsume_loads, bool do_escape_analysis )
                : Phase(Compiler),
                  _env(ci_env),
                  _log(ci_env->log()),
                  _compile_id(ci_env->compile_id()),
                  _save_argument_registers(false),
                  _stub_name(NULL),
                  _stub_function(NULL),
                  _stub_entry_point(NULL),
                  _method(target),
                  _entry_bci(osr_bci),
                  _initial_gvn(NULL),
                  _for_igvn(NULL),
                  _warm_calls(NULL),
                  _subsume_loads(subsume_loads),
                  _do_escape_analysis(do_escape_analysis),
                  _failure_reason(NULL),
                  _code_buffer("Compile::Fill_buffer"),
                  _orig_pc_slot(0),
                  _orig_pc_slot_offset_in_bytes(0),
                  _has_method_handle_invokes(false),
                  _mach_constant_base_node(NULL),
                  _node_bundling_limit(0),
                  _node_bundling_base(NULL),
                  _java_calls(0),
                  _inner_loops(0),
                  _scratch_const_size(-1),
                  _in_scratch_emit_size(false),
#ifndef PRODUCT
                  _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
                  _printer(IdealGraphPrinter::printer()),
#endif
                  _congraph(NULL) {
  C = this;

  CompileWrapper cw(this);
#ifndef PRODUCT
  if (TimeCompiler2) {
    tty->print(" ");
    target->holder()->name()->print();
    tty->print(".");
    target->print_short_name();
    tty->print("  ");
  }
  TraceTime t1("Total compilation time", &_t_totalCompilation, TimeCompiler, TimeCompiler2);
  TraceTime t2(NULL, &_t_methodCompilation, TimeCompiler, false);
  bool print_opto_assembly = PrintOptoAssembly || _method->has_option("PrintOptoAssembly");
  if (!print_opto_assembly) {
    bool print_assembly = (PrintAssembly || _method->should_print_assembly());
    if (print_assembly && !Disassembler::can_decode()) {
      tty->print_cr("PrintAssembly request changed to PrintOptoAssembly");
      print_opto_assembly = true;
    }
  }
  set_print_assembly(print_opto_assembly);
  set_parsed_irreducible_loop(false);
#endif

  if (ProfileTraps) {
    // Make sure the method being compiled gets its own MDO,
    // so we can at least track the decompile_count().
    method()->ensure_method_data();
  }

  Init(::AliasLevel);


  print_compile_messages();

  if (UseOldInlining || PrintCompilation NOT_PRODUCT( || PrintOpto) )
    _ilt = InlineTree::build_inline_tree_root();
  else
    _ilt = NULL;

  // Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
  assert(num_alias_types() >= AliasIdxRaw, "");

#define MINIMUM_NODE_HASH  1023
  // Node list that Iterative GVN will start with
  Unique_Node_List for_igvn(comp_arena());
  set_for_igvn(&for_igvn);

  // GVN that will be run immediately on new nodes
  uint estimated_size = method()->code_size()*4+64;
  estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
  PhaseGVN gvn(node_arena(), estimated_size);
  set_initial_gvn(&gvn);

  { // Scope for timing the parser
    TracePhase t3("parse", &_t_parser, true);

    // Put top into the hash table ASAP.
    initial_gvn()->transform_no_reclaim(top());

    // Set up tf(), start(), and find a CallGenerator.
    CallGenerator* cg;
    if (is_osr_compilation()) {
      const TypeTuple *domain = StartOSRNode::osr_domain();
      const TypeTuple *range = TypeTuple::make_range(method()->signature());
      init_tf(TypeFunc::make(domain, range));
      StartNode* s = new (this, 2) StartOSRNode(root(), domain);
      initial_gvn()->set_type_bottom(s);
      init_start(s);
      cg = CallGenerator::for_osr(method(), entry_bci());
    } else {
      // Normal case.
      init_tf(TypeFunc::make(method()));
      StartNode* s = new (this, 2) StartNode(root(), tf()->domain());
      initial_gvn()->set_type_bottom(s);
      init_start(s);
      float past_uses = method()->interpreter_invocation_count();
      float expected_uses = past_uses;
      cg = CallGenerator::for_inline(method(), expected_uses);
    }
    if (failing())  return;
    if (cg == NULL) {
      record_method_not_compilable_all_tiers("cannot parse method");
      return;
    }
    JVMState* jvms = build_start_state(start(), tf());
    if ((jvms = cg->generate(jvms)) == NULL) {
      record_method_not_compilable("method parse failed");
      return;
    }
    GraphKit kit(jvms);

    if (!kit.stopped()) {
      // Accept return values, and transfer control we know not where.
      // This is done by a special, unique ReturnNode bound to root.
      return_values(kit.jvms());
    }

    if (kit.has_exceptions()) {
      // Any exceptions that escape from this call must be rethrown
      // to whatever caller is dynamically above us on the stack.
      // This is done by a special, unique RethrowNode bound to root.
      rethrow_exceptions(kit.transfer_exceptions_into_jvms());
    }

    if (!failing() && has_stringbuilder()) {
      {
        // remove useless nodes to make the usage analysis simpler
        ResourceMark rm;
        PhaseRemoveUseless pru(initial_gvn(), &for_igvn);
      }

      {
        ResourceMark rm;
        print_method("Before StringOpts", 3);
        PhaseStringOpts pso(initial_gvn(), &for_igvn);
        print_method("After StringOpts", 3);
      }

      // now inline anything that we skipped the first time around
      while (_late_inlines.length() > 0) {
        CallGenerator* cg = _late_inlines.pop();
        cg->do_late_inline();
      }
    }
    assert(_late_inlines.length() == 0, "should have been processed");

    print_method("Before RemoveUseless", 3);

    // Remove clutter produced by parsing.
    if (!failing()) {
      ResourceMark rm;
      PhaseRemoveUseless pru(initial_gvn(), &for_igvn);
    }
  }

  // Note:  Large methods are capped off in do_one_bytecode().
  if (failing())  return;

  // After parsing, node notes are no longer automagic.
  // They must be propagated by register_new_node_with_optimizer(),
  // clone(), or the like.
  set_default_node_notes(NULL);

  for (;;) {
    int successes = Inline_Warm();
    if (failing())  return;
    if (successes == 0)  break;
  }

  // Drain the list.
  Finish_Warm();
#ifndef PRODUCT
  if (_printer) {
    _printer->print_inlining(this);
  }
#endif

  if (failing())  return;
  NOT_PRODUCT( verify_graph_edges(); )

  // Now optimize
  Optimize();
  if (failing())  return;
  NOT_PRODUCT( verify_graph_edges(); )

#ifndef PRODUCT
  if (PrintIdeal) {
    ttyLocker ttyl;  // keep the following output all in one block
    // This output goes directly to the tty, not the compiler log.
    // To enable tools to match it up with the compilation activity,
    // be sure to tag this tty output with the compile ID.
    if (xtty != NULL) {
      xtty->head("ideal compile_id='%d'%s", compile_id(),
                 is_osr_compilation()    ? " compile_kind='osr'" :
                 "");
    }
    root()->dump(9999);
    if (xtty != NULL) {
      xtty->tail("ideal");
    }
  }
#endif

  // Now that we know the size of all the monitors we can add a fixed slot
  // for the original deopt pc.

  _orig_pc_slot =  fixed_slots();
  int next_slot = _orig_pc_slot + (sizeof(address) / VMRegImpl::stack_slot_size);
  set_fixed_slots(next_slot);

  // Now generate code
  Code_Gen();
  if (failing())  return;

  // Check if we want to skip execution of all compiled code.
  {
#ifndef PRODUCT
    if (OptoNoExecute) {
      record_method_not_compilable("+OptoNoExecute");  // Flag as failed
      return;
    }
    TracePhase t2("install_code", &_t_registerMethod, TimeCompiler);
#endif

    if (is_osr_compilation()) {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
    } else {
      _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
      _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
    }

    env()->register_method(_method, _entry_bci,
                           &_code_offsets,
                           _orig_pc_slot_offset_in_bytes,
                           code_buffer(),
                           frame_size_in_words(), _oop_map_set,
                           &_handler_table, &_inc_table,
                           compiler,
                           env()->comp_level(),
                           true, /*has_debug_info*/
                           has_unsafe_access()
                           );
  }
}

//------------------------------Compile----------------------------------------
// Compile a runtime stub
Compile::Compile( ciEnv* ci_env,
                  TypeFunc_generator generator,
                  address stub_function,
                  const char *stub_name,
                  int is_fancy_jump,
                  bool pass_tls,
                  bool save_arg_registers,
                  bool return_pc )
  : Phase(Compiler),
    _env(ci_env),
    _log(ci_env->log()),
    _compile_id(-1),
    _save_argument_registers(save_arg_registers),
    _method(NULL),
    _stub_name(stub_name),
    _stub_function(stub_function),
    _stub_entry_point(NULL),
    _entry_bci(InvocationEntryBci),
    _initial_gvn(NULL),
    _for_igvn(NULL),
    _warm_calls(NULL),
    _orig_pc_slot(0),
    _orig_pc_slot_offset_in_bytes(0),
    _subsume_loads(true),
    _do_escape_analysis(false),
    _failure_reason(NULL),
    _code_buffer("Compile::Fill_buffer"),
    _has_method_handle_invokes(false),
    _mach_constant_base_node(NULL),
    _node_bundling_limit(0),
    _node_bundling_base(NULL),
    _java_calls(0),
    _inner_loops(0),
#ifndef PRODUCT
    _trace_opto_output(TraceOptoOutput),
    _printer(NULL),
#endif
    _congraph(NULL) {
  C = this;

#ifndef PRODUCT
  TraceTime t1(NULL, &_t_totalCompilation, TimeCompiler, false);
  TraceTime t2(NULL, &_t_stubCompilation, TimeCompiler, false);
  set_print_assembly(PrintFrameConverterAssembly);
  set_parsed_irreducible_loop(false);
#endif
  CompileWrapper cw(this);
  Init(/*AliasLevel=*/ 0);
  init_tf((*generator)());

  {
    // The following is a dummy for the sake of GraphKit::gen_stub
    Unique_Node_List for_igvn(comp_arena());
    set_for_igvn(&for_igvn);  // not used, but some GraphKit guys push on this
    PhaseGVN gvn(Thread::current()->resource_area(),255);
    set_initial_gvn(&gvn);    // not significant, but GraphKit guys use it pervasively
    gvn.transform_no_reclaim(top());

    GraphKit kit;
    kit.gen_stub(stub_function, stub_name, is_fancy_jump, pass_tls, return_pc);
  }

  NOT_PRODUCT( verify_graph_edges(); )
  Code_Gen();
  if (failing())  return;


  // Entry point will be accessed using compile->stub_entry_point();
  if (code_buffer() == NULL) {
    Matcher::soft_match_failure();
  } else {
    if (PrintAssembly && (WizardMode || Verbose))
      tty->print_cr("### Stub::%s", stub_name);

    if (!failing()) {
      assert(_fixed_slots == 0, "no fixed slots used for runtime stubs");

      // Make the NMethod
      // For now we mark the frame as never safe for profile stackwalking
      RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name,
                                                      code_buffer(),
                                                      CodeOffsets::frame_never_safe,
                                                      // _code_offsets.value(CodeOffsets::Frame_Complete),
                                                      frame_size_in_words(),
                                                      _oop_map_set,
                                                      save_arg_registers);
      assert(rs != NULL && rs->is_runtime_stub(), "sanity check");

      _stub_entry_point = rs->entry_point();
    }
  }
}

#ifndef PRODUCT
void print_opto_verbose_signature( const TypeFunc *j_sig, const char *stub_name ) {
  if(PrintOpto && Verbose) {
    tty->print("%s   ", stub_name); j_sig->print_flattened(); tty->cr();
  }
}
#endif

void Compile::print_codes() {
}

//------------------------------Init-------------------------------------------
// Prepare for a single compilation
void Compile::Init(int aliaslevel) {
  _unique  = 0;
  _regalloc = NULL;

  _tf      = NULL;  // filled in later
  _top     = NULL;  // cached later
  _matcher = NULL;  // filled in later
  _cfg     = NULL;  // filled in later

  set_24_bit_selection_and_mode(Use24BitFP, false);

  _node_note_array = NULL;
  _default_node_notes = NULL;

  _immutable_memory = NULL; // filled in at first inquiry

  // Globally visible Nodes
  // First set TOP to NULL to give safe behavior during creation of RootNode
  set_cached_top_node(NULL);
  set_root(new (this, 3) RootNode());
  // Now that you have a Root to point to, create the real TOP
  set_cached_top_node( new (this, 1) ConNode(Type::TOP) );
  set_recent_alloc(NULL, NULL);

  // Create Debug Information Recorder to record scopes, oopmaps, etc.
  env()->set_oop_recorder(new OopRecorder(comp_arena()));
  env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder()));
  env()->set_dependencies(new Dependencies(env()));

  _fixed_slots = 0;
  set_has_split_ifs(false);
  set_has_loops(has_method() && method()->has_loops()); // first approximation
  set_has_stringbuilder(false);
  _trap_can_recompile = false;  // no traps emitted yet
  _major_progress = true; // start out assuming good things will happen
  set_has_unsafe_access(false);
  Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
  set_decompile_count(0);

  set_do_freq_based_layout(BlockLayoutByFrequency || method_has_option("BlockLayoutByFrequency"));
  set_num_loop_opts(LoopOptsCount);
  set_do_inlining(Inline);
  set_max_inline_size(MaxInlineSize);
  set_freq_inline_size(FreqInlineSize);
  set_do_scheduling(OptoScheduling);
  set_do_count_invocations(false);
  set_do_method_data_update(false);

  if (debug_info()->recording_non_safepoints()) {
    set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
                        (comp_arena(), 8, 0, NULL));
    set_default_node_notes(Node_Notes::make(this));
  }

  // // -- Initialize types before each compile --
  // // Update cached type information
  // if( _method && _method->constants() )
  //   Type::update_loaded_types(_method, _method->constants());

  // Init alias_type map.
  if (!_do_escape_analysis && aliaslevel == 3)
    aliaslevel = 2;  // No unique types without escape analysis
  _AliasLevel = aliaslevel;
  const int grow_ats = 16;
  _max_alias_types = grow_ats;
  _alias_types   = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats);
  AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType,  grow_ats);
  Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats);
  {
    for (int i = 0; i < grow_ats; i++)  _alias_types[i] = &ats[i];
  }
  // Initialize the first few types.
  _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL);
  _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM);
  _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM);
  _num_alias_types = AliasIdxRaw+1;
  // Zero out the alias type cache.
  Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache));
  // A NULL adr_type hits in the cache right away.  Preload the right answer.
  probe_alias_cache(NULL)->_index = AliasIdxTop;

  _intrinsics = NULL;
  _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
  _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
  register_library_intrinsics();
}

//---------------------------init_start----------------------------------------
// Install the StartNode on this compile object.
void Compile::init_start(StartNode* s) {
  if (failing())
    return; // already failing
  assert(s == start(), "");
}

StartNode* Compile::start() const {
  assert(!failing(), "");
  for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
    Node* start = root()->fast_out(i);
    if( start->is_Start() )
      return start->as_Start();
  }
  ShouldNotReachHere();
  return NULL;
}

//-------------------------------immutable_memory-------------------------------------
// Access immutable memory
Node* Compile::immutable_memory() {
  if (_immutable_memory != NULL) {
    return _immutable_memory;
  }
  StartNode* s = start();
  for (DUIterator_Fast imax, i = s->fast_outs(imax); true; i++) {
    Node *p = s->fast_out(i);
    if (p != s && p->as_Proj()->_con == TypeFunc::Memory) {
      _immutable_memory = p;
      return _immutable_memory;
    }
  }
  ShouldNotReachHere();
  return NULL;
}

//----------------------set_cached_top_node------------------------------------
// Install the cached top node, and make sure Node::is_top works correctly.
void Compile::set_cached_top_node(Node* tn) {
  if (tn != NULL)  verify_top(tn);
  Node* old_top = _top;
  _top = tn;
  // Calling Node::setup_is_top allows the nodes the chance to adjust
  // their _out arrays.
  if (_top != NULL)     _top->setup_is_top();
  if (old_top != NULL)  old_top->setup_is_top();
  assert(_top == NULL || top()->is_top(), "");
}

#ifndef PRODUCT
void Compile::verify_top(Node* tn) const {
  if (tn != NULL) {
    assert(tn->is_Con(), "top node must be a constant");
    assert(((ConNode*)tn)->type() == Type::TOP, "top node must have correct type");
    assert(tn->in(0) != NULL, "must have live top node");
  }
}
#endif

///-------------------Managing Per-Node Debug & Profile Info-------------------

void Compile::grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by) {
  guarantee(arr != NULL, "");
  int num_blocks = arr->length();
  if (grow_by < num_blocks)  grow_by = num_blocks;
  int num_notes = grow_by * _node_notes_block_size;
  Node_Notes* notes = NEW_ARENA_ARRAY(node_arena(), Node_Notes, num_notes);
  Copy::zero_to_bytes(notes, num_notes * sizeof(Node_Notes));
  while (num_notes > 0) {
    arr->append(notes);
    notes     += _node_notes_block_size;
    num_notes -= _node_notes_block_size;
  }
  assert(num_notes == 0, "exact multiple, please");
}

bool Compile::copy_node_notes_to(Node* dest, Node* source) {
  if (source == NULL || dest == NULL)  return false;

  if (dest->is_Con())
    return false;               // Do not push debug info onto constants.

#ifdef ASSERT
  // Leave a bread crumb trail pointing to the original node:
  if (dest != NULL && dest != source && dest->debug_orig() == NULL) {
    dest->set_debug_orig(source);
  }
#endif

  if (node_note_array() == NULL)
    return false;               // Not collecting any notes now.

  // This is a copy onto a pre-existing node, which may already have notes.
  // If both nodes have notes, do not overwrite any pre-existing notes.
  Node_Notes* source_notes = node_notes_at(source->_idx);
  if (source_notes == NULL || source_notes->is_clear())  return false;
  Node_Notes* dest_notes   = node_notes_at(dest->_idx);
  if (dest_notes == NULL || dest_notes->is_clear()) {
    return set_node_notes_at(dest->_idx, source_notes);
  }

  Node_Notes merged_notes = (*source_notes);
  // The order of operations here ensures that dest notes will win...
  merged_notes.update_from(dest_notes);
  return set_node_notes_at(dest->_idx, &merged_notes);
}

//--------------------------allow_range_check_smearing-------------------------
// Gating condition for coalescing similar range checks.
// Sometimes we try 'speculatively' replacing a series of range checks by a
// single covering check that is at least as strong as any of them.
// If the optimization succeeds, the simplified (strengthened) range check
// will always succeed.  If it fails, we will deopt, and then give up
// on the optimization.
bool Compile::allow_range_check_smearing() const {
  // If this method has already thrown a range-check,
  // assume it was because we already tried range smearing
  // and it failed.
  uint already_trapped = trap_count(Deoptimization::Reason_range_check);
  return !already_trapped;
}
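
// Illustrative example (not from this file): separate checks for a[i],
// a[i+1] and a[i+2] may be smeared into one covering check such as
//   0 <= i && i+2 < a.length
// If the covering check ever deopts, Reason_range_check is recorded for the
// method and this gate disables smearing in the recompiled code.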

//------------------------------flatten_alias_type-----------------------------
const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const {
  int offset = tj->offset();
  TypePtr::PTR ptr = tj->ptr();

  // Known instance (scalarizable allocation) alias only with itself.
  bool is_known_inst = tj->isa_oopptr() != NULL &&
                       tj->is_oopptr()->is_known_instance();

  // Process weird unsafe references.
  if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
    assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops");
    assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
    tj = TypeOopPtr::BOTTOM;
    ptr = tj->ptr();
    offset = tj->offset();
  }

  // Array pointers need some flattening
  const TypeAryPtr *ta = tj->isa_aryptr();
  if( ta && is_known_inst ) {
    if ( offset != Type::OffsetBot &&
         offset > arrayOopDesc::length_offset_in_bytes() ) {
      offset = Type::OffsetBot; // Flatten constant access into array body only
      tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id());
    }
  } else if( ta && _AliasLevel >= 2 ) {
    // For arrays indexed by constant indices, we flatten the alias
    // space to include all of the array body.  Only the header, klass
    // and array length can be accessed un-aliased.
    if( offset != Type::OffsetBot ) {
      if( ta->const_oop() ) { // methodDataOop or methodOop
        offset = Type::OffsetBot;   // Flatten constant access into array body
        tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
      } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
        // range is OK as-is.
        tj = ta = TypeAryPtr::RANGE;
      } else if( offset == oopDesc::klass_offset_in_bytes() ) {
        tj = TypeInstPtr::KLASS; // all klass loads look alike
        ta = TypeAryPtr::RANGE; // generic ignored junk
        ptr = TypePtr::BotPTR;
      } else if( offset == oopDesc::mark_offset_in_bytes() ) {
        tj = TypeInstPtr::MARK;
        ta = TypeAryPtr::RANGE; // generic ignored junk
        ptr = TypePtr::BotPTR;
      } else {                  // Random constant offset into array body
        offset = Type::OffsetBot;   // Flatten constant access into array body
        tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset);
      }
    }
    // Arrays of fixed size alias with arrays of unknown size.
    if (ta->size() != TypeInt::POS) {
      const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset);
    }
    // Arrays of known objects become arrays of unknown objects.
    if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
      const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
    }
    if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
      const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset);
    }
    // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
    // cannot be distinguished by bytecode alone.
    if (ta->elem() == TypeInt::BOOL) {
      const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
      ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
      tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
    }
    // During the 2nd round of IterGVN, NotNull castings are removed.
    // Make sure the Bottom and NotNull variants alias the same.
    // Also, make sure exact and non-exact variants alias the same.
    if( ptr == TypePtr::NotNull || ta->klass_is_exact() ) {
      if (ta->const_oop()) {
        tj = ta = TypeAryPtr::make(TypePtr::Constant,ta->const_oop(),ta->ary(),ta->klass(),false,offset);
      } else {
        tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset);
      }
    }
  }

  // Oop pointers need some flattening
  const TypeInstPtr *to = tj->isa_instptr();
  if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) {
    if( ptr == TypePtr::Constant ) {
      // No constant oop pointers (such as Strings); they alias with
      // unknown strings.
      assert(!is_known_inst, "not scalarizable allocation");
      tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
    } else if( is_known_inst ) {
      tj = to; // Keep NotNull and klass_is_exact for instance type
    } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
      // During the 2nd round of IterGVN, NotNull castings are removed.
      // Make sure the Bottom and NotNull variants alias the same.
      // Also, make sure exact and non-exact variants alias the same.
      tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset);
    }
    // Canonicalize the holder of this field
    ciInstanceKlass *k = to->klass()->as_instance_klass();
    if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
      // First handle header references such as a LoadKlassNode, even if the
      // object's klass is unloaded at compile time (4965979).
      if (!is_known_inst) { // Do it only for non-instance types
        tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset);
      }
    } else if (offset < 0 || offset >= k->size_helper() * wordSize) {
      to = NULL;
      tj = TypeOopPtr::BOTTOM;
      offset = tj->offset();
    } else {
      ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset);
      if (!k->equals(canonical_holder) || tj->offset() != offset) {
        if( is_known_inst ) {
          tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id());
        } else {
          tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset);
        }
      }
    }
  }

  // Klass pointers to object array klasses need some flattening
  const TypeKlassPtr *tk = tj->isa_klassptr();
  if( tk ) {
    // If we are referencing a field within a Klass, we need
    // to assume the worst case of an Object.  Both exact and
    // inexact types must flatten to the same alias class.
    // Since the flattened result for a klass is defined to be
    // precisely java.lang.Object, use a constant ptr.
    if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
      tj = tk = TypeKlassPtr::make(TypePtr::Constant,
                                   TypeKlassPtr::OBJECT->klass(),
                                   offset);
    }

    ciKlass* klass = tk->klass();
    if( klass->is_obj_array_klass() ) {
      ciKlass* k = TypeAryPtr::OOPS->klass();
      if( !k || !k->is_loaded() )                  // Only fails for some -Xcomp runs
        k = TypeInstPtr::BOTTOM->klass();
      tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset );
    }

    // Check for precise loads from the primary supertype array and force them
    // to the supertype cache alias index.  Check for generic array loads from
    // the primary supertype array and also force them to the supertype cache
    // alias index.  Since the same load can reach both, we need to merge
    // these 2 disparate memories into the same alias class.  Since the
    // primary supertype array is read-only, there's no chance of confusion
    // where we bypass an array load and an array store.
    uint off2 = offset - Klass::primary_supers_offset_in_bytes();
    if( offset == Type::OffsetBot ||
        off2 < Klass::primary_super_limit()*wordSize ) {
      offset = sizeof(oopDesc) +Klass::secondary_super_cache_offset_in_bytes();
      tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset );
    }
  }

  // Flatten all Raw pointers together.
  if (tj->base() == Type::RawPtr)
    tj = TypeRawPtr::BOTTOM;

  if (tj->base() == Type::AnyPtr)
    tj = TypePtr::BOTTOM;      // An error, which the caller must check for.

  // Flatten all to bottom for now
  switch( _AliasLevel ) {
  case 0:
    tj = TypePtr::BOTTOM;
    break;
  case 1:                       // Flatten to: oop, static, field or array
    switch (tj->base()) {
    //case Type::AryPtr: tj = TypeAryPtr::RANGE;    break;
    case Type::RawPtr:   tj = TypeRawPtr::BOTTOM;   break;
    case Type::AryPtr:   // do not distinguish arrays at all
    case Type::InstPtr:  tj = TypeInstPtr::BOTTOM;  break;
    case Type::KlassPtr: tj = TypeKlassPtr::OBJECT; break;
    case Type::AnyPtr:   tj = TypePtr::BOTTOM;      break;  // caller checks it
    default: ShouldNotReachHere();
    }
    break;
  case 2:                       // No collapsing at level 2; keep all splits
  case 3:                       // No collapsing at level 3; keep all splits
    break;
  default:
    Unimplemented();
  }

  offset = tj->offset();
  assert( offset != Type::OffsetTop, "Offset has fallen from constant" );

  assert( (offset != Type::OffsetBot && tj->base() != Type::AryPtr) ||
          (offset == Type::OffsetBot && tj->base() == Type::AryPtr) ||
          (offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) ||
          (offset == Type::OffsetBot && tj == TypePtr::BOTTOM) ||
          (offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) ||
          (offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) ||
          (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr)  ,
          "For oops, klasses, raw offset must be constant; for arrays the offset is never known" );
  assert( tj->ptr() != TypePtr::TopPTR &&
          tj->ptr() != TypePtr::AnyNull &&
          tj->ptr() != TypePtr::Null, "No imprecise addresses" );
//    assert( tj->ptr() != TypePtr::Constant ||
//            tj->base() == Type::RawPtr ||
//            tj->base() == Type::KlassPtr, "No constant oop addresses" );

  return tj;
}
  1322 void Compile::AliasType::Init(int i, const TypePtr* at) {
  1323   _index = i;
  1324   _adr_type = at;
  1325   _field = NULL;
  1326   _is_rewritable = true; // default
  1327   const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL;
  1328   if (atoop != NULL && atoop->is_known_instance()) {
  1329     const TypeOopPtr *gt = atoop->cast_to_instance_id(TypeOopPtr::InstanceBot);
  1330     _general_index = Compile::current()->get_alias_index(gt);
  1331   } else {
1332     _general_index = 0;
1333   }
1334 }
  1336 //---------------------------------print_on------------------------------------
  1337 #ifndef PRODUCT
  1338 void Compile::AliasType::print_on(outputStream* st) {
  1339   if (index() < 10)
  1340         st->print("@ <%d> ", index());
  1341   else  st->print("@ <%d>",  index());
  1342   st->print(is_rewritable() ? "   " : " RO");
  1343   int offset = adr_type()->offset();
  1344   if (offset == Type::OffsetBot)
  1345         st->print(" +any");
  1346   else  st->print(" +%-3d", offset);
  1347   st->print(" in ");
  1348   adr_type()->dump_on(st);
  1349   const TypeOopPtr* tjp = adr_type()->isa_oopptr();
  1350   if (field() != NULL && tjp) {
  1351     if (tjp->klass()  != field()->holder() ||
  1352         tjp->offset() != field()->offset_in_bytes()) {
  1353       st->print(" != ");
  1354       field()->print();
1355       st->print(" ***");
1356     }
1357   }
1358 }
  1360 void print_alias_types() {
  1361   Compile* C = Compile::current();
  1362   tty->print_cr("--- Alias types, AliasIdxBot .. %d", C->num_alias_types()-1);
  1363   for (int idx = Compile::AliasIdxBot; idx < C->num_alias_types(); idx++) {
  1364     C->alias_type(idx)->print_on(tty);
1365     tty->cr();
1366   }
1367 }
  1368 #endif
  1371 //----------------------------probe_alias_cache--------------------------------
  1372 Compile::AliasCacheEntry* Compile::probe_alias_cache(const TypePtr* adr_type) {
  1373   intptr_t key = (intptr_t) adr_type;
  1374   key ^= key >> logAliasCacheSize;
1375   return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1376 }
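// Worked example of the probe above (a sketch; it assumes a direct-mapped
// cache of 2^logAliasCacheSize entries, with logAliasCacheSize == 8 picked
// here only for concreteness):
//
//   intptr_t key = (intptr_t) adr_type;   // e.g. 0x00007f80deadbe40
//   key ^= key >> 8;                      // fold high bits into low bits
//   int slot = key & right_n_bits(8);     // index into _alias_cache
//
// Arena-allocated types share their high address bits, so the xor keeps
// neighboring types from piling into the same slot.  A collision merely
// overwrites the entry; find_alias_type() never depends on a cache hit for
// correctness, only for speed.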
  1379 //-----------------------------grow_alias_types--------------------------------
  1380 void Compile::grow_alias_types() {
  1381   const int old_ats  = _max_alias_types; // how many before?
  1382   const int new_ats  = old_ats;          // how many more?
  1383   const int grow_ats = old_ats+new_ats;  // how many now?
  1384   _max_alias_types = grow_ats;
  1385   _alias_types =  REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
  1386   AliasType* ats =    NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
  1387   Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1388   for (int i = 0; i < new_ats; i++)  _alias_types[old_ats+i] = &ats[i];
1389 }
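// Growth sketch (assuming an initial capacity of N): since new_ats equals
// old_ats, each call doubles the capacity -- N, 2N, 4N, ...  Only the array
// of AliasType* pointers is reallocated; the AliasType records themselves
// are carved out of comp_arena() once and never move, so an AliasType*
// handed out earlier stays valid across any number of grows:
//
//   before: _max_alias_types == N,  _alias_types[0 .. N-1]  populated
//   after : _max_alias_types == 2N, _alias_types[N .. 2N-1] pointing at
//           freshly zeroed records, initialized later by Init()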
  1392 //--------------------------------find_alias_type------------------------------
  1393 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create) {
  1394   if (_AliasLevel == 0)
  1395     return alias_type(AliasIdxBot);
  1397   AliasCacheEntry* ace = probe_alias_cache(adr_type);
  1398   if (ace->_adr_type == adr_type) {
1399     return alias_type(ace->_index);
1400   }
  1402   // Handle special cases.
  1403   if (adr_type == NULL)             return alias_type(AliasIdxTop);
  1404   if (adr_type == TypePtr::BOTTOM)  return alias_type(AliasIdxBot);
  1406   // Do it the slow way.
  1407   const TypePtr* flat = flatten_alias_type(adr_type);
  1409 #ifdef ASSERT
  1410   assert(flat == flatten_alias_type(flat), "idempotent");
  1411   assert(flat != TypePtr::BOTTOM,     "cannot alias-analyze an untyped ptr");
  1412   if (flat->isa_oopptr() && !flat->isa_klassptr()) {
  1413     const TypeOopPtr* foop = flat->is_oopptr();
  1414     // Scalarizable allocations have exact klass always.
  1415     bool exact = !foop->klass_is_exact() || foop->is_known_instance();
  1416     const TypePtr* xoop = foop->cast_to_exactness(exact)->is_ptr();
1417     assert(foop == flatten_alias_type(xoop), "exactness must not affect alias type");
1418   }
  1419   assert(flat == flatten_alias_type(flat), "exact bit doesn't matter");
  1420 #endif
  1422   int idx = AliasIdxTop;
  1423   for (int i = 0; i < num_alias_types(); i++) {
  1424     if (alias_type(i)->adr_type() == flat) {
  1425       idx = i;
1426       break;
1427     }
1428   }
  1430   if (idx == AliasIdxTop) {
  1431     if (no_create)  return NULL;
  1432     // Grow the array if necessary.
  1433     if (_num_alias_types == _max_alias_types)  grow_alias_types();
  1434     // Add a new alias type.
  1435     idx = _num_alias_types++;
  1436     _alias_types[idx]->Init(idx, flat);
  1437     if (flat == TypeInstPtr::KLASS)  alias_type(idx)->set_rewritable(false);
  1438     if (flat == TypeAryPtr::RANGE)   alias_type(idx)->set_rewritable(false);
  1439     if (flat->isa_instptr()) {
  1440       if (flat->offset() == java_lang_Class::klass_offset_in_bytes()
  1441           && flat->is_instptr()->klass() == env()->Class_klass())
1442         alias_type(idx)->set_rewritable(false);
1443     }
  1444     if (flat->isa_klassptr()) {
  1445       if (flat->offset() == Klass::super_check_offset_offset_in_bytes() + (int)sizeof(oopDesc))
  1446         alias_type(idx)->set_rewritable(false);
  1447       if (flat->offset() == Klass::modifier_flags_offset_in_bytes() + (int)sizeof(oopDesc))
  1448         alias_type(idx)->set_rewritable(false);
  1449       if (flat->offset() == Klass::access_flags_offset_in_bytes() + (int)sizeof(oopDesc))
  1450         alias_type(idx)->set_rewritable(false);
  1451       if (flat->offset() == Klass::java_mirror_offset_in_bytes() + (int)sizeof(oopDesc))
1452         alias_type(idx)->set_rewritable(false);
1453     }
  1454     // %%% (We would like to finalize JavaThread::threadObj_offset(),
  1455     // but the base pointer type is not distinctive enough to identify
  1456     // references into JavaThread.)
  1458     // Check for final instance fields.
  1459     const TypeInstPtr* tinst = flat->isa_instptr();
  1460     if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
  1461       ciInstanceKlass *k = tinst->klass()->as_instance_klass();
  1462       ciField* field = k->get_field_by_offset(tinst->offset(), false);
  1463       // Set field() and is_rewritable() attributes.
1464       if (field != NULL)  alias_type(idx)->set_field(field);
1465     }
  1466     const TypeKlassPtr* tklass = flat->isa_klassptr();
  1467     // Check for final static fields.
  1468     if (tklass && tklass->klass()->is_instance_klass()) {
  1469       ciInstanceKlass *k = tklass->klass()->as_instance_klass();
  1470       ciField* field = k->get_field_by_offset(tklass->offset(), true);
  1471       // Set field() and is_rewritable() attributes.
1472       if (field != NULL)   alias_type(idx)->set_field(field);
1473     }
1474   }
  1476   // Fill the cache for next time.
  1477   ace->_adr_type = adr_type;
  1478   ace->_index    = idx;
  1479   assert(alias_type(adr_type) == alias_type(idx),  "type must be installed");
  1481   // Might as well try to fill the cache for the flattened version, too.
  1482   AliasCacheEntry* face = probe_alias_cache(flat);
  1483   if (face->_adr_type == NULL) {
  1484     face->_adr_type = flat;
  1485     face->_index    = idx;
1486     assert(alias_type(flat) == alias_type(idx), "flat type must work too");
1487   }
1489   return alias_type(idx);
1490 }
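// Typical call path (a sketch): most clients go through get_alias_index(),
// which wraps this routine and hands back just the dense index:
//
//   Compile* C = Compile::current();
//   int idx = C->get_alias_index(n->as_Mem()->adr_type());
//   // idx == AliasIdxTop for a NULL (TOP) address type,
//   // idx == AliasIdxBot for TypePtr::BOTTOM,
//   // otherwise a small index private to this compilation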
  1493 Compile::AliasType* Compile::alias_type(ciField* field) {
  1494   const TypeOopPtr* t;
  1495   if (field->is_static())
  1496     t = TypeKlassPtr::make(field->holder());
  1497   else
  1498     t = TypeOopPtr::make_from_klass_raw(field->holder());
  1499   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()));
  1500   assert(field->is_final() == !atp->is_rewritable(), "must get the rewritable bits correct");
1501   return atp;
1502 }
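// Shape of the two cases above (illustrative):
//
//   instance field: TypeOopPtr::make_from_klass_raw(holder) + offset
//     -> alias class keyed by (instance klass, field offset)
//   static field:   TypeKlassPtr::make(holder) + offset
//     -> statics are addressed off the klass, so the klass pointer type
//        carries the offset instead
//
// In both cases the field's finality must already be reflected in the alias
// type, which is exactly what the assert above verifies.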
  1505 //------------------------------have_alias_type--------------------------------
  1506 bool Compile::have_alias_type(const TypePtr* adr_type) {
  1507   AliasCacheEntry* ace = probe_alias_cache(adr_type);
  1508   if (ace->_adr_type == adr_type) {
1509     return true;
1510   }
  1512   // Handle special cases.
  1513   if (adr_type == NULL)             return true;
  1514   if (adr_type == TypePtr::BOTTOM)  return true;
1516   return find_alias_type(adr_type, true) != NULL;
1517 }
  1519 //-----------------------------must_alias--------------------------------------
  1520 // True if all values of the given address type are in the given alias category.
  1521 bool Compile::must_alias(const TypePtr* adr_type, int alias_idx) {
  1522   if (alias_idx == AliasIdxBot)         return true;  // the universal category
  1523   if (adr_type == NULL)                 return true;  // NULL serves as TypePtr::TOP
  1524   if (alias_idx == AliasIdxTop)         return false; // the empty category
  1525   if (adr_type->base() == Type::AnyPtr) return false; // TypePtr::BOTTOM or its twins
  1527   // the only remaining possible overlap is identity
  1528   int adr_idx = get_alias_index(adr_type);
  1529   assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
  1530   assert(adr_idx == alias_idx ||
  1531          (alias_type(alias_idx)->adr_type() != TypeOopPtr::BOTTOM
  1532           && adr_type                       != TypeOopPtr::BOTTOM),
  1533          "should not be testing for overlap with an unsafe pointer");
1534   return adr_idx == alias_idx;
1535 }
  1537 //------------------------------can_alias--------------------------------------
  1538 // True if any values of the given address type are in the given alias category.
  1539 bool Compile::can_alias(const TypePtr* adr_type, int alias_idx) {
  1540   if (alias_idx == AliasIdxTop)         return false; // the empty category
  1541   if (adr_type == NULL)                 return false; // NULL serves as TypePtr::TOP
  1542   if (alias_idx == AliasIdxBot)         return true;  // the universal category
  1543   if (adr_type->base() == Type::AnyPtr) return true;  // TypePtr::BOTTOM or its twins
  1545   // the only remaining possible overlap is identity
  1546   int adr_idx = get_alias_index(adr_type);
  1547   assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, "");
1548   return adr_idx == alias_idx;
1549 }
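// The two queries side by side (values follow from the checks above):
//
//   must_alias(tp, idx): every address of type tp falls in class idx
//     must_alias(anything, AliasIdxBot)       == true   // universal class
//     must_alias(NULL,     any idx)           == true   // NULL acts as TOP
//
//   can_alias(tp, idx): some address of type tp may fall in class idx
//     can_alias(anything,        AliasIdxTop) == false  // empty class
//     can_alias(TypePtr::BOTTOM, any idx)     == true   // unknown pointer
//
// For every other combination, both reduce to an index comparison.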
  1553 //---------------------------pop_warm_call-------------------------------------
  1554 WarmCallInfo* Compile::pop_warm_call() {
  1555   WarmCallInfo* wci = _warm_calls;
  1556   if (wci != NULL)  _warm_calls = wci->remove_from(wci);
1557   return wci;
1558 }
  1560 //----------------------------Inline_Warm--------------------------------------
  1561 int Compile::Inline_Warm() {
  1562   // If there is room, try to inline some more warm call sites.
  1563   // %%% Do a graph index compaction pass when we think we're out of space?
  1564   if (!InlineWarmCalls)  return 0;
  1566   int calls_made_hot = 0;
  1567   int room_to_grow   = NodeCountInliningCutoff - unique();
  1568   int amount_to_grow = MIN2(room_to_grow, (int)NodeCountInliningStep);
  1569   int amount_grown   = 0;
  1570   WarmCallInfo* call;
  1571   while (amount_to_grow > 0 && (call = pop_warm_call()) != NULL) {
  1572     int est_size = (int)call->size();
  1573     if (est_size > (room_to_grow - amount_grown)) {
  1574       // This one won't fit anyway.  Get rid of it.
  1575       call->make_cold();
1576       continue;
1577     }
  1578     call->make_hot();
  1579     calls_made_hot++;
  1580     amount_grown   += est_size;
1581     amount_to_grow -= est_size;
1582   }
  1584   if (calls_made_hot > 0)  set_major_progress();
1585   return calls_made_hot;
1586 }
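// Worked example of the budget above (all numbers hypothetical):
//
//   NodeCountInliningCutoff = 18000, unique() = 16000
//     -> room_to_grow = 2000
//   NodeCountInliningStep = 1000
//     -> amount_to_grow = MIN2(2000, 1000) = 1000
//
//   warm call, est_size 400:  fits; made hot, amount_to_grow -> 600
//   warm call, est_size 1700: exceeds room_to_grow - amount_grown (1600),
//                             so it is made cold and skipped
//   warm call, est_size 700:  fits (700 <= 1600); made hot, and the budget
//                             is now exhausted, ending the loop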
  1589 //----------------------------Finish_Warm--------------------------------------
  1590 void Compile::Finish_Warm() {
  1591   if (!InlineWarmCalls)  return;
  1592   if (failing())  return;
  1593   if (warm_calls() == NULL)  return;
  1595   // Clean up loose ends, if we are out of space for inlining.
  1596   WarmCallInfo* call;
  1597   while ((call = pop_warm_call()) != NULL) {
1598     call->make_cold();
1599   }
1600 }
  1602 //---------------------cleanup_loop_predicates-----------------------
1603 // Remove the opaque nodes that protect the predicates so that all unused
1604 // checks and uncommon_traps will be eliminated from the ideal graph.
  1605 void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) {
  1606   if (predicate_count()==0) return;
  1607   for (int i = predicate_count(); i > 0; i--) {
  1608     Node * n = predicate_opaque1_node(i-1);
  1609     assert(n->Opcode() == Op_Opaque1, "must be");
1610     igvn.replace_node(n, n->in(1));
1611   }
  1612   assert(predicate_count()==0, "should be clean!");
1613   igvn.optimize();
1614 }
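// Sketch of the shape being cleaned: each parse-time predicate hides its
// (constant-true) condition behind an Opaque1 node, roughly
//
//   If( test( Opaque1( ConI(1) ) ) )
//
// so IGVN cannot fold the guard before loop opts have had their chance.
// Replacing each Opaque1 by its input exposes the constant, and the
// igvn.optimize() call above then collapses the If and sweeps away the
// now-unreachable uncommon trap.  (The exact test shape is a sketch.)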
  1616 //------------------------------Optimize---------------------------------------
  1617 // Given a graph, optimize it.
  1618 void Compile::Optimize() {
  1619   TracePhase t1("optimizer", &_t_optimizer, true);
  1621 #ifndef PRODUCT
  1622   if (env()->break_at_compile()) {
  1623     BREAKPOINT;
  1626 #endif
  1628   ResourceMark rm;
  1629   int          loop_opts_cnt;
  1631   NOT_PRODUCT( verify_graph_edges(); )
  1633   print_method("After Parsing");
  1636   // Iterative Global Value Numbering, including ideal transforms
  1637   // Initialize IterGVN with types and values from parse-time GVN
  1638   PhaseIterGVN igvn(initial_gvn());
  1640     NOT_PRODUCT( TracePhase t2("iterGVN", &_t_iterGVN, TimeCompiler); )
  1641     igvn.optimize();
  1644   print_method("Iter GVN 1", 2);
  1646   if (failing())  return;
  1648   // Perform escape analysis
  1649   if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) {
  1650     TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, true);
  1651     ConnectionGraph::do_analysis(this, &igvn);
  1653     if (failing())  return;
  1655     igvn.optimize();
  1656     print_method("Iter GVN 3", 2);
  1658     if (failing())  return;
  1662   // Loop transforms on the ideal graph.  Range Check Elimination,
  1663   // peeling, unrolling, etc.
  1665   // Set loop opts counter
  1666   loop_opts_cnt = num_loop_opts();
  1667   if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
  1669       TracePhase t2("idealLoop", &_t_idealLoop, true);
  1670       PhaseIdealLoop ideal_loop( igvn, true, UseLoopPredicate);
  1671       loop_opts_cnt--;
  1672       if (major_progress()) print_method("PhaseIdealLoop 1", 2);
  1673       if (failing())  return;
  1675     // Loop opts pass if partial peeling occurred in previous pass
  1676     if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
  1677       TracePhase t3("idealLoop", &_t_idealLoop, true);
  1678       PhaseIdealLoop ideal_loop( igvn, false, UseLoopPredicate);
  1679       loop_opts_cnt--;
  1680       if (major_progress()) print_method("PhaseIdealLoop 2", 2);
  1681       if (failing())  return;
  1683     // Loop opts pass for loop-unrolling before CCP
  1684     if(major_progress() && (loop_opts_cnt > 0)) {
  1685       TracePhase t4("idealLoop", &_t_idealLoop, true);
  1686       PhaseIdealLoop ideal_loop( igvn, false, UseLoopPredicate);
  1687       loop_opts_cnt--;
  1688       if (major_progress()) print_method("PhaseIdealLoop 3", 2);
  1690     if (!failing()) {
  1691       // Verify that last round of loop opts produced a valid graph
  1692       NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
  1693       PhaseIdealLoop::verify(igvn);
  1696   if (failing())  return;
  1698   // Conditional Constant Propagation;
  1699   PhaseCCP ccp( &igvn );
  1700   assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)");
  1702     TracePhase t2("ccp", &_t_ccp, true);
  1703     ccp.do_transform();
1705   print_method("PhaseCCP 1", 2);
  1707   assert( true, "Break here to ccp.dump_old2new_map()");
  1709   // Iterative Global Value Numbering, including ideal transforms
  1711     NOT_PRODUCT( TracePhase t2("iterGVN2", &_t_iterGVN2, TimeCompiler); )
  1712     igvn = ccp;
  1713     igvn.optimize();
  1716   print_method("Iter GVN 2", 2);
  1718   if (failing())  return;
  1720   // Loop transforms on the ideal graph.  Range Check Elimination,
  1721   // peeling, unrolling, etc.
  1722   if(loop_opts_cnt > 0) {
  1723     debug_only( int cnt = 0; );
  1724     bool loop_predication = UseLoopPredicate;
  1725     while(major_progress() && (loop_opts_cnt > 0)) {
  1726       TracePhase t2("idealLoop", &_t_idealLoop, true);
  1727       assert( cnt++ < 40, "infinite cycle in loop optimization" );
  1728       PhaseIdealLoop ideal_loop( igvn, true, loop_predication);
  1729       loop_opts_cnt--;
  1730       if (major_progress()) print_method("PhaseIdealLoop iterations", 2);
  1731       if (failing())  return;
  1732       // Perform loop predication optimization during first iteration after CCP.
1733       // After that, switch it off and clean up unused loop predicates.
  1734       if (loop_predication) {
  1735         loop_predication = false;
  1736         cleanup_loop_predicates(igvn);
  1737         if (failing())  return;
  1743     // Verify that all previous optimizations produced a valid graph
  1744     // at least to this point, even if no loop optimizations were done.
  1745     NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
  1746     PhaseIdealLoop::verify(igvn);
  1750     NOT_PRODUCT( TracePhase t2("macroExpand", &_t_macroExpand, TimeCompiler); )
  1751     PhaseMacroExpand  mex(igvn);
  1752     if (mex.expand_macro_nodes()) {
  1753       assert(failing(), "must bail out w/ explicit message");
  1754       return;
  1758  } // (End scope of igvn; run destructor if necessary for asserts.)
  1760   // A method with only infinite loops has no edges entering loops from root
  1762     NOT_PRODUCT( TracePhase t2("graphReshape", &_t_graphReshaping, TimeCompiler); )
  1763     if (final_graph_reshaping()) {
  1764       assert(failing(), "must bail out w/ explicit message");
  1765       return;
1769   print_method("Optimize finished", 2);
1770 }
  1773 //------------------------------Code_Gen---------------------------------------
  1774 // Given a graph, generate code for it
  1775 void Compile::Code_Gen() {
  1776   if (failing())  return;
  1778   // Perform instruction selection.  You might think we could reclaim Matcher
  1779   // memory PDQ, but actually the Matcher is used in generating spill code.
  1780   // Internals of the Matcher (including some VectorSets) must remain live
1781   // for a while - thus I cannot reclaim Matcher memory lest a VectorSet usage
  1782   // set a bit in reclaimed memory.
  1784   // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
  1785   // nodes.  Mapping is only valid at the root of each matched subtree.
  1786   NOT_PRODUCT( verify_graph_edges(); )
  1788   Node_List proj_list;
  1789   Matcher m(proj_list);
  1790   _matcher = &m;
  1792     TracePhase t2("matcher", &_t_matcher, true);
  1793     m.match();
  1795   // In debug mode can dump m._nodes.dump() for mapping of ideal to machine
  1796   // nodes.  Mapping is only valid at the root of each matched subtree.
  1797   NOT_PRODUCT( verify_graph_edges(); )
  1799   // If you have too many nodes, or if matching has failed, bail out
  1800   check_node_count(0, "out of nodes matching instructions");
  1801   if (failing())  return;
  1803   // Build a proper-looking CFG
  1804   PhaseCFG cfg(node_arena(), root(), m);
  1805   _cfg = &cfg;
  1807     NOT_PRODUCT( TracePhase t2("scheduler", &_t_scheduler, TimeCompiler); )
  1808     cfg.Dominators();
  1809     if (failing())  return;
  1811     NOT_PRODUCT( verify_graph_edges(); )
  1813     cfg.Estimate_Block_Frequency();
  1814     cfg.GlobalCodeMotion(m,unique(),proj_list);
  1816     print_method("Global code motion", 2);
  1818     if (failing())  return;
  1819     NOT_PRODUCT( verify_graph_edges(); )
  1821     debug_only( cfg.verify(); )
  1823   NOT_PRODUCT( verify_graph_edges(); )
  1825   PhaseChaitin regalloc(unique(),cfg,m);
  1826   _regalloc = &regalloc;
  1828     TracePhase t2("regalloc", &_t_registerAllocation, true);
  1829     // Perform any platform dependent preallocation actions.  This is used,
  1830     // for example, to avoid taking an implicit null pointer exception
  1831     // using the frame pointer on win95.
  1832     _regalloc->pd_preallocate_hook();
  1834     // Perform register allocation.  After Chaitin, use-def chains are
  1835     // no longer accurate (at spill code) and so must be ignored.
  1836     // Node->LRG->reg mappings are still accurate.
  1837     _regalloc->Register_Allocate();
  1839     // Bail out if the allocator builds too many nodes
  1840     if (failing())  return;
1843   // Prior to register allocation we kept empty basic blocks in case the
1844   // allocator needed a place to spill.  After register allocation we
  1845   // are not adding any new instructions.  If any basic block is empty, we
  1846   // can now safely remove it.
  1848     NOT_PRODUCT( TracePhase t2("blockOrdering", &_t_blockOrdering, TimeCompiler); )
  1849     cfg.remove_empty();
  1850     if (do_freq_based_layout()) {
  1851       PhaseBlockLayout layout(cfg);
  1852     } else {
  1853       cfg.set_loop_alignment();
  1855     cfg.fixup_flow();
  1858   // Perform any platform dependent postallocation verifications.
  1859   debug_only( _regalloc->pd_postallocate_verify_hook(); )
  1861   // Apply peephole optimizations
  1862   if( OptoPeephole ) {
  1863     NOT_PRODUCT( TracePhase t2("peephole", &_t_peephole, TimeCompiler); )
  1864     PhasePeephole peep( _regalloc, cfg);
  1865     peep.do_transform();
  1868   // Convert Nodes to instruction bits in a buffer
  1870     // %%%% workspace merge brought two timers together for one job
  1871     TracePhase t2a("output", &_t_output, true);
  1872     NOT_PRODUCT( TraceTime t2b(NULL, &_t_codeGeneration, TimeCompiler, false); )
  1873     Output();
  1876   print_method("Final Code");
  1878   // He's dead, Jim.
  1879   _cfg     = (PhaseCFG*)0xdeadbeef;
1880   _regalloc = (PhaseChaitin*)0xdeadbeef;
1881 }
  1884 //------------------------------dump_asm---------------------------------------
  1885 // Dump formatted assembly
  1886 #ifndef PRODUCT
  1887 void Compile::dump_asm(int *pcs, uint pc_limit) {
  1888   bool cut_short = false;
  1889   tty->print_cr("#");
  1890   tty->print("#  ");  _tf->dump();  tty->cr();
  1891   tty->print_cr("#");
  1893   // For all blocks
  1894   int pc = 0x0;                 // Program counter
  1895   char starts_bundle = ' ';
  1896   _regalloc->dump_frame();
  1898   Node *n = NULL;
  1899   for( uint i=0; i<_cfg->_num_blocks; i++ ) {
  1900     if (VMThread::should_terminate()) { cut_short = true; break; }
  1901     Block *b = _cfg->_blocks[i];
  1902     if (b->is_connector() && !Verbose) continue;
  1903     n = b->_nodes[0];
  1904     if (pcs && n->_idx < pc_limit)
  1905       tty->print("%3.3x   ", pcs[n->_idx]);
  1906     else
  1907       tty->print("      ");
  1908     b->dump_head( &_cfg->_bbs );
  1909     if (b->is_connector()) {
  1910       tty->print_cr("        # Empty connector block");
  1911     } else if (b->num_preds() == 2 && b->pred(1)->is_CatchProj() && b->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
  1912       tty->print_cr("        # Block is sole successor of call");
  1915     // For all instructions
  1916     Node *delay = NULL;
  1917     for( uint j = 0; j<b->_nodes.size(); j++ ) {
  1918       if (VMThread::should_terminate()) { cut_short = true; break; }
  1919       n = b->_nodes[j];
  1920       if (valid_bundle_info(n)) {
  1921         Bundle *bundle = node_bundling(n);
  1922         if (bundle->used_in_unconditional_delay()) {
  1923           delay = n;
  1924           continue;
  1926         if (bundle->starts_bundle())
  1927           starts_bundle = '+';
  1930       if (WizardMode) n->dump();
1932       if( !n->is_Region() &&    // Don't print in the Assembly
1933           !n->is_Phi() &&       // a few noisily useless nodes
  1934           !n->is_Proj() &&
  1935           !n->is_MachTemp() &&
  1936           !n->is_SafePointScalarObject() &&
  1937           !n->is_Catch() &&     // Would be nice to print exception table targets
  1938           !n->is_MergeMem() &&  // Not very interesting
  1939           !n->is_top() &&       // Debug info table constants
  1940           !(n->is_Con() && !n->is_Mach())// Debug info table constants
  1941           ) {
  1942         if (pcs && n->_idx < pc_limit)
  1943           tty->print("%3.3x", pcs[n->_idx]);
  1944         else
  1945           tty->print("   ");
  1946         tty->print(" %c ", starts_bundle);
  1947         starts_bundle = ' ';
  1948         tty->print("\t");
  1949         n->format(_regalloc, tty);
  1950         tty->cr();
  1953       // If we have an instruction with a delay slot, and have seen a delay,
  1954       // then back up and print it
  1955       if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
  1956         assert(delay != NULL, "no unconditional delay instruction");
  1957         if (WizardMode) delay->dump();
  1959         if (node_bundling(delay)->starts_bundle())
  1960           starts_bundle = '+';
  1961         if (pcs && n->_idx < pc_limit)
  1962           tty->print("%3.3x", pcs[n->_idx]);
  1963         else
  1964           tty->print("   ");
  1965         tty->print(" %c ", starts_bundle);
  1966         starts_bundle = ' ';
  1967         tty->print("\t");
  1968         delay->format(_regalloc, tty);
  1969         tty->print_cr("");
  1970         delay = NULL;
  1973       // Dump the exception table as well
  1974       if( n->is_Catch() && (Verbose || WizardMode) ) {
  1975         // Print the exception table for this offset
  1976         _handler_table.print_subtable_for(pc);
  1980     if (pcs && n->_idx < pc_limit)
  1981       tty->print_cr("%3.3x", pcs[n->_idx]);
  1982     else
  1983       tty->print_cr("");
  1985     assert(cut_short || delay == NULL, "no unconditional delay branch");
  1987   } // End of per-block dump
  1988   tty->print_cr("");
1990   if (cut_short)  tty->print_cr("*** disassembly is cut short ***");
1991 }
  1992 #endif
  1994 //------------------------------Final_Reshape_Counts---------------------------
  1995 // This class defines counters to help identify when a method
  1996 // may/must be executed using hardware with only 24-bit precision.
  1997 struct Final_Reshape_Counts : public StackObj {
  1998   int  _call_count;             // count non-inlined 'common' calls
  1999   int  _float_count;            // count float ops requiring 24-bit precision
  2000   int  _double_count;           // count double ops requiring more precision
  2001   int  _java_call_count;        // count non-inlined 'java' calls
  2002   int  _inner_loop_count;       // count loops which need alignment
  2003   VectorSet _visited;           // Visitation flags
  2004   Node_List _tests;             // Set of IfNodes & PCTableNodes
  2006   Final_Reshape_Counts() :
  2007     _call_count(0), _float_count(0), _double_count(0),
  2008     _java_call_count(0), _inner_loop_count(0),
  2009     _visited( Thread::current()->resource_area() ) { }
  2011   void inc_call_count  () { _call_count  ++; }
  2012   void inc_float_count () { _float_count ++; }
  2013   void inc_double_count() { _double_count++; }
  2014   void inc_java_call_count() { _java_call_count++; }
  2015   void inc_inner_loop_count() { _inner_loop_count++; }
  2017   int  get_call_count  () const { return _call_count  ; }
  2018   int  get_float_count () const { return _float_count ; }
  2019   int  get_double_count() const { return _double_count; }
  2020   int  get_java_call_count() const { return _java_call_count; }
  2021   int  get_inner_loop_count() const { return _inner_loop_count; }
  2022 };
  2024 static bool oop_offset_is_sane(const TypeInstPtr* tp) {
  2025   ciInstanceKlass *k = tp->klass()->as_instance_klass();
  2026   // Make sure the offset goes inside the instance layout.
  2027   return k->contains_field_offset(tp->offset());
2028   // Note that OffsetBot and OffsetTop are very negative.
2029 }
  2031 //------------------------------final_graph_reshaping_impl----------------------
  2032 // Implement items 1-5 from final_graph_reshaping below.
  2033 static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) {
  2035   if ( n->outcnt() == 0 ) return; // dead node
  2036   uint nop = n->Opcode();
  2038   // Check for 2-input instruction with "last use" on right input.
  2039   // Swap to left input.  Implements item (2).
  2040   if( n->req() == 3 &&          // two-input instruction
  2041       n->in(1)->outcnt() > 1 && // left use is NOT a last use
2042       (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not a data loop
  2043       n->in(2)->outcnt() == 1 &&// right use IS a last use
  2044       !n->in(2)->is_Con() ) {   // right use is not a constant
  2045     // Check for commutative opcode
  2046     switch( nop ) {
  2047     case Op_AddI:  case Op_AddF:  case Op_AddD:  case Op_AddL:
  2048     case Op_MaxI:  case Op_MinI:
  2049     case Op_MulI:  case Op_MulF:  case Op_MulD:  case Op_MulL:
  2050     case Op_AndL:  case Op_XorL:  case Op_OrL:
  2051     case Op_AndI:  case Op_XorI:  case Op_OrI: {
  2052       // Move "last use" input to left by swapping inputs
  2053       n->swap_edges(1, 2);
  2054       break;
  2056     default:
  2057       break;
  2061 #ifdef ASSERT
  2062   if( n->is_Mem() ) {
  2063     Compile* C = Compile::current();
  2064     int alias_idx = C->get_alias_index(n->as_Mem()->adr_type());
  2065     assert( n->in(0) != NULL || alias_idx != Compile::AliasIdxRaw ||
  2066             // oop will be recorded in oop map if load crosses safepoint
  2067             n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() ||
  2068                              LoadNode::is_immutable_value(n->in(MemNode::Address))),
  2069             "raw memory operations should have control edge");
  2071 #endif
  2072   // Count FPU ops and common calls, implements item (3)
  2073   switch( nop ) {
  2074   // Count all float operations that may use FPU
  2075   case Op_AddF:
  2076   case Op_SubF:
  2077   case Op_MulF:
  2078   case Op_DivF:
  2079   case Op_NegF:
  2080   case Op_ModF:
  2081   case Op_ConvI2F:
  2082   case Op_ConF:
  2083   case Op_CmpF:
  2084   case Op_CmpF3:
  2085   // case Op_ConvL2F: // longs are split into 32-bit halves
  2086     frc.inc_float_count();
  2087     break;
  2089   case Op_ConvF2D:
  2090   case Op_ConvD2F:
  2091     frc.inc_float_count();
  2092     frc.inc_double_count();
  2093     break;
  2095   // Count all double operations that may use FPU
  2096   case Op_AddD:
  2097   case Op_SubD:
  2098   case Op_MulD:
  2099   case Op_DivD:
  2100   case Op_NegD:
  2101   case Op_ModD:
  2102   case Op_ConvI2D:
  2103   case Op_ConvD2I:
  2104   // case Op_ConvL2D: // handled by leaf call
  2105   // case Op_ConvD2L: // handled by leaf call
  2106   case Op_ConD:
  2107   case Op_CmpD:
  2108   case Op_CmpD3:
  2109     frc.inc_double_count();
  2110     break;
  2111   case Op_Opaque1:              // Remove Opaque Nodes before matching
  2112   case Op_Opaque2:              // Remove Opaque Nodes before matching
  2113     n->subsume_by(n->in(1));
  2114     break;
  2115   case Op_CallStaticJava:
  2116   case Op_CallJava:
  2117   case Op_CallDynamicJava:
2118     frc.inc_java_call_count(); // Count java call site; fall through
  2119   case Op_CallRuntime:
  2120   case Op_CallLeaf:
  2121   case Op_CallLeafNoFP: {
  2122     assert( n->is_Call(), "" );
  2123     CallNode *call = n->as_Call();
  2124     // Count call sites where the FP mode bit would have to be flipped.
  2125     // Do not count uncommon runtime calls:
  2126     // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking,
  2127     // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ...
  2128     if( !call->is_CallStaticJava() || !call->as_CallStaticJava()->_name ) {
  2129       frc.inc_call_count();   // Count the call site
  2130     } else {                  // See if uncommon argument is shared
  2131       Node *n = call->in(TypeFunc::Parms);
  2132       int nop = n->Opcode();
  2133       // Clone shared simple arguments to uncommon calls, item (1).
  2134       if( n->outcnt() > 1 &&
  2135           !n->is_Proj() &&
  2136           nop != Op_CreateEx &&
  2137           nop != Op_CheckCastPP &&
  2138           nop != Op_DecodeN &&
  2139           !n->is_Mem() ) {
  2140         Node *x = n->clone();
  2141         call->set_req( TypeFunc::Parms, x );
  2144     break;
  2147   case Op_StoreD:
  2148   case Op_LoadD:
  2149   case Op_LoadD_unaligned:
  2150     frc.inc_double_count();
  2151     goto handle_mem;
  2152   case Op_StoreF:
  2153   case Op_LoadF:
  2154     frc.inc_float_count();
  2155     goto handle_mem;
  2157   case Op_StoreB:
  2158   case Op_StoreC:
  2159   case Op_StoreCM:
  2160   case Op_StorePConditional:
  2161   case Op_StoreI:
  2162   case Op_StoreL:
  2163   case Op_StoreIConditional:
  2164   case Op_StoreLConditional:
  2165   case Op_CompareAndSwapI:
  2166   case Op_CompareAndSwapL:
  2167   case Op_CompareAndSwapP:
  2168   case Op_CompareAndSwapN:
  2169   case Op_StoreP:
  2170   case Op_StoreN:
  2171   case Op_LoadB:
  2172   case Op_LoadUB:
  2173   case Op_LoadUS:
  2174   case Op_LoadI:
  2175   case Op_LoadUI2L:
  2176   case Op_LoadKlass:
  2177   case Op_LoadNKlass:
  2178   case Op_LoadL:
  2179   case Op_LoadL_unaligned:
  2180   case Op_LoadPLocked:
  2181   case Op_LoadLLocked:
  2182   case Op_LoadP:
  2183   case Op_LoadN:
  2184   case Op_LoadRange:
  2185   case Op_LoadS: {
  2186   handle_mem:
  2187 #ifdef ASSERT
  2188     if( VerifyOptoOopOffsets ) {
  2189       assert( n->is_Mem(), "" );
  2190       MemNode *mem  = (MemNode*)n;
  2191       // Check to see if address types have grounded out somehow.
  2192       const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr();
  2193       assert( !tp || oop_offset_is_sane(tp), "" );
  2195 #endif
  2196     break;
  2199   case Op_AddP: {               // Assert sane base pointers
  2200     Node *addp = n->in(AddPNode::Address);
  2201     assert( !addp->is_AddP() ||
  2202             addp->in(AddPNode::Base)->is_top() || // Top OK for allocation
  2203             addp->in(AddPNode::Base) == n->in(AddPNode::Base),
  2204             "Base pointers must match" );
  2205 #ifdef _LP64
  2206     if (UseCompressedOops &&
  2207         addp->Opcode() == Op_ConP &&
  2208         addp == n->in(AddPNode::Base) &&
  2209         n->in(AddPNode::Offset)->is_Con()) {
  2210       // Use addressing with narrow klass to load with offset on x86.
2211       // On SPARC, loading a 32-bit constant and decoding it take fewer
2212       // instructions (4) than loading a 64-bit constant (7).
  2213       // Do this transformation here since IGVN will convert ConN back to ConP.
  2214       const Type* t = addp->bottom_type();
  2215       if (t->isa_oopptr()) {
  2216         Node* nn = NULL;
  2218         // Look for existing ConN node of the same exact type.
  2219         Compile* C = Compile::current();
  2220         Node* r  = C->root();
  2221         uint cnt = r->outcnt();
  2222         for (uint i = 0; i < cnt; i++) {
  2223           Node* m = r->raw_out(i);
2224           if (m != NULL && m->Opcode() == Op_ConN &&
  2225               m->bottom_type()->make_ptr() == t) {
  2226             nn = m;
  2227             break;
  2230         if (nn != NULL) {
  2231           // Decode a narrow oop to match address
  2232           // [R12 + narrow_oop_reg<<3 + offset]
  2233           nn = new (C,  2) DecodeNNode(nn, t);
  2234           n->set_req(AddPNode::Base, nn);
  2235           n->set_req(AddPNode::Address, nn);
  2236           if (addp->outcnt() == 0) {
  2237             addp->disconnect_inputs(NULL);
  2242 #endif
  2243     break;
  2246 #ifdef _LP64
  2247   case Op_CastPP:
  2248     if (n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) {
  2249       Compile* C = Compile::current();
  2250       Node* in1 = n->in(1);
  2251       const Type* t = n->bottom_type();
  2252       Node* new_in1 = in1->clone();
  2253       new_in1->as_DecodeN()->set_type(t);
  2255       if (!Matcher::narrow_oop_use_complex_address()) {
  2256         //
  2257         // x86, ARM and friends can handle 2 adds in addressing mode
  2258         // and Matcher can fold a DecodeN node into address by using
  2259         // a narrow oop directly and do implicit NULL check in address:
  2260         //
  2261         // [R12 + narrow_oop_reg<<3 + offset]
  2262         // NullCheck narrow_oop_reg
  2263         //
  2264         // On other platforms (Sparc) we have to keep new DecodeN node and
  2265         // use it to do implicit NULL check in address:
  2266         //
  2267         // decode_not_null narrow_oop_reg, base_reg
  2268         // [base_reg + offset]
  2269         // NullCheck base_reg
  2270         //
2271         // Pin the new DecodeN node to the non-null path on these platforms
2272         // (SPARC) to record which NULL check the new DecodeN node belongs
2273         // to, so that it can be used as the value in implicit_null_check().
2274         //
2275         new_in1->set_req(0, n->in(0));
2276       }
  2278       n->subsume_by(new_in1);
  2279       if (in1->outcnt() == 0) {
  2280         in1->disconnect_inputs(NULL);
  2283     break;
  2285   case Op_CmpP:
  2286     // Do this transformation here to preserve CmpPNode::sub() and
  2287     // other TypePtr related Ideal optimizations (for example, ptr nullness).
  2288     if (n->in(1)->is_DecodeN() || n->in(2)->is_DecodeN()) {
  2289       Node* in1 = n->in(1);
  2290       Node* in2 = n->in(2);
  2291       if (!in1->is_DecodeN()) {
  2292         in2 = in1;
  2293         in1 = n->in(2);
  2295       assert(in1->is_DecodeN(), "sanity");
  2297       Compile* C = Compile::current();
  2298       Node* new_in2 = NULL;
  2299       if (in2->is_DecodeN()) {
  2300         new_in2 = in2->in(1);
  2301       } else if (in2->Opcode() == Op_ConP) {
  2302         const Type* t = in2->bottom_type();
  2303         if (t == TypePtr::NULL_PTR) {
2304           // Don't convert a CmpP null check into CmpN if the compressed
2305           // oops implicit null check is not generated; this allows a
2306           // normal oop implicit null check to be generated instead.
  2307           if (Matcher::gen_narrow_oop_implicit_null_checks())
  2308             new_in2 = ConNode::make(C, TypeNarrowOop::NULL_PTR);
  2309           //
2310           // This transformation together with the CastPP transformation above
2311           // will generate code for implicit NULL checks for compressed oops.
  2312           //
  2313           // The original code after Optimize()
  2314           //
  2315           //    LoadN memory, narrow_oop_reg
  2316           //    decode narrow_oop_reg, base_reg
  2317           //    CmpP base_reg, NULL
  2318           //    CastPP base_reg // NotNull
  2319           //    Load [base_reg + offset], val_reg
  2320           //
  2321           // after these transformations will be
  2322           //
  2323           //    LoadN memory, narrow_oop_reg
  2324           //    CmpN narrow_oop_reg, NULL
  2325           //    decode_not_null narrow_oop_reg, base_reg
  2326           //    Load [base_reg + offset], val_reg
  2327           //
  2328           // and the uncommon path (== NULL) will use narrow_oop_reg directly
  2329           // since narrow oops can be used in debug info now (see the code in
  2330           // final_graph_reshaping_walk()).
  2331           //
  2332           // At the end the code will be matched to
  2333           // on x86:
  2334           //
  2335           //    Load_narrow_oop memory, narrow_oop_reg
  2336           //    Load [R12 + narrow_oop_reg<<3 + offset], val_reg
  2337           //    NullCheck narrow_oop_reg
  2338           //
  2339           // and on sparc:
  2340           //
  2341           //    Load_narrow_oop memory, narrow_oop_reg
  2342           //    decode_not_null narrow_oop_reg, base_reg
  2343           //    Load [base_reg + offset], val_reg
  2344           //    NullCheck base_reg
  2345           //
  2346         } else if (t->isa_oopptr()) {
  2347           new_in2 = ConNode::make(C, t->make_narrowoop());
  2350       if (new_in2 != NULL) {
  2351         Node* cmpN = new (C, 3) CmpNNode(in1->in(1), new_in2);
  2352         n->subsume_by( cmpN );
  2353         if (in1->outcnt() == 0) {
  2354           in1->disconnect_inputs(NULL);
  2356         if (in2->outcnt() == 0) {
  2357           in2->disconnect_inputs(NULL);
  2361     break;
  2363   case Op_DecodeN:
  2364     assert(!n->in(1)->is_EncodeP(), "should be optimized out");
2365     // DecodeN could be pinned when it can't be folded into
2366     // an address expression, see the code for Op_CastPP above.
  2367     assert(n->in(0) == NULL || !Matcher::narrow_oop_use_complex_address(), "no control");
  2368     break;
  2370   case Op_EncodeP: {
  2371     Node* in1 = n->in(1);
  2372     if (in1->is_DecodeN()) {
  2373       n->subsume_by(in1->in(1));
  2374     } else if (in1->Opcode() == Op_ConP) {
  2375       Compile* C = Compile::current();
  2376       const Type* t = in1->bottom_type();
  2377       if (t == TypePtr::NULL_PTR) {
  2378         n->subsume_by(ConNode::make(C, TypeNarrowOop::NULL_PTR));
  2379       } else if (t->isa_oopptr()) {
  2380         n->subsume_by(ConNode::make(C, t->make_narrowoop()));
  2383     if (in1->outcnt() == 0) {
  2384       in1->disconnect_inputs(NULL);
  2386     break;
  2389   case Op_Proj: {
  2390     if (OptimizeStringConcat) {
  2391       ProjNode* p = n->as_Proj();
  2392       if (p->_is_io_use) {
  2393         // Separate projections were used for the exception path which
  2394         // are normally removed by a late inline.  If it wasn't inlined
  2395         // then they will hang around and should just be replaced with
  2396         // the original one.
  2397         Node* proj = NULL;
  2398         // Replace with just one
  2399         for (SimpleDUIterator i(p->in(0)); i.has_next(); i.next()) {
  2400           Node *use = i.get();
  2401           if (use->is_Proj() && p != use && use->as_Proj()->_con == p->_con) {
  2402             proj = use;
2403             break;
2404           }
2405         }
2406         assert(proj != NULL, "must be found");
  2407         p->subsume_by(proj);
  2410     break;
  2413   case Op_Phi:
  2414     if (n->as_Phi()->bottom_type()->isa_narrowoop()) {
2415       // The EncodeP optimization may create a Phi with the same edges
2416       // for all paths. It is not handled well by the register allocator.
  2417       Node* unique_in = n->in(1);
  2418       assert(unique_in != NULL, "");
  2419       uint cnt = n->req();
  2420       for (uint i = 2; i < cnt; i++) {
  2421         Node* m = n->in(i);
  2422         assert(m != NULL, "");
  2423         if (unique_in != m)
  2424           unique_in = NULL;
  2426       if (unique_in != NULL) {
2427         n->subsume_by(unique_in);
2428       }
2429     }
  2430     break;
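// Example of the degenerate Phi removed above (illustrative):
//
//   Phi(region, x, x, x)  ==>  x
//
// All data inputs are the same node, which can happen once EncodeP nodes
// have been eliminated on every incoming path.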
  2432 #endif
  2434   case Op_ModI:
  2435     if (UseDivMod) {
  2436       // Check if a%b and a/b both exist
  2437       Node* d = n->find_similar(Op_DivI);
  2438       if (d) {
  2439         // Replace them with a fused divmod if supported
  2440         Compile* C = Compile::current();
  2441         if (Matcher::has_match_rule(Op_DivModI)) {
  2442           DivModINode* divmod = DivModINode::make(C, n);
  2443           d->subsume_by(divmod->div_proj());
  2444           n->subsume_by(divmod->mod_proj());
  2445         } else {
  2446           // replace a%b with a-((a/b)*b)
  2447           Node* mult = new (C, 3) MulINode(d, d->in(2));
  2448           Node* sub  = new (C, 3) SubINode(d->in(1), mult);
2449           n->subsume_by( sub );
2450         }
2451       }
2452     }
  2453     break;
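// The identity used in the else-branch, spot-checked (illustrative; recall
// that Java/C++ integer division truncates toward zero, so it also holds
// for negative operands):
//
//   a = 17,  b = 5:  a/b = 3,   17 - (3 * 5)   =  2 ==  17 % 5
//   a = -17, b = 5:  a/b = -3,  -17 - (-3 * 5) = -2 == -17 % 5
//
// When the platform defines a DivModI match rule instead, the existing DivI
// and this ModI are both redirected to the two projections of one fused
// DivModI node, saving a division.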
  2455   case Op_ModL:
  2456     if (UseDivMod) {
  2457       // Check if a%b and a/b both exist
  2458       Node* d = n->find_similar(Op_DivL);
  2459       if (d) {
  2460         // Replace them with a fused divmod if supported
  2461         Compile* C = Compile::current();
  2462         if (Matcher::has_match_rule(Op_DivModL)) {
  2463           DivModLNode* divmod = DivModLNode::make(C, n);
  2464           d->subsume_by(divmod->div_proj());
  2465           n->subsume_by(divmod->mod_proj());
  2466         } else {
  2467           // replace a%b with a-((a/b)*b)
  2468           Node* mult = new (C, 3) MulLNode(d, d->in(2));
  2469           Node* sub  = new (C, 3) SubLNode(d->in(1), mult);
  2470           n->subsume_by( sub );
  2474     break;
  2476   case Op_Load16B:
  2477   case Op_Load8B:
  2478   case Op_Load4B:
  2479   case Op_Load8S:
  2480   case Op_Load4S:
  2481   case Op_Load2S:
  2482   case Op_Load8C:
  2483   case Op_Load4C:
  2484   case Op_Load2C:
  2485   case Op_Load4I:
  2486   case Op_Load2I:
  2487   case Op_Load2L:
  2488   case Op_Load4F:
  2489   case Op_Load2F:
  2490   case Op_Load2D:
  2491   case Op_Store16B:
  2492   case Op_Store8B:
  2493   case Op_Store4B:
  2494   case Op_Store8C:
  2495   case Op_Store4C:
  2496   case Op_Store2C:
  2497   case Op_Store4I:
  2498   case Op_Store2I:
  2499   case Op_Store2L:
  2500   case Op_Store4F:
  2501   case Op_Store2F:
  2502   case Op_Store2D:
  2503     break;
  2505   case Op_PackB:
  2506   case Op_PackS:
  2507   case Op_PackC:
  2508   case Op_PackI:
  2509   case Op_PackF:
  2510   case Op_PackL:
  2511   case Op_PackD:
  2512     if (n->req()-1 > 2) {
  2513       // Replace many operand PackNodes with a binary tree for matching
  2514       PackNode* p = (PackNode*) n;
  2515       Node* btp = p->binaryTreePack(Compile::current(), 1, n->req());
  2516       n->subsume_by(btp);
  2518     break;
  2519   case Op_Loop:
  2520   case Op_CountedLoop:
  2521     if (n->as_Loop()->is_inner_loop()) {
  2522       frc.inc_inner_loop_count();
  2524     break;
  2525   default:
  2526     assert( !n->is_Call(), "" );
  2527     assert( !n->is_Mem(), "" );
2528     break;
2529   }
  2531   // Collect CFG split points
  2532   if (n->is_MultiBranch())
2533     frc._tests.push(n);
2534 }
  2536 //------------------------------final_graph_reshaping_walk---------------------
2537 // Replacing Opaque nodes with their input in final_graph_reshaping_impl()
2538 // requires that the walk visits a node's inputs before visiting the node.
  2539 static void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) {
  2540   ResourceArea *area = Thread::current()->resource_area();
  2541   Unique_Node_List sfpt(area);
  2543   frc._visited.set(root->_idx); // first, mark node as visited
  2544   uint cnt = root->req();
  2545   Node *n = root;
  2546   uint  i = 0;
  2547   while (true) {
  2548     if (i < cnt) {
  2549       // Place all non-visited non-null inputs onto stack
  2550       Node* m = n->in(i);
  2551       ++i;
  2552       if (m != NULL && !frc._visited.test_set(m->_idx)) {
  2553         if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL)
  2554           sfpt.push(m);
  2555         cnt = m->req();
  2556         nstack.push(n, i); // put on stack parent and next input's index
  2557         n = m;
  2558         i = 0;
  2560     } else {
  2561       // Now do post-visit work
  2562       final_graph_reshaping_impl( n, frc );
  2563       if (nstack.is_empty())
  2564         break;             // finished
  2565       n = nstack.node();   // Get node from stack
  2566       cnt = n->req();
  2567       i = nstack.index();
  2568       nstack.pop();        // Shift to the next node on stack
  2572   // Skip next transformation if compressed oops are not used.
  2573   if (!UseCompressedOops || !Matcher::gen_narrow_oop_implicit_null_checks())
  2574     return;
2576   // Go over safepoint nodes to skip DecodeN nodes for debug edges.
2577   // This can be done at uncommon traps, or at any safepoint/call,
2578   // if the DecodeN node is referenced only in debug info.
  2579   while (sfpt.size() > 0) {
  2580     n = sfpt.pop();
  2581     JVMState *jvms = n->as_SafePoint()->jvms();
  2582     assert(jvms != NULL, "sanity");
  2583     int start = jvms->debug_start();
  2584     int end   = n->req();
  2585     bool is_uncommon = (n->is_CallStaticJava() &&
  2586                         n->as_CallStaticJava()->uncommon_trap_request() != 0);
  2587     for (int j = start; j < end; j++) {
  2588       Node* in = n->in(j);
  2589       if (in->is_DecodeN()) {
  2590         bool safe_to_skip = true;
2591         if (!is_uncommon) {
  2592           // Is it safe to skip?
  2593           for (uint i = 0; i < in->outcnt(); i++) {
  2594             Node* u = in->raw_out(i);
  2595             if (!u->is_SafePoint() ||
  2596                  u->is_Call() && u->as_Call()->has_non_debug_use(n)) {
2597               safe_to_skip = false;
2598             }
2599           }
2600         }
2601         if (safe_to_skip) {
2602           n->set_req(j, in->in(1));
2603         }
2604         if (in->outcnt() == 0) {
2605           in->disconnect_inputs(NULL);
2606         }
2607       }
2608     }
2609   }
2610 }
  2612 //------------------------------final_graph_reshaping--------------------------
  2613 // Final Graph Reshaping.
  2614 //
  2615 // (1) Clone simple inputs to uncommon calls, so they can be scheduled late
  2616 //     and not commoned up and forced early.  Must come after regular
  2617 //     optimizations to avoid GVN undoing the cloning.  Clone constant
2618 //     inputs to Loop Phis; these will be split by the allocator anyway.
  2619 //     Remove Opaque nodes.
  2620 // (2) Move last-uses by commutative operations to the left input to encourage
  2621 //     Intel update-in-place two-address operations and better register usage
  2622 //     on RISCs.  Must come after regular optimizations to avoid GVN Ideal
  2623 //     calls canonicalizing them back.
  2624 // (3) Count the number of double-precision FP ops, single-precision FP ops
  2625 //     and call sites.  On Intel, we can get correct rounding either by
  2626 //     forcing singles to memory (requires extra stores and loads after each
  2627 //     FP bytecode) or we can set a rounding mode bit (requires setting and
  2628 //     clearing the mode bit around call sites).  The mode bit is only used
  2629 //     if the relative frequency of single FP ops to calls is low enough.
  2630 //     This is a key transform for SPEC mpeg_audio.
  2631 // (4) Detect infinite loops; blobs of code reachable from above but not
  2632 //     below.  Several of the Code_Gen algorithms fail on such code shapes,
  2633 //     so we simply bail out.  Happens a lot in ZKM.jar, but also happens
2634 //     from time to time in other code (such as -Xcomp finalizer loops, etc).
  2635 //     Detection is by looking for IfNodes where only 1 projection is
  2636 //     reachable from below or CatchNodes missing some targets.
  2637 // (5) Assert for insane oop offsets in debug mode.
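// A worked instance of the item (3) heuristic as coded below (counts are
// hypothetical): with 100 float ops, no double ops, and 6 call sites,
// 10 * 6 < 100 holds, so the method flips the mode bit around call sites
// via set_24_bit_selection_and_mode(false, true).  With 15 call sites,
// 10 * 15 >= 100 and the default store/reload rounding is kept.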
  2639 bool Compile::final_graph_reshaping() {
2640   // An infinite loop may have been eliminated by the optimizer,
  2641   // in which case the graph will be empty.
  2642   if (root()->req() == 1) {
  2643     record_method_not_compilable("trivial infinite loop");
2644     return true;
2645   }
  2647   Final_Reshape_Counts frc;
  2649   // Visit everybody reachable!
  2650   // Allocate stack of size C->unique()/2 to avoid frequent realloc
  2651   Node_Stack nstack(unique() >> 1);
  2652   final_graph_reshaping_walk(nstack, root(), frc);
  2654   // Check for unreachable (from below) code (i.e., infinite loops).
  2655   for( uint i = 0; i < frc._tests.size(); i++ ) {
  2656     MultiBranchNode *n = frc._tests[i]->as_MultiBranch();
  2657     // Get number of CFG targets.
  2658     // Note that PCTables include exception targets after calls.
  2659     uint required_outcnt = n->required_outcnt();
  2660     if (n->outcnt() != required_outcnt) {
  2661       // Check for a few special cases.  Rethrow Nodes never take the
  2662       // 'fall-thru' path, so expected kids is 1 less.
  2663       if (n->is_PCTable() && n->in(0) && n->in(0)->in(0)) {
  2664         if (n->in(0)->in(0)->is_Call()) {
  2665           CallNode *call = n->in(0)->in(0)->as_Call();
  2666           if (call->entry_point() == OptoRuntime::rethrow_stub()) {
  2667             required_outcnt--;      // Rethrow always has 1 less kid
  2668           } else if (call->req() > TypeFunc::Parms &&
  2669                      call->is_CallDynamicJava()) {
2670             // Check for a null receiver. In that case, the optimizer has
2671             // detected that the virtual call will always result in a null
2672             // pointer exception. The fall-through projection of this CatchNode
2673             // will not be populated.
  2674             Node *arg0 = call->in(TypeFunc::Parms);
  2675             if (arg0->is_Type() &&
  2676                 arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) {
2677               required_outcnt--;
2678             }
  2679           } else if (call->entry_point() == OptoRuntime::new_array_Java() &&
  2680                      call->req() > TypeFunc::Parms+1 &&
  2681                      call->is_CallStaticJava()) {
2682             // Check for a negative array length. In that case, the optimizer
2683             // has detected that the allocation attempt will always result in
2684             // an exception. There is no fall-through projection of this CatchNode.
  2685             Node *arg1 = call->in(TypeFunc::Parms+1);
  2686             if (arg1->is_Type() &&
  2687                 arg1->as_Type()->type()->join(TypeInt::POS)->empty()) {
2688               required_outcnt--;
2689             }
2690           }
2691         }
2692       }
  2693       // Recheck with a better notion of 'required_outcnt'
  2694       if (n->outcnt() != required_outcnt) {
  2695         record_method_not_compilable("malformed control flow");
2696         return true;            // Not all targets reachable!
2697       }
  2699     // Check that I actually visited all kids.  Unreached kids
  2700     // must be infinite loops.
  2701     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++)
  2702       if (!frc._visited.test(n->fast_out(j)->_idx)) {
  2703         record_method_not_compilable("infinite loop");
2704         return true;            // Found unvisited kid; must be unreach
2705       }
2706   }
  2708   // If original bytecodes contained a mixture of floats and doubles
2709   // check if the optimizer has made it homogeneous, item (3).
  2710   if( Use24BitFPMode && Use24BitFP && UseSSE == 0 &&
  2711       frc.get_float_count() > 32 &&
  2712       frc.get_double_count() == 0 &&
  2713       (10 * frc.get_call_count() < frc.get_float_count()) ) {
2714     set_24_bit_selection_and_mode( false,  true );
2715   }
  2717   set_java_calls(frc.get_java_call_count());
  2718   set_inner_loops(frc.get_inner_loop_count());
  2720   // No infinite loops, no reason to bail out.
2721   return false;
2722 }
  2724 //-----------------------------too_many_traps----------------------------------
  2725 // Report if there are too many traps at the current method and bci.
  2726 // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded.
  2727 bool Compile::too_many_traps(ciMethod* method,
  2728                              int bci,
  2729                              Deoptimization::DeoptReason reason) {
  2730   ciMethodData* md = method->method_data();
  2731   if (md->is_empty()) {
  2732     // Assume the trap has not occurred, or that it occurred only
  2733     // because of a transient condition during start-up in the interpreter.
2734     return false;
2735   }
  2736   if (md->has_trap_at(bci, reason) != 0) {
  2737     // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
  2738     // Also, if there are multiple reasons, or if there is no per-BCI record,
  2739     // assume the worst.
  2740     if (log())
  2741       log()->elem("observe trap='%s' count='%d'",
  2742                   Deoptimization::trap_reason_name(reason),
  2743                   md->trap_count(reason));
  2744     return true;
  2745   } else {
  2746     // Ignore method/bci and see if there have been too many globally.
2747     return too_many_traps(reason, md);
2748   }
2749 }
  2751 // Less-accurate variant which does not require a method and bci.
  2752 bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
  2753                              ciMethodData* logmd) {
2754   if (trap_count(reason) >= (uint)PerMethodTrapLimit) {
  2755     // Too many traps globally.
  2756     // Note that we use cumulative trap_count, not just md->trap_count.
  2757     if (log()) {
  2758       int mcount = (logmd == NULL)? -1: (int)logmd->trap_count(reason);
  2759       log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'",
  2760                   Deoptimization::trap_reason_name(reason),
  2761                   mcount, trap_count(reason));
  2763     return true;
  2764   } else {
  2765     // The coast is clear.
  2766     return false;
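
// For example, with the default PerMethodTrapLimit of 100, this query
// starts returning true once the cumulative count for 'reason' reaches
// 100 in this compilation, even if no single bci has trapped that often.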

//--------------------------too_many_recompiles--------------------------------
// Report if there are too many recompiles at the current method and bci.
// Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff.
// Is not eager to return true, since this will cause the compiler to use
// Action_none for a trap point, to avoid too many recompilations.
bool Compile::too_many_recompiles(ciMethod* method,
                                  int bci,
                                  Deoptimization::DeoptReason reason) {
  ciMethodData* md = method->method_data();
  if (md->is_empty()) {
    // Assume the trap has not occurred, or that it occurred only
    // because of a transient condition during start-up in the interpreter.
    return false;
  }
  // Pick a cutoff point well within PerBytecodeRecompilationCutoff.
  uint bc_cutoff = (uint) PerBytecodeRecompilationCutoff / 8;
  uint m_cutoff  = (uint) PerMethodRecompilationCutoff / 2 + 1;  // not zero
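  // (With the default cutoffs of 200 and 400 this gives bc_cutoff == 25
  // and m_cutoff == 201.)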
  Deoptimization::DeoptReason per_bc_reason
    = Deoptimization::reason_recorded_per_bytecode_if_any(reason);
  if ((per_bc_reason == Deoptimization::Reason_none
       || md->has_trap_at(bci, reason) != 0)
      // The trap frequency measure we care about is the recompile count:
      && md->trap_recompiled_at(bci)
      && md->overflow_recompile_count() >= bc_cutoff) {
    // Do not emit a trap here if it has already caused recompilations.
    // Also, if there are multiple reasons, or if there is no per-BCI record,
    // assume the worst.
    if (log())
      log()->elem("observe trap='%s recompiled' count='%d' recompiles2='%d'",
                  Deoptimization::trap_reason_name(reason),
                  md->trap_count(reason),
                  md->overflow_recompile_count());
    return true;
  } else if (trap_count(reason) != 0
             && decompile_count() >= m_cutoff) {
    // Too many recompiles globally, and we have seen this sort of trap.
    // Use cumulative decompile_count, not just md->decompile_count.
    if (log())
      log()->elem("observe trap='%s' count='%d' mcount='%d' decompiles='%d' mdecompiles='%d'",
                  Deoptimization::trap_reason_name(reason),
                  md->trap_count(reason), trap_count(reason),
                  md->decompile_count(), decompile_count());
    return true;
  } else {
    // The coast is clear.
    return false;
  }
}

#ifndef PRODUCT
//------------------------------verify_graph_edges---------------------------
// Walk the Graph and verify that there is a one-to-one correspondence
// between Use-Def edges and Def-Use edges in the graph.
void Compile::verify_graph_edges(bool no_dead_code) {
  if (VerifyGraphEdges) {
    ResourceArea *area = Thread::current()->resource_area();
    Unique_Node_List visited(area);
    // Call recursive graph walk to check edges
    _root->verify_edges(visited);
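    // (verify_edges asserts, for every node reachable from the root, that
    // each input edge has a matching entry in the def's output list and
    // vice versa; it also fills 'visited' with all reachable nodes.)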
    if (no_dead_code) {
      // Now make sure that no visited node is used by an unvisited node.
      int dead_nodes = 0;
      Unique_Node_List checked(area);
      while (visited.size() > 0) {
        Node* n = visited.pop();
        checked.push(n);
        for (uint i = 0; i < n->outcnt(); i++) {
          Node* use = n->raw_out(i);
          if (checked.member(use))  continue;  // already checked
          if (visited.member(use))  continue;  // already in the graph
          if (use->is_Con())        continue;  // a dead ConNode is OK
          // At this point, we have found a dead node which is DU-reachable.
          if (dead_nodes++ == 0)
            tty->print_cr("*** Dead nodes reachable via DU edges:");
          use->dump(2);
          tty->print_cr("---");
          checked.push(use);  // No repeats; pretend it is now checked.
        }
      }
      assert(dead_nodes == 0, "using nodes must be reachable from root");
    }
  }
}
#endif

// The Compile object keeps track of failure reasons separately from the ciEnv.
// This is required because there is not quite a 1-1 relation between the
// ciEnv and its compilation task and the Compile object.  Note that one
// ciEnv might use two Compile objects, if C2Compiler::compile_method decides
// to backtrack and retry without subsuming loads.  Other than this backtracking
// behavior, the Compile's failure reason is quietly copied up to the ciEnv
// by the logic in C2Compiler.
void Compile::record_failure(const char* reason) {
  if (log() != NULL) {
    log()->elem("failure reason='%s' phase='compile'", reason);
  }
  if (_failure_reason == NULL) {
    // Record the first failure reason.
    _failure_reason = reason;
  }
  if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
    C->print_method(_failure_reason);
  }
  _root = NULL;  // flush the graph, too
}

Compile::TracePhase::TracePhase(const char* name, elapsedTimer* accumulator, bool dolog)
  : TraceTime(NULL, accumulator, false NOT_PRODUCT( || TimeCompiler ), false)
{
  if (dolog) {
    C = Compile::current();
    _log = C->log();
  } else {
    C = NULL;
    _log = NULL;
  }
  if (_log != NULL) {
    _log->begin_head("phase name='%s' nodes='%d'", name, C->unique());
    _log->stamp();
    _log->end_head();
  }
}

Compile::TracePhase::~TracePhase() {
  if (_log != NULL) {
    _log->done("phase nodes='%d'", C->unique());
  }
}
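
// TracePhase is a scoped (RAII) helper: constructing one opens a <phase>
// element in the compile log and starts the timer, and its destructor
// closes the element again.  Typical use elsewhere in this file:
//   { TracePhase t2("matcher", &_t_matcher, true); ... }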

//=============================================================================
// Two Constant's are equal when the type and the value are equal.
bool Compile::Constant::operator==(const Constant& other) {
  if (type()          != other.type()         )  return false;
  if (can_be_reused() != other.can_be_reused())  return false;
  // For floating point values we compare the bit pattern.
  switch (type()) {
  case T_FLOAT:   return (_value.i == other._value.i);
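  // (Comparing raw bits distinguishes +0.0 from -0.0 and gives NaNs a
  // stable identity, which a value comparison with == would not.)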
  case T_LONG:
  case T_DOUBLE:  return (_value.j == other._value.j);
  case T_OBJECT:
  case T_ADDRESS: return (_value.l == other._value.l);
  case T_VOID:    return (_value.l == other._value.l);  // jump-table entries
  default: ShouldNotReachHere();
  }
  return false;
}

// Emit constants grouped in the following order:
static BasicType type_order[] = {
  T_FLOAT,    // 32-bit
  T_OBJECT,   // 32 or 64-bit
  T_ADDRESS,  // 32 or 64-bit
  T_DOUBLE,   // 64-bit
  T_LONG,     // 64-bit
  T_VOID,     // 32 or 64-bit (jump-tables are at the end of the constant table for code emission reasons)
  T_ILLEGAL
};
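// (Grouping by type keeps same-sized entries adjacent, so the per-type
// alignment done in calculate_offsets_and_size wastes at most a few
// padding bytes per group instead of per entry.)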

static int type_to_size_in_bytes(BasicType t) {
  switch (t) {
  case T_LONG:    return sizeof(jlong  );
  case T_FLOAT:   return sizeof(jfloat );
  case T_DOUBLE:  return sizeof(jdouble);
    // We use T_VOID as marker for jump-table entries (labels) which
    // need an internal word relocation.
  case T_VOID:
  case T_ADDRESS:
  case T_OBJECT:  return sizeof(jobject);
  }
  ShouldNotReachHere();
  return -1;
}
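
// Note that T_OBJECT, T_ADDRESS and T_VOID entries are pointer-sized, so
// their size (and hence the table layout) differs between 32-bit and
// 64-bit VMs, as the comments on type_order above indicate.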

void Compile::ConstantTable::calculate_offsets_and_size() {
  int size = 0;
  for (int t = 0; type_order[t] != T_ILLEGAL; t++) {
    BasicType type = type_order[t];

    for (int i = 0; i < _constants.length(); i++) {
      Constant con = _constants.at(i);
      if (con.type() != type)  continue;  // Skip other types.

      // Align size for type.
      int typesize = type_to_size_in_bytes(con.type());
      size = align_size_up(size, typesize);

      // Set offset.
      con.set_offset(size);
      _constants.at_put(i, con);

      // Add type size.
      size = size + typesize;
    }
  }

  // Align size up to the next section start (which is insts; see
  // CodeBuffer::align_at_start).
  assert(_size == -1, "already set?");
  _size = align_size_up(size, CodeEntryAlignment);
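  // Example (64-bit VM): two floats and one double get offsets 0, 4 and 8,
  // giving size == 16, which is then rounded up to CodeEntryAlignment.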

  if (Matcher::constant_table_absolute_addressing) {
    set_table_base_offset(0);  // No table base offset required
  } else {
    if (UseRDPCForConstantTableBase) {
      // table base offset is set in MachConstantBaseNode::emit
    } else {
      // When RDPC is not used, the table base is set into the middle of
      // the constant table.
      int half_size = _size / 2;
      assert(half_size * 2 == _size, "sanity");
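      // Placing the base in the middle halves the largest base-to-entry
      // distance, which helps keep every offset within the signed
      // immediate displacement of the load instructions (e.g. simm13
      // loads on SPARC).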
      set_table_base_offset(-half_size);
    }
  }
}

void Compile::ConstantTable::emit(CodeBuffer& cb) {
  MacroAssembler _masm(&cb);
  for (int t = 0; type_order[t] != T_ILLEGAL; t++) {
    BasicType type = type_order[t];

    for (int i = 0; i < _constants.length(); i++) {
      Constant con = _constants.at(i);
      if (con.type() != type)  continue;  // Skip other types.

      address constant_addr;
      switch (con.type()) {
      case T_LONG:   constant_addr = _masm.long_constant(  con.get_jlong()  ); break;
      case T_FLOAT:  constant_addr = _masm.float_constant( con.get_jfloat() ); break;
      case T_DOUBLE: constant_addr = _masm.double_constant(con.get_jdouble()); break;
      case T_OBJECT: {
        jobject obj = con.get_jobject();
        int oop_index = _masm.oop_recorder()->find_index(obj);
        constant_addr = _masm.address_constant((address) obj, oop_Relocation::spec(oop_index));
        break;
      }
      case T_ADDRESS: {
        address addr = (address) con.get_jobject();
        constant_addr = _masm.address_constant(addr);
        break;
      }
      // We use T_VOID as marker for jump-table entries (labels) which
      // need an internal word relocation.
      case T_VOID: {
        // Write a dummy word.  The real value is filled in later
        // in fill_jump_table.
        address addr = (address) con.get_jobject();
        constant_addr = _masm.address_constant(addr);
        break;
      }
      default: ShouldNotReachHere();
      }
      assert(constant_addr != NULL, "consts section too small");
      assert((constant_addr - _masm.code()->consts()->start()) == con.offset(),
             err_msg("must be: %d == %d", constant_addr - _masm.code()->consts()->start(), con.offset()));
    }
  }
}

int Compile::ConstantTable::find_offset(Constant& con) const {
  int idx = _constants.find(con);
  assert(idx != -1, "constant must be in constant table");
  int offset = _constants.at(idx).offset();
  assert(offset != -1, "constant table not emitted yet?");
  return offset;
}
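
// Append a constant unless it is marked reusable and an equal reusable
// entry is already present; non-reusable entries (e.g. jump-table labels)
// are always appended.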
void Compile::ConstantTable::add(Constant& con) {
  if (con.can_be_reused()) {
    int idx = _constants.find(con);
    if (idx != -1 && _constants.at(idx).can_be_reused()) {
      return;
    }
  }
  (void) _constants.append(con);
}

Compile::Constant Compile::ConstantTable::add(BasicType type, jvalue value) {
  Constant con(type, value);
  add(con);
  return con;
}

Compile::Constant Compile::ConstantTable::add(MachOper* oper) {
  jvalue value;
  BasicType type = oper->type()->basic_type();
  switch (type) {
  case T_LONG:    value.j = oper->constantL(); break;
  case T_FLOAT:   value.f = oper->constantF(); break;
  case T_DOUBLE:  value.d = oper->constantD(); break;
  case T_OBJECT:
  case T_ADDRESS: value.l = (jobject) oper->constant(); break;
  default: ShouldNotReachHere();
  }
  return add(type, value);
}

Compile::Constant Compile::ConstantTable::allocate_jump_table(MachConstantNode* n) {
  jvalue value;
  // We can use the node pointer here to identify the right jump-table
  // as this method is called from Compile::Fill_buffer right before
  // the MachNodes are emitted and the jump-table is filled (meaning the
  // MachNode pointers do not change anymore).
  value.l = (jobject) n;
  Constant con(T_VOID, value, false);  // Labels of a jump-table cannot be reused.
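  // Reserve one table slot per jump target (one per out-edge of n); each
  // slot initially holds the node pointer as a sentinel, which
  // fill_jump_table below overwrites with the real target address.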
  for (uint i = 0; i < n->outcnt(); i++) {
    add(con);
  }
  return con;
}

void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const {
  // If called from Compile::scratch_emit_size do nothing.
  if (Compile::current()->in_scratch_emit_size())  return;

  assert(labels.is_nonempty(), "must be");
  assert((uint) labels.length() == n->outcnt(), err_msg("must be equal: %d == %d", labels.length(), n->outcnt()));

  // Since MachConstantNode::constant_offset() also contains
  // table_base_offset() we need to subtract the table_base_offset()
  // to get the plain offset into the constant table.
  int offset = n->constant_offset() - table_base_offset();

  MacroAssembler _masm(&cb);
  address* jump_table_base = (address*) (_masm.code()->consts()->start() + offset);

  for (int i = 0; i < labels.length(); i++) {
    address* constant_addr = &jump_table_base[i];
    assert(*constant_addr == (address) n, "all jump-table entries must contain node pointer");
    *constant_addr = cb.consts()->target(*labels.at(i), (address) constant_addr);
    cb.consts()->relocate((address) constant_addr, relocInfo::internal_word_type);
  }
}
