src/share/vm/opto/doCall.cpp

author:      neliasso
date:        Tue, 16 Apr 2013 10:08:41 +0200
changeset:   4949:8373c19be854
parent:      4447:f1de9dbc914e
child:       5110:6f3fd5150b67
permissions: -rw-r--r--

8011621: live_ranges_in_separate_class.patch
Reviewed-by: kvn, roland
Contributed-by: niclas.adlertz@oracle.com

/*
 * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/cfgnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/sharedRuntime.hpp"

void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
  if (TraceTypeProfile || PrintInlining NOT_PRODUCT(|| PrintOptoInlining)) {
    outputStream* out = tty;
    if (!PrintInlining) {
      if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
        method->print_short_name();
        tty->cr();
      }
      CompileTask::print_inlining(prof_method, depth, bci);
    } else {
      out = C->print_inlining_stream();
    }
    CompileTask::print_inline_indent(depth, out);
    out->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
    stringStream ss;
    prof_klass->name()->print_symbol_on(&ss);
    out->print(ss.as_string());
    out->cr();
  }
}

CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
                                       JVMState* jvms, bool allow_inline,
                                       float prof_factor, bool allow_intrinsics, bool delayed_forbidden) {
  ciMethod*       caller   = jvms->method();
  int             bci      = jvms->bci();
  Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
  guarantee(callee != NULL, "failed method resolution");

  // Dtrace currently doesn't work unless all calls are vanilla
  if (env()->dtrace_method_probes()) {
    allow_inline = false;
  }

  // Note: When we get profiling during stage-1 compiles, we want to pull
  // from more specific profile data which pertains to this inlining.
  // Right now, ignore the information in jvms->caller(), and do method[bci].
  ciCallProfile profile = caller->call_profile_at_bci(bci);

  // See how many times this site has been invoked.
  int site_count = profile.count();
  int receiver_count = -1;
  if (call_does_dispatch && UseTypeProfile && profile.has_receiver(0)) {
    // Receivers in the profile structure are ordered by call counts
    // so that the most called (major) receiver is profile.receiver(0).
    receiver_count = profile.receiver_count(0);
  }

  CompileLog* log = this->log();
  if (log != NULL) {
    int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
    int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
    log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
                    log->identify(callee), site_count, prof_factor);
    if (call_does_dispatch)  log->print(" virtual='1'");
    if (allow_inline)     log->print(" inline='1'");
    if (receiver_count >= 0) {
      log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
      if (profile.has_receiver(1)) {
        log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
      }
    }
    log->end_elem();
  }

  // Special case the handling of certain common, profitable library
  // methods.  If these methods are replaced with specialized code,
  // then we return it as the inlined version of the call.
  // We do this before the strict f.p. check below because the
  // intrinsics handle strict f.p. correctly.
  if (allow_inline && allow_intrinsics) {
    CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
    if (cg != NULL) {
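      // A predicted intrinsic only applies when a runtime check on the
      // arguments succeeds, so pair it with a conventionally inlined copy
      // of the callee to serve as the fallback path.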
      if (cg->is_predicted()) {
        // Code without intrinsic but, hopefully, inlined.
        CallGenerator* inline_cg = this->call_generator(callee,
              vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, false);
        if (inline_cg != NULL) {
          cg = CallGenerator::for_predicted_intrinsic(cg, inline_cg);
        }
      }
      return cg;
    }
  }

  // Do method handle calls.
  // NOTE: This must happen before normal inlining logic below since
  // MethodHandle.invoke* are native methods which obviously don't
  // have bytecodes and so normal inlining fails.
  if (callee->is_method_handle_intrinsic()) {
    CallGenerator* cg = CallGenerator::for_method_handle_call(jvms, caller, callee, delayed_forbidden);
    assert(cg == NULL || !delayed_forbidden || !cg->is_late_inline() || cg->is_mh_late_inline(), "unexpected CallGenerator");
    return cg;
  }

  // Do not inline strict fp into non-strict code, or the reverse
  if (caller->is_strict() ^ callee->is_strict()) {
    allow_inline = false;
  }

  // Attempt to inline...
  if (allow_inline) {
    // The profile data is only partly attributable to this caller,
    // scale back the call site information.
    float past_uses = jvms->method()->scale_count(site_count, prof_factor);
    // This is the number of times we expect the call code to be used.
    float expected_uses = past_uses;

    // Try inlining a bytecoded method:
    if (!call_does_dispatch) {
      InlineTree* ilt;
      if (UseOldInlining) {
        ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
      } else {
        // Make a disembodied, stateless ILT.
        // TO DO:  When UseOldInlining is removed, copy the ILT code elsewhere.
        float site_invoke_ratio = prof_factor;
        // Note:  ilt is for the root of this parse, not the present call site.
        ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, MaxInlineLevel);
      }
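      // ok_to_inline() runs the inlining heuristics (size, depth, call
      // frequency, ...).  It returns NULL when the site should stay a call,
      // otherwise a WarmCallInfo whose temperature (cold/warm/hot) drives
      // the decisions below.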
      WarmCallInfo scratch_ci;
      if (!UseOldInlining)
        scratch_ci.init(jvms, callee, profile, prof_factor);
      bool should_delay = false;
      WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci, should_delay);
      assert(ci != &scratch_ci, "do not let this pointer escape");
      bool allow_inline   = (ci != NULL && !ci->is_cold());
      bool require_inline = (allow_inline && ci->is_hot());

      if (allow_inline) {
        CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);

        if (require_inline && cg != NULL) {
          // Delay the inlining of this method to give us the
          // opportunity to perform some high level optimizations
          // first.
          if (should_delay_inlining(callee, jvms)) {
            assert(!delayed_forbidden, "strange");
            return CallGenerator::for_string_late_inline(callee, cg);
          } else if ((should_delay || AlwaysIncrementalInline) && !delayed_forbidden) {
            return CallGenerator::for_late_inline(callee, cg);
          }
        }
        if (cg == NULL || should_delay) {
          // Fall through.
        } else if (require_inline || !InlineWarmCalls) {
          return cg;
        } else {
          CallGenerator* cold_cg = call_generator(callee, vtable_index, call_does_dispatch, jvms, false, prof_factor);
          return CallGenerator::for_warm_call(ci, cold_cg, cg);
        }
      }
    }

    // Try using the type profile.
    if (call_does_dispatch && site_count > 0 && receiver_count > 0) {
      // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
      bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
      ciMethod* receiver_method = NULL;
      if (have_major_receiver || profile.morphism() == 1 ||
          (profile.morphism() == 2 && UseBimorphicInlining)) {
        // receiver_method = profile.method();
        // Profiles do not suggest methods now.  Look it up in the major receiver.
        receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                      profile.receiver(0));
      }
      if (receiver_method != NULL) {
        // The single majority receiver sufficiently outweighs the minority.
        CallGenerator* hit_cg = this->call_generator(receiver_method,
              vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);
        if (hit_cg != NULL) {
          // Look up second receiver.
          CallGenerator* next_hit_cg = NULL;
          ciMethod* next_receiver_method = NULL;
          if (profile.morphism() == 2 && UseBimorphicInlining) {
            next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                               profile.receiver(1));
            if (next_receiver_method != NULL) {
              next_hit_cg = this->call_generator(next_receiver_method,
                                  vtable_index, !call_does_dispatch, jvms,
                                  allow_inline, prof_factor);
              if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
                  have_major_receiver && UseOnlyInlinedBimorphic) {
                  // Skip if we can't inline second receiver's method
                  next_hit_cg = NULL;
              }
            }
          }
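          // Build the "miss" path for the emitted receiver-klass check: on a
          // mispredicted receiver we either take an uncommon trap (and maybe
          // recompile), or fall back to a real virtual dispatch.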
          CallGenerator* miss_cg;
          Deoptimization::DeoptReason reason = (profile.morphism() == 2) ?
                                    Deoptimization::Reason_bimorphic :
                                    Deoptimization::Reason_class_check;
          if (( profile.morphism() == 1 ||
               (profile.morphism() == 2 && next_hit_cg != NULL) ) &&
              !too_many_traps(jvms->method(), jvms->bci(), reason)
             ) {
            // Generate uncommon trap for class check failure path
            // in case of monomorphic or bimorphic virtual call site.
            miss_cg = CallGenerator::for_uncommon_trap(callee, reason,
                        Deoptimization::Action_maybe_recompile);
          } else {
            // Generate virtual call for class check failure path
            // in case of polymorphic virtual call site.
            miss_cg = CallGenerator::for_virtual_call(callee, vtable_index);
          }
          if (miss_cg != NULL) {
            if (next_hit_cg != NULL) {
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
              // We don't need to record dependency on a receiver here and below.
              // Whenever we inline, the dependency is added by Parse::Parse().
              miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
            }
            if (miss_cg != NULL) {
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
              CallGenerator* cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
              if (cg != NULL)  return cg;
            }
          }
        }
      }
    }
  }

  // There was no special inlining tactic, or it bailed out.
  // Use a more generic tactic, like a simple call.
  if (call_does_dispatch) {
    return CallGenerator::for_virtual_call(callee, vtable_index);
  } else {
    // Class Hierarchy Analysis or Type Profile reveals a unique target,
    // or it is a static or special call.
    return CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms));
  }
}

// Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics.
bool Compile::should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
  if (has_stringbuilder()) {

    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
         call_method->holder() == C->env()->StringBuffer_klass()) &&
        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
      // Delay SB calls only when called from non-SB code
      return false;
    }

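    // These StringBuilder/StringBuffer intrinsics are the calls that the
    // string concatenation optimizer pattern-matches.  Inlining them now
    // would destroy the append-chain shape it looks for, so keep them as
    // calls until that phase has had its chance.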
    switch (call_method->intrinsic_id()) {
      case vmIntrinsics::_StringBuilder_void:
      case vmIntrinsics::_StringBuilder_int:
      case vmIntrinsics::_StringBuilder_String:
      case vmIntrinsics::_StringBuilder_append_char:
      case vmIntrinsics::_StringBuilder_append_int:
      case vmIntrinsics::_StringBuilder_append_String:
      case vmIntrinsics::_StringBuilder_toString:
      case vmIntrinsics::_StringBuffer_void:
      case vmIntrinsics::_StringBuffer_int:
      case vmIntrinsics::_StringBuffer_String:
      case vmIntrinsics::_StringBuffer_append_char:
      case vmIntrinsics::_StringBuffer_append_int:
      case vmIntrinsics::_StringBuffer_append_String:
      case vmIntrinsics::_StringBuffer_toString:
      case vmIntrinsics::_Integer_toString:
        return true;

      case vmIntrinsics::_String_String:
        {
          Node* receiver = jvms->map()->in(jvms->argoff() + 1);
          if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
            CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
            ciMethod* m = csj->method();
            if (m != NULL &&
                (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
                 m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
              // Delay String.<init>(new SB())
              return true;
          }
          return false;
        }

      default:
        return false;
    }
  }
  return false;
}

// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
  // Additional inputs to consider...
  // bc      = bc()
  // caller  = method()
  // iter().get_method_holder_index()
  assert( dest_method->is_loaded(), "ciTypeFlow should not let us get here" );
  // Interface classes can be loaded & linked and never get around to
  // being initialized.  Uncommon-trap for not-initialized static or
  // v-calls.  Let interface calls happen.
  ciInstanceKlass* holder_klass = dest_method->holder();
  if (!holder_klass->is_being_initialized() &&
      !holder_klass->is_initialized() &&
      !holder_klass->is_interface()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  holder_klass);
    return true;
  }

  assert(dest_method->is_loaded(), "dest_method: typeflow responsibility");
  return false;
}

//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  const bool is_virtual = bc() == Bytecodes::_invokevirtual;
  const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  const bool has_receiver = Bytecodes::has_receiver(bc());

  // Find target being called
  bool             will_link;
  ciSignature*     declared_signature = NULL;
  ciMethod*        orig_callee  = iter().get_method(will_link, &declared_signature);  // callee in the bytecode
  ciInstanceKlass* holder_klass = orig_callee->holder();
  ciKlass*         holder       = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
  assert(declared_signature != NULL, "cannot be null");

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(orig_callee, klass)) {
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
      orig_callee->print_name(); tty->cr();
    }
#endif
    return;
  }
  assert(holder_klass->is_loaded(), "");
  //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc");  // XXX invokehandle (cur_bc_raw)
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // orig_callee is the resolved callee whose signature includes the
  // appendix argument.
  const int nargs = orig_callee->arg_size();
  const bool is_signature_polymorphic = MethodHandles::is_signature_polymorphic(orig_callee->intrinsic_id());

  // Push appendix argument (MethodType, CallSite, etc.), if there is one.
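  // (The appendix is a constant produced by JSR 292 call-site resolution;
  // it is passed as a hidden trailing argument and is already counted in
  // orig_callee's arg_size() above.)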
  if (iter().has_appendix()) {
    ciObject* appendix_arg = iter().get_appendix();
    const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg);
    Node* appendix_arg_node = _gvn.makecon(appendix_arg_type);
    push(appendix_arg_node);
  }

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Try to get the most accurate receiver type
  ciMethod* callee             = orig_callee;
  int       vtable_index       = Method::invalid_vtable_index;
  bool      call_does_dispatch = false;

  if (is_virtual_or_interface) {
    Node*             receiver_node = stack(sp() - nargs);
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
    callee = C->optimize_virtual_call(method(), bci(), klass, orig_callee, receiver_type,
                                      is_virtual,
                                      call_does_dispatch, vtable_index);  // out-parameters
  }

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors);

  // ---------------------
  dec_sp(nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor());

  // NOTE:  Don't use orig_callee and callee after this point!  Use cg->method() instead.
  orig_callee = callee = NULL;

  // ---------------------
  // Round double arguments before call
  round_double_arguments(cg->method());

#ifndef PRODUCT
  // bump global counters for calls
  count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());

  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  // Bump method data counters (We profile *before* the call is made
  // because exceptions don't return to the call site.)
  profile_call(receiver);

  JVMState* new_jvms = cg->generate(jvms);
  if (new_jvms == NULL) {
    // When inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;

    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize. Should always be possible to
    // get a normal java call that may inline in that case
    cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false);
    if ((new_jvms = cg->generate(jvms)) == NULL) {
      guarantee(failing(), "call failed to generate:  calls should work");
      return;
    }
  }

  if (cg->is_inline()) {
    // Accumulate has_loops estimate
    C->set_has_loops(C->has_loops() || cg->method()->has_loops());
    C->env()->notice_inlined_method(cg->method());
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(cg->method());

    ciType* rtype = cg->method()->return_type();
    ciType* ctype = declared_signature->return_type();
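
    // For call sites with an appendix (invokedynamic/invokehandle) and for
    // signature-polymorphic calls, the type declared at the call site may
    // legitimately differ from the linked method's return type, so the
    // pushed value is adapted below.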
    if (Bytecodes::has_optional_appendix(iter().cur_bc_raw()) || is_signature_polymorphic) {
      // Be careful here with return types.
      if (ctype != rtype) {
        BasicType rt = rtype->basic_type();
        BasicType ct = ctype->basic_type();
        if (ct == T_VOID) {
          // It's OK for a method to return a value that is discarded.
          // The discarding does not require any special action from the caller.
          // The Java code knows this, at VerifyType.isNullConversion.
          pop_node(rt);  // whatever it was, pop it
        } else if (rt == T_INT || is_subword_type(rt)) {
          // Nothing.  These cases are handled in lambda form bytecode.
          assert(ct == T_INT || is_subword_type(ct), err_msg_res("must match: rt=%s, ct=%s", type2name(rt), type2name(ct)));
        } else if (rt == T_OBJECT || rt == T_ARRAY) {
          assert(ct == T_OBJECT || ct == T_ARRAY, err_msg_res("rt=%s, ct=%s", type2name(rt), type2name(ct)));
          if (ctype->is_loaded()) {
            const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass());
            const Type*       sig_type = TypeOopPtr::make_from_klass(ctype->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* retnode = pop();
              Node* cast_obj = _gvn.transform(new (C) CheckCastPPNode(control(), retnode, sig_type));
              push(cast_obj);
            }
          }
        } else {
          assert(rt == ct, err_msg_res("unexpected mismatch: rt=%s, ct=%s", type2name(rt), type2name(ct)));
          // push a zero; it's better than getting an oop/int mismatch
          pop_node(rt);
          Node* retnode = zerocon(ct);
          push_node(ct, retnode);
        }
        // Now that the value is well-behaved, continue with the call-site type.
        rtype = ctype;
      }
    } else {
      // Symbolic resolution enforces the types to be the same.
      // NOTE: We must relax the assert for unloaded types because two
      // different ciType instances of the same unloaded class type
      // can appear to be "loaded" by different loaders (depending on
      // the accessing class).
      assert(!rtype->is_loaded() || !ctype->is_loaded() || rtype == ctype,
             err_msg_res("mismatched return types: rtype=%s, ctype=%s", rtype->name(), ctype->name()));
    }

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!rtype->is_loaded()) {
#ifndef PRODUCT
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
        cg->method()->print_name(); tty->cr();
      }
#endif
      if (C->log() != NULL) {
        C->log()->elem("assert_null reason='return' klass='%d'",
                       C->log()->identify(rtype));
      }
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      null_assert(peek());
      set_bci(iter().cur_bci()); // put it back
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}

//---------------------------catch_call_exceptions-----------------------------
// Put Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
  GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
  GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
  GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);

  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h        = handlers.handler();
    int                 h_bci    = h->handler_bci();
    ciInstanceKlass*    h_klass  = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded->contains(h_bci)) {
        /* We've already seen an unloaded exception with h_bci,
           so don't duplicate. Duplication will cause the CatchNode to be
           unnecessarily large. See 4713716. */
        continue;
      } else {
        saw_unloaded->append(h_bci);
      }
    }
    const Type*         h_extype = TypeOopPtr::make_from_klass(h_klass);
    // (We use make_from_klass because it respects UseUniqueSubclasses.)
    h_extype = h_extype->join(TypeInstPtr::NOTNULL);
    assert(!h_extype->empty(), "sanity");
    // Note:  It's OK if the BCIs repeat themselves.
    bcis->append(h_bci);
    extypes->append(h_extype);
  }
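
  // The CatchNode gets one projection per recorded handler plus one for the
  // normal fall-through path; handler i is reached through the CatchProj
  // with index i+1, created in the loop below.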
  int len = bcis->length();
  CatchNode *cn = new (C) CatchNode(control(), i_o, len+1);
  Node *catch_ = _gvn.transform(cn);

  // now branch with the exception state to each of the (potential)
  // handlers
  for(int i=0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis->at(i);
    Node* ctrl = _gvn.transform( new (C) CatchProjNode(catch_, i+1,handler_bci));
    // This handler cannot happen?
    if (ctrl == top())  continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes->at(i)->is_instptr();
    Node *ex_oop = _gvn.transform(new (C) CreateExNode(extypes->at(i), ctrl, i_o));

    // Handle unloaded exception classes.
    if (saw_unloaded->contains(handler_bci)) {
      // An unloaded exception type is coming here.  Do an uncommon trap.
#ifndef PRODUCT
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions.  But, watch for it.
      if ((Verbose || WizardMode) && extype->is_loaded()) {
        tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", bci());
        method()->print_name(); tty->cr();
      } else if (PrintOpto && (Verbose || WizardMode)) {
        tty->print("Bailing out on unloaded exception type ");
        extype->klass()->print_name();
        tty->print(" at bci:%d in ", bci());
        method()->print_name(); tty->cr();
      }
#endif
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded,
                    Deoptimization::Action_reinterpret,
                    extype->klass(), "!loaded exception");
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) {     // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {                      // Else jump to corresponding handler
      push_ex_oop(ex_oop);        // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note:  If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform( new (C) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}

//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses.  We do a Deutsch-Shiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: We have some handlers with subklasses, or with klasses that are not
// loaded at compile-time.  We have to call the runtime to resolve the exception.
// So we insert a RethrowCall and all the logic that goes with it.
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(NULL) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
  NOT_PRODUCT(if (ex_type==NULL) tty->print_cr("*** Exception not InstPtr"));
  if (ex_type == NULL)
    ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->klass()->as_instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state.  (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  Node* ex_klass_node = NULL;
  if (has_ex_handler() && !ex_type->klass_is_exact()) {
    Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
    ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );

    // Compute the exception klass a little more cleverly.
    // Obvious solution is to simply do a LoadKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
    // each arm of the Phi.  If I know something clever about the exceptions
    // I'm loading the class from, I can replace the LoadKlass with the
    // klass constant for the exception oop.
    if( ex_node->is_Phi() ) {
      ex_klass_node = new (C) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT );
      for( uint i = 1; i < ex_node->req(); i++ ) {
        Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() );
        Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
        ex_klass_node->init_req( i, k );
      }
      _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);

    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.

  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  for (;!handlers.is_done(); handlers.next()) {
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method.  We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      push_ex_oop(ex_node);        // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print_cr("  Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
      }
#endif
      merge_exception(handler_bci); // jump to handler
      return;                   // No more handling to be done here!
    }

    // Get the handler's klass
    ciInstanceKlass* klass = handler->catch_klass();

    if (!klass->is_loaded()) {  // klass is not loaded?
      // fall through into catch_call_exceptions which will emit a
      // handler with an uncommon trap.
      break;
    }

    if (klass->is_interface())  // should not happen, but...
      break;                    // bail out

    // Check the type of the exception against the catch type
    const TypeKlassPtr *tk = TypeKlassPtr::make(klass);
    Node* con = _gvn.makecon(tk);
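    // gen_subtype_check splits control: it returns the path taken when the
    // exception klass is NOT a subtype of the handler's catch klass, and
    // leaves the current control on the matching (subtype) path.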
    Node* not_subtype_ctrl = gen_subtype_check(ex_klass_node, con);
    if (!stopped()) {
      PreserveJVMState pjvms(this);
      const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr();
      assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness");
      Node* ex_oop = _gvn.transform(new (C) CheckCastPPNode(control(), ex_node, tinst));
      push_ex_oop(ex_oop);      // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print("  Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
        klass->print_name();
        tty->cr();
      }
#endif
      merge_exception(handler_bci);
    }
    set_control(not_subtype_ctrl);

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note:  This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    OptoRuntime::rethrow_Type(),
                    OptoRuntime::rethrow_stub(),
                    NULL, NULL,
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}

// (Note:  Moved add_debug_info into GraphKit::add_safepoint_edges.)

#ifndef PRODUCT
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
  if( CountCompiledCalls ) {
    if( at_method_entry ) {
      // bump invocation counter if top method (for statistics)
      if (CountCompiledCalls && depth() == 1) {
        const TypePtr* addr_type = TypeMetadataPtr::make(method());
        Node* adr1 = makecon(addr_type);
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(Method::compiled_invocation_counter_offset()));
        increment_counter(adr2);
      }
    } else if (is_inline) {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    } else {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    }
  }
}
#endif //PRODUCT

ciMethod* Compile::optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                         ciMethod* callee, const TypeOopPtr* receiver_type,
                                         bool is_virtual,
                                         bool& call_does_dispatch, int& vtable_index) {
  // Set default values for out-parameters.
  call_does_dispatch = true;
  vtable_index       = Method::invalid_vtable_index;

  // Choose call strategy.
  ciMethod* optimized_virtual_method = optimize_inlining(caller, bci, klass, callee, receiver_type);

  // Has the call been sufficiently improved that it is no longer virtual?
  if (optimized_virtual_method != NULL) {
    callee             = optimized_virtual_method;
    call_does_dispatch = false;
  } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
    // We can make a vtable call at this site
    vtable_index = callee->resolve_vtable_index(caller->holder(), klass);
  }
  return callee;
}

// Identify possible target method and inlining style
ciMethod* Compile::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                     ciMethod* callee, const TypeOopPtr* receiver_type) {
  // only use for virtual or interface calls

  // If it is obviously final, do not bother to call find_monomorphic_target,
  // because the class hierarchy checks are not needed, and may fail due to
  // incompletely loaded classes.  Since we do our own class loading checks
  // in this module, we may confidently bind to any method.
  if (callee->can_be_statically_bound()) {
    return callee;
  }

  // Attempt to improve the receiver
  bool actual_receiver_is_exact = false;
  ciInstanceKlass* actual_receiver = klass;
  if (receiver_type != NULL) {
    // Array methods are all inherited from Object, and are monomorphic.
    if (receiver_type->isa_aryptr() &&
        callee->holder() == env()->Object_klass()) {
      return callee;
    }

    // All other interesting cases are instance klasses.
    if (!receiver_type->isa_instptr()) {
      return NULL;
    }

    ciInstanceKlass *ikl = receiver_type->klass()->as_instance_klass();
    if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&
        (ikl == actual_receiver || ikl->is_subtype_of(actual_receiver))) {
      // ikl is the same as, or a more precise type than, the original
      // actual_receiver, e.g. the static receiver type from the bytecodes.
      actual_receiver = ikl;
      // Is the actual_receiver exact?
      actual_receiver_is_exact = receiver_type->klass_is_exact();
    }
  }

  ciInstanceKlass*   calling_klass = caller->holder();
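  // find_monomorphic_target asks CHA whether, given the classes loaded so
  // far, the call can only ever reach one concrete method.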
  ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver);
  if (cha_monomorphic_target != NULL) {
    assert(!cha_monomorphic_target->is_abstract(), "");
    // Look at the method-receiver type.  Does it add "too much information"?
    ciKlass*    mr_klass = cha_monomorphic_target->holder();
    const Type* mr_type  = TypeInstPtr::make(TypePtr::BotPTR, mr_klass);
    if (receiver_type == NULL || !receiver_type->higher_equal(mr_type)) {
      // Calling this method would include an implicit cast to its holder.
      // %%% Not yet implemented.  Would throw minor asserts at present.
      // %%% The most common wins are already gained by +UseUniqueSubclasses.
      // To fix, put the higher_equal check at the call of this routine,
      // and add a CheckCastPP to the receiver.
      if (TraceDependencies) {
        tty->print_cr("found unique CHA method, but could not cast up");
        tty->print("  method  = ");
        cha_monomorphic_target->print();
        tty->cr();
      }
      if (log() != NULL) {
        log()->elem("missed_CHA_opportunity klass='%d' method='%d'",
                       log()->identify(klass),
                       log()->identify(cha_monomorphic_target));
      }
      cha_monomorphic_target = NULL;
    }
  }
  if (cha_monomorphic_target != NULL) {
    // Hardwiring a virtual.
    // If we inlined because CHA revealed only a single target method,
    // then we are dependent on that target method not getting overridden
    // by dynamic class loading.  Be sure to test the "static" receiver
    // dest_method here, as opposed to the actual receiver, which may
    // falsely lead us to believe that the receiver is final or private.
    dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
    return cha_monomorphic_target;
  }

  // If the type is exact, we can still bind the method w/o a vcall.
  // (This case comes after CHA so we can see how much extra work it does.)
  if (actual_receiver_is_exact) {
    // In case of evolution, there is a dependence on every inlined method, since each
    // such method can be changed when its class is redefined.
    ciMethod* exact_method = callee->resolve_invoke(calling_klass, actual_receiver);
    if (exact_method != NULL) {
#ifndef PRODUCT
      if (PrintOpto) {
        tty->print("  Calling method via exact type @%d --- ", bci);
        exact_method->print_name();
        tty->cr();
      }
#endif
      return exact_method;
    }
  }

  return NULL;
}
