src/share/vm/opto/doCall.cpp

author:      kvn
date:        Mon, 01 Feb 2010 16:49:49 -0800
changeset:   1641:87684f1a88b5
parent:      1592:c3b315a0d58a
child:       1686:576e77447e3c
permissions: -rw-r--r--

6614597: Performance variability in jvm2008 xml.validation
Summary: Fix incorrect marking of methods as not compilable.
Reviewed-by: never

/*
 * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_doCall.cpp.incl"

#ifndef PRODUCT
void trace_type_profile(ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
  if (TraceTypeProfile || PrintInlining || PrintOptoInlining) {
    tty->print("   ");
    for( int i = 0; i < depth; i++ ) tty->print("  ");
    if (!PrintOpto) {
      method->print_short_name();
      tty->print(" ->");
    }
    tty->print(" @ %d  ", bci);
    prof_method->print_short_name();
    tty->print("  >>TypeProfile (%d/%d counts) = ", receiver_count, site_count);
    prof_klass->name()->print_symbol();
    tty->print_cr(" (%d bytes)", prof_method->code_size());
  }
}
#endif
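
// Compile::call_generator decides how a call site will be compiled and
// returns a CallGenerator embodying that decision: an intrinsic, a (possibly
// warm or late/delayed) inline, a type-profile-predicted call with an
// uncommon-trap or virtual fallback, a method-handle adapter, or a plain
// virtual or direct call.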
CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual,
                                       JVMState* jvms, bool allow_inline,
                                       float prof_factor) {
  CallGenerator* cg;

  // Dtrace currently doesn't work unless all calls are vanilla
  if (env()->dtrace_method_probes()) {
    allow_inline = false;
  }

  // Note: When we get profiling during stage-1 compiles, we want to pull
  // from more specific profile data which pertains to this inlining.
  // Right now, ignore the information in jvms->caller(), and do method[bci].
  ciCallProfile profile = jvms->method()->call_profile_at_bci(jvms->bci());

  // See how many times this site has been invoked.
  int site_count = profile.count();
  int receiver_count = -1;
  if (call_is_virtual && UseTypeProfile && profile.has_receiver(0)) {
    // Receivers in the profile structure are ordered by call counts
    // so that the most called (major) receiver is profile.receiver(0).
    receiver_count = profile.receiver_count(0);
  }

  CompileLog* log = this->log();
  if (log != NULL) {
    int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
    int r2id = (profile.morphism() == 2)? log->identify(profile.receiver(1)):-1;
    log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
                    log->identify(call_method), site_count, prof_factor);
    if (call_is_virtual)  log->print(" virtual='1'");
    if (allow_inline)     log->print(" inline='1'");
    if (receiver_count >= 0) {
      log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
      if (profile.has_receiver(1)) {
        log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
      }
    }
    log->end_elem();
  }

  // Special case the handling of certain common, profitable library
  // methods.  If these methods are replaced with specialized code,
  // then we return it as the inlined version of the call.
  // We do this before the strict f.p. check below because the
  // intrinsics handle strict f.p. correctly.
  if (allow_inline) {
    cg = find_intrinsic(call_method, call_is_virtual);
    if (cg != NULL)  return cg;
  }

  // Do not inline strict fp into non-strict code, or the reverse
  bool caller_method_is_strict = jvms->method()->is_strict();
  if( caller_method_is_strict ^ call_method->is_strict() ) {
    allow_inline = false;
  }

  // Attempt to inline...
  if (allow_inline) {
    // The profile data is only partly attributable to this caller,
    // scale back the call site information.
    float past_uses = jvms->method()->scale_count(site_count, prof_factor);
    // This is the number of times we expect the call code to be used.
    float expected_uses = past_uses;

    // Try inlining a bytecoded method:
    if (!call_is_virtual) {
      InlineTree* ilt;
      if (UseOldInlining) {
        ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
      } else {
        // Make a disembodied, stateless ILT.
        // TO DO:  When UseOldInlining is removed, copy the ILT code elsewhere.
        float site_invoke_ratio = prof_factor;
        // Note:  ilt is for the root of this parse, not the present call site.
        ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, 0);
      }
      WarmCallInfo scratch_ci;
      if (!UseOldInlining)
        scratch_ci.init(jvms, call_method, profile, prof_factor);
      WarmCallInfo* ci = ilt->ok_to_inline(call_method, jvms, profile, &scratch_ci);
      assert(ci != &scratch_ci, "do not let this pointer escape");
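      // Note: 'allow_inline' below shadows the parameter of the same name;
      // it and 'require_inline' record the warm-call verdict (cold/warm/hot)
      // for this particular site.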
      bool allow_inline   = (ci != NULL && !ci->is_cold());
      bool require_inline = (allow_inline && ci->is_hot());

      if (allow_inline) {
        CallGenerator* cg = CallGenerator::for_inline(call_method, expected_uses);
        if (require_inline && cg != NULL && should_delay_inlining(call_method, jvms)) {
          // Delay the inlining of this method to give us the
          // opportunity to perform some high level optimizations
          // first.
          return CallGenerator::for_late_inline(call_method, cg);
        }
        if (cg == NULL) {
          // Fall through.
        } else if (require_inline || !InlineWarmCalls) {
          return cg;
        } else {
          CallGenerator* cold_cg = call_generator(call_method, vtable_index, call_is_virtual, jvms, false, prof_factor);
          return CallGenerator::for_warm_call(ci, cold_cg, cg);
        }
      }
    }

    // Try using the type profile.
    if (call_is_virtual && site_count > 0 && receiver_count > 0) {
      // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
      bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
      ciMethod* receiver_method = NULL;
      if (have_major_receiver || profile.morphism() == 1 ||
          (profile.morphism() == 2 && UseBimorphicInlining)) {
        // receiver_method = profile.method();
        // Profiles do not suggest methods now.  Look it up in the major receiver.
        receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
                                                      profile.receiver(0));
      }
      if (receiver_method != NULL) {
        // The single majority receiver sufficiently outweighs the minority.
        CallGenerator* hit_cg = this->call_generator(receiver_method,
              vtable_index, !call_is_virtual, jvms, allow_inline, prof_factor);
        if (hit_cg != NULL) {
          // Look up second receiver.
          CallGenerator* next_hit_cg = NULL;
          ciMethod* next_receiver_method = NULL;
          if (profile.morphism() == 2 && UseBimorphicInlining) {
            next_receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
                                                               profile.receiver(1));
            if (next_receiver_method != NULL) {
              next_hit_cg = this->call_generator(next_receiver_method,
                                  vtable_index, !call_is_virtual, jvms,
                                  allow_inline, prof_factor);
              if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
                  have_major_receiver && UseOnlyInlinedBimorphic) {
                  // Skip if we can't inline second receiver's method
                  next_hit_cg = NULL;
              }
            }
          }
          CallGenerator* miss_cg;
          Deoptimization::DeoptReason reason = (profile.morphism() == 2) ?
                                    Deoptimization::Reason_bimorphic :
                                    Deoptimization::Reason_class_check;
          if (( profile.morphism() == 1 ||
               (profile.morphism() == 2 && next_hit_cg != NULL) ) &&
              !too_many_traps(jvms->method(), jvms->bci(), reason)
             ) {
            // Generate uncommon trap for class check failure path
            // in case of monomorphic or bimorphic virtual call site.
            miss_cg = CallGenerator::for_uncommon_trap(call_method, reason,
                        Deoptimization::Action_maybe_recompile);
          } else {
            // Generate virtual call for class check failure path
            // in case of polymorphic virtual call site.
            miss_cg = CallGenerator::for_virtual_call(call_method, vtable_index);
          }
          if (miss_cg != NULL) {
            if (next_hit_cg != NULL) {
              NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth(), jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1)));
              // We don't need to record dependency on a receiver here and below.
              // Whenever we inline, the dependency is added by Parse::Parse().
              miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
            }
            if (miss_cg != NULL) {
              NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth(), jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count));
              cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
              if (cg != NULL)  return cg;
            }
          }
        }
      }
    }
  }

  // Do MethodHandle calls.
  if (call_method->is_method_handle_invoke()) {
    if (jvms->method()->java_code_at_bci(jvms->bci()) != Bytecodes::_invokedynamic) {
      GraphKit kit(jvms);
      Node* n = kit.argument(0);
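
      // We can only build an adapter if the MethodHandle is a compile-time
      // constant (a ConP node); otherwise fall through to a plain direct call.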
      if (n->Opcode() == Op_ConP) {
        const TypeOopPtr* oop_ptr = n->bottom_type()->is_oopptr();
        ciObject* const_oop = oop_ptr->const_oop();
        ciMethodHandle* method_handle = const_oop->as_method_handle();

        // Set the actually called method to have access to the class
        // and signature in the MethodHandleCompiler.
        method_handle->set_callee(call_method);

        // Get an adapter for the MethodHandle.
        ciMethod* target_method = method_handle->get_method_handle_adapter();

        CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
        if (hit_cg != NULL && hit_cg->is_inline())
          return hit_cg;
      }

      return CallGenerator::for_direct_call(call_method);
    }
    else {
      // Get the MethodHandle from the CallSite.
      ciMethod* caller_method = jvms->method();
      ciBytecodeStream str(caller_method);
      str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
      ciCallSite*     call_site     = str.get_call_site();
      ciMethodHandle* method_handle = call_site->get_target();

      // Set the actually called method to have access to the class
      // and signature in the MethodHandleCompiler.
      method_handle->set_callee(call_method);

      // Get an adapter for the MethodHandle.
      ciMethod* target_method = method_handle->get_invokedynamic_adapter();

      CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
      if (hit_cg != NULL && hit_cg->is_inline()) {
        CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method);
        return CallGenerator::for_predicted_dynamic_call(method_handle, miss_cg, hit_cg, prof_factor);
      }

      // If something failed, generate a normal dynamic call.
      return CallGenerator::for_dynamic_call(call_method);
    }
  }

  // There was no special inlining tactic, or it bailed out.
  // Use a more generic tactic, like a simple call.
  if (call_is_virtual) {
    return CallGenerator::for_virtual_call(call_method, vtable_index);
  } else {
    // Class Hierarchy Analysis or Type Profile reveals a unique target,
    // or it is a static or special call.
    return CallGenerator::for_direct_call(call_method, should_delay_inlining(call_method, jvms));
  }
}

// Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics.
bool Compile::should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
  if (has_stringbuilder()) {

    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
         call_method->holder() == C->env()->StringBuffer_klass()) &&
        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
      // Delay SB calls only when called from non-SB code
      return false;
    }
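
    // The intrinsic ids below cover the StringBuilder/StringBuffer
    // constructor, append, and toString chains that the string-concatenation
    // optimization recognizes; delaying their inlining keeps a whole chain
    // visible to that pass.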
    switch (call_method->intrinsic_id()) {
      case vmIntrinsics::_StringBuilder_void:
      case vmIntrinsics::_StringBuilder_int:
      case vmIntrinsics::_StringBuilder_String:
      case vmIntrinsics::_StringBuilder_append_char:
      case vmIntrinsics::_StringBuilder_append_int:
      case vmIntrinsics::_StringBuilder_append_String:
      case vmIntrinsics::_StringBuilder_toString:
      case vmIntrinsics::_StringBuffer_void:
      case vmIntrinsics::_StringBuffer_int:
      case vmIntrinsics::_StringBuffer_String:
      case vmIntrinsics::_StringBuffer_append_char:
      case vmIntrinsics::_StringBuffer_append_int:
      case vmIntrinsics::_StringBuffer_append_String:
      case vmIntrinsics::_StringBuffer_toString:
      case vmIntrinsics::_Integer_toString:
        return true;

      case vmIntrinsics::_String_String:
        {
          Node* receiver = jvms->map()->in(jvms->argoff() + 1);
          if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
            CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
            ciMethod* m = csj->method();
            if (m != NULL &&
                (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
                 m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
              // Delay String.<init>(new SB())
              return true;
          }
          return false;
        }

      default:
        return false;
    }
  }
  return false;
}

// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
  // Additional inputs to consider...
  // bc      = bc()
  // caller  = method()
  // iter().get_method_holder_index()
  assert( dest_method->is_loaded(), "ciTypeFlow should not let us get here" );
  // Interface classes can be loaded & linked and never get around to
  // being initialized.  Uncommon-trap for not-initialized static or
  // v-calls.  Let interface calls happen.
  ciInstanceKlass* holder_klass = dest_method->holder();
  if (!holder_klass->is_initialized() &&
      !holder_klass->is_interface()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  holder_klass);
    return true;
  }

  assert(dest_method->will_link(method()->holder(), klass, bc()), "dest_method: typeflow responsibility");
  return false;
}

//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  bool is_virtual = bc() == Bytecodes::_invokevirtual;
  bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;
  bool is_invokedynamic = bc() == Bytecodes::_invokedynamic;

  // Find target being called
  bool             will_link;
  ciMethod*        dest_method   = iter().get_method(will_link);
  ciInstanceKlass* holder_klass  = dest_method->holder();
  ciKlass* holder = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

  int nargs = dest_method->arg_size();
  if (is_invokedynamic)  nargs -= 1;
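  // (invokedynamic pushes no receiver: the target method handle is obtained
  // from the CallSite instead, so drop the receiver slot counted by arg_size().)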

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(dest_method, klass)) {
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
      dest_method->print_name(); tty->cr();
    }
#endif
    return;
  }
  assert(holder_klass->is_loaded(), "");
  assert((dest_method->is_static() || is_invokedynamic) == !has_receiver, "must match bc");
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Choose call strategy.
  bool call_is_virtual = is_virtual_or_interface;
  int vtable_index = methodOopDesc::invalid_vtable_index;
  ciMethod* call_method = dest_method;

  // Try to get the most accurate receiver type
  if (is_virtual_or_interface) {
    Node*             receiver_node = stack(sp() - nargs);
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type);

    // Has the call been sufficiently improved such that it is no longer virtual?
    if (optimized_virtual_method != NULL) {
      call_method     = optimized_virtual_method;
      call_is_virtual = false;
    } else if (!UseInlineCaches && is_virtual && call_method->is_loaded()) {
      // We can make a vtable call at this site
      vtable_index = call_method->resolve_vtable_index(method()->holder(), klass);
    }
  }

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors);

  // ---------------------
  inc_sp(- nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());

  // ---------------------
  // Round double arguments before call
  round_double_arguments(dest_method);

#ifndef PRODUCT
  // bump global counters for calls
  count_compiled_calls(false/*at_method_entry*/, cg->is_inline());

  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  // Bump method data counters (We profile *before* the call is made
  // because exceptions don't return to the call site.)
  profile_call(receiver);

  JVMState* new_jvms;
  if ((new_jvms = cg->generate(jvms)) == NULL) {
    // When inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;
#ifndef PRODUCT
    if (PrintOpto || PrintOptoInlining || PrintInlining) {
      // Only one fall-back, so if an intrinsic fails, ignore any bytecodes.
      if (cg->is_intrinsic() && call_method->code_size() > 0) {
        tty->print("Bailed out of intrinsic, will not inline: ");
        call_method->print_name(); tty->cr();
      }
    }
#endif
    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize.  The fallback position is
    // to call out-of-line.
    try_inline = false;  // Inline tactic bailed out.
    cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
    if ((new_jvms = cg->generate(jvms)) == NULL) {
      guarantee(failing(), "call failed to generate:  calls should work");
      return;
    }
  }

  if (cg->is_inline()) {
    // Accumulate has_loops estimate
    C->set_has_loops(C->has_loops() || call_method->has_loops());
    C->env()->notice_inlined_method(call_method);
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(dest_method);

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!dest_method->return_type()->is_loaded()) {
#ifndef PRODUCT
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
        dest_method->print_name(); tty->cr();
      }
#endif
      if (C->log() != NULL) {
        C->log()->elem("assert_null reason='return' klass='%d'",
                       C->log()->identify(dest_method->return_type()));
      }
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      do_null_assert(peek(), T_OBJECT);
      set_bci(iter().cur_bci()); // put it back
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}

//---------------------------catch_call_exceptions-----------------------------
// Put Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is necessary to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
  GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
  GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
  GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);
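
  // For each reachable handler, bcis/extypes record the handler bci and the
  // caught exception type; saw_unloaded records handler bcis that name
  // not-yet-loaded exception classes, which get an uncommon trap below.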
  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h        = handlers.handler();
    int                 h_bci    = h->handler_bci();
    ciInstanceKlass*    h_klass  = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded->contains(h_bci)) {
        /* We've already seen an unloaded exception with h_bci,
           so don't duplicate. Duplication will cause the CatchNode to be
           unnecessarily large. See 4713716. */
        continue;
      } else {
        saw_unloaded->append(h_bci);
      }
    }
    const Type*         h_extype = TypeOopPtr::make_from_klass(h_klass);
    // (We use make_from_klass because it respects UseUniqueSubclasses.)
    h_extype = h_extype->join(TypeInstPtr::NOTNULL);
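    // (An exception oop is never null, so narrowing the type with NOTNULL is sound.)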
    assert(!h_extype->empty(), "sanity");
    // Note:  It's OK if the BCIs repeat themselves.
    bcis->append(h_bci);
    extypes->append(h_extype);
  }

  int len = bcis->length();
  CatchNode *cn = new (C, 2) CatchNode(control(), i_o, len+1);
  Node *catch_ = _gvn.transform(cn);

  // now branch with the exception state to each of the (potential)
  // handlers
  for(int i=0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis->at(i);
    Node* ctrl = _gvn.transform( new (C, 1) CatchProjNode(catch_, i+1, handler_bci));
    // This handler cannot happen?
    if (ctrl == top())  continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes->at(i)->is_instptr();
    Node *ex_oop = _gvn.transform(new (C, 2) CreateExNode(extypes->at(i), ctrl, i_o));

    // Handle unloaded exception classes.
    if (saw_unloaded->contains(handler_bci)) {
      // An unloaded exception type is coming here.  Do an uncommon trap.
#ifndef PRODUCT
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions.  But, watch for it.
      if (extype->is_loaded()) {
        tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", handler_bci);
        method()->print_name(); tty->cr();
      } else if (PrintOpto && (Verbose || WizardMode)) {
        tty->print("Bailing out on unloaded exception type ");
        extype->klass()->print_name();
        tty->print(" at bci:%d in ", bci());
        method()->print_name(); tty->cr();
      }
#endif
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded,
                    Deoptimization::Action_reinterpret,
                    extype->klass(), "!loaded exception");
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) {     // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {                      // Else jump to corresponding handler
      push_ex_oop(ex_oop);        // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note:  If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform( new (C, 1) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}

//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses.  We do a Deutsch-Shiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: We have some handlers with subklasses, or whose klasses are not
// loaded at compile-time.  We have to call the runtime to resolve the
// exception.  So we insert a RethrowCall and all the logic that goes with it.
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(NULL) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
  NOT_PRODUCT(if (ex_type==NULL) tty->print_cr("*** Exception not InstPtr"));
  if (ex_type == NULL)
    ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->klass()->as_instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state.  (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  Node* ex_klass_node = NULL;
  if (has_ex_handler() && !ex_type->klass_is_exact()) {
    Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
    ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );

    // Compute the exception klass a little more cleverly.
    // Obvious solution is to simply do a LoadKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
    // each arm of the Phi.  If I know something clever about the exceptions
    // I'm loading the class from, I can replace the LoadKlass with the
    // klass constant for the exception oop.
    if( ex_node->is_Phi() ) {
      ex_klass_node = new (C, ex_node->req()) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT );
      for( uint i = 1; i < ex_node->req(); i++ ) {
        Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() );
        Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
        ex_klass_node->init_req( i, k );
      }
      _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);

    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.

  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  for (;!handlers.is_done(); handlers.next()) {
    // Do nothing if turned off
    if( !DeutschShiffmanExceptions ) break;
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method.  We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      push_ex_oop(ex_node);        // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print_cr("  Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
      }
#endif
      merge_exception(handler_bci); // jump to handler
      return;                   // No more handling to be done here!
    }

    // %%% The following logic replicates make_from_klass_unique.
    // TO DO:  Replace by a subroutine call.  Then generalize
    // the type check, as noted in the next "%%%" comment.

    ciInstanceKlass* klass = handler->catch_klass();
    if (UseUniqueSubclasses) {
      // (We use make_from_klass because it respects UseUniqueSubclasses.)
      const TypeOopPtr* tp = TypeOopPtr::make_from_klass(klass);
      klass = tp->klass()->as_instance_klass();
    }

    // Get the handler's klass
    if (!klass->is_loaded())    // klass is not loaded?
      break;                    // Must call Rethrow!
    if (klass->is_interface())  // should not happen, but...
      break;                    // bail out
    // See if the loaded exception klass has no subtypes
    if (klass->has_subklass())
      break;                    // Cannot easily do precise test ==> Rethrow

    // %%% Now that subclass checking is very fast, we need to rewrite
    // this section and remove the option "DeutschShiffmanExceptions".
    // The exception processing chain should be a normal typecase pattern,
    // with a bailout to the interpreter only in the case of unloaded
    // classes.  (The bailout should mark the method non-entrant.)
    // This rewrite should be placed in GraphKit::, not Parse::.

    // Add a dependence; if any subclass added we need to recompile
    // %%% should use stronger assert_unique_concrete_subtype instead
    if (!klass->is_final()) {
      C->dependencies()->assert_leaf_type(klass);
    }

    // Implement precise test
    const TypeKlassPtr *tk = TypeKlassPtr::make(klass);
    Node* con = _gvn.makecon(tk);
    Node* cmp = _gvn.transform( new (C, 3) CmpPNode(ex_klass_node, con) );
    Node* bol = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ne) );
    { BuildCutout unless(this, bol, PROB_LIKELY(0.7f));
      const TypeInstPtr* tinst = TypeInstPtr::make_exact(TypePtr::NotNull, klass);
      Node* ex_oop = _gvn.transform(new (C, 2) CheckCastPPNode(control(), ex_node, tinst));
      push_ex_oop(ex_oop);      // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print("  Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
        klass->print_name();
        tty->cr();
      }
#endif
      merge_exception(handler_bci);
    }

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  if (remaining == 1) {
    // Further checks do not matter.
  }

  if (can_rerun_bytecode()) {
    // Do not push_ex_oop here!
    // Re-executing the bytecode will reproduce the throwing condition.
    bool must_throw = true;
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none,
                  (ciKlass*)NULL, (const char*)NULL, // default args
                  must_throw);
    return;
  }

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note:  This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    OptoRuntime::rethrow_Type(),
                    OptoRuntime::rethrow_stub(),
                    NULL, NULL,
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}

// (Note:  Moved add_debug_info into GraphKit::add_safepoint_edges.)

#ifndef PRODUCT
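// Statistics only: classify each compiled call by bytecode and inline status
// and bump the matching SharedRuntime counter, or bump the per-method
// compiled-invocation counter at method entry.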
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
  if( CountCompiledCalls ) {
    if( at_method_entry ) {
      // bump invocation counter if top method (for statistics)
      if (CountCompiledCalls && depth() == 1) {
        const TypeInstPtr* addr_type = TypeInstPtr::make(method());
        Node* adr1 = makecon(addr_type);
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(methodOopDesc::compiled_invocation_counter_offset()));
        increment_counter(adr2);
      }
    } else if (is_inline) {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    } else {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    }
  }
}
#endif //PRODUCT

// Identify possible target method and inlining style
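// Returns a method that can be bound and called without a virtual dispatch
// (via static binding, a unique CHA target, or an exact receiver type),
// or NULL if the call must remain virtual.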
ciMethod* Parse::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                   ciMethod *dest_method, const TypeOopPtr* receiver_type) {
  // only use for virtual or interface calls

  // If it is obviously final, do not bother to call find_monomorphic_target,
  // because the class hierarchy checks are not needed, and may fail due to
  // incompletely loaded classes.  Since we do our own class loading checks
  // in this module, we may confidently bind to any method.
  if (dest_method->can_be_statically_bound()) {
    return dest_method;
  }

  // Attempt to improve the receiver
  bool actual_receiver_is_exact = false;
  ciInstanceKlass* actual_receiver = klass;
  if (receiver_type != NULL) {
    // Array methods are all inherited from Object, and are monomorphic.
    if (receiver_type->isa_aryptr() &&
        dest_method->holder() == env()->Object_klass()) {
      return dest_method;
    }

    // All other interesting cases are instance klasses.
    if (!receiver_type->isa_instptr()) {
      return NULL;
    }

    ciInstanceKlass *ikl = receiver_type->klass()->as_instance_klass();
    if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&
        (ikl == actual_receiver || ikl->is_subtype_of(actual_receiver))) {
      // ikl is the same as, or a better type than, the original
      // actual_receiver, e.g. the static receiver from the bytecodes.
      actual_receiver = ikl;
      // Is the actual_receiver exact?
      actual_receiver_is_exact = receiver_type->klass_is_exact();
    }
  }

  ciInstanceKlass*   calling_klass = caller->holder();
  ciMethod* cha_monomorphic_target = dest_method->find_monomorphic_target(calling_klass, klass, actual_receiver);
  if (cha_monomorphic_target != NULL) {
    assert(!cha_monomorphic_target->is_abstract(), "");
    // Look at the method-receiver type.  Does it add "too much information"?
    ciKlass*    mr_klass = cha_monomorphic_target->holder();
    const Type* mr_type  = TypeInstPtr::make(TypePtr::BotPTR, mr_klass);
    if (receiver_type == NULL || !receiver_type->higher_equal(mr_type)) {
      // Calling this method would include an implicit cast to its holder.
      // %%% Not yet implemented.  Would throw minor asserts at present.
      // %%% The most common wins are already gained by +UseUniqueSubclasses.
      // To fix, put the higher_equal check at the call of this routine,
      // and add a CheckCastPP to the receiver.
      if (TraceDependencies) {
        tty->print_cr("found unique CHA method, but could not cast up");
        tty->print("  method  = ");
        cha_monomorphic_target->print();
        tty->cr();
      }
      if (C->log() != NULL) {
        C->log()->elem("missed_CHA_opportunity klass='%d' method='%d'",
                       C->log()->identify(klass),
                       C->log()->identify(cha_monomorphic_target));
      }
      cha_monomorphic_target = NULL;
    }
  }
  if (cha_monomorphic_target != NULL) {
    // Hardwiring a virtual.
    // If we inlined because CHA revealed only a single target method,
    // then we are dependent on that target method not getting overridden
    // by dynamic class loading.  Be sure to test the "static" receiver
    // dest_method here, as opposed to the actual receiver, which may
    // falsely lead us to believe that the receiver is final or private.
    C->dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
    return cha_monomorphic_target;
  }

  // If the type is exact, we can still bind the method w/o a vcall.
  // (This case comes after CHA so we can see how much extra work it does.)
  if (actual_receiver_is_exact) {
    // In case of evolution, there is a dependence on every inlined method, since each
    // such method can be changed when its class is redefined.
    ciMethod* exact_method = dest_method->resolve_invoke(calling_klass, actual_receiver);
    if (exact_method != NULL) {
#ifndef PRODUCT
      if (PrintOpto) {
        tty->print("  Calling method via exact type @%d --- ", bci);
        exact_method->print_name();
        tty->cr();
      }
#endif
      return exact_method;
    }
  }

  return NULL;
}
