src/share/vm/opto/doCall.cpp

changeset:   3309:8c57262447d3
author:      kvn
date:        Mon, 14 Nov 2011 18:38:03 -0800
parent:      3101:aa67216400d3
child:       3313:a04a201f0f5a
permissions: -rw-r--r--

7105605: Use EA info to optimize pointers compare
Summary: Optimize pointer compares using escape analysis (EA) information.
Reviewed-by: never, twisti
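(Roughly, the change lets C2's escape analysis constant-fold pointer comparisons: when the connection graph proves that one operand of a CmpP/CmpN is a non-escaping allocation that cannot be the same object as the other operand, the compare is replaced by a constant instead of a runtime test.)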

duke@435 1 /*
twisti@2687 2 * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "ci/ciCPCache.hpp"
stefank@2314 27 #include "ci/ciCallSite.hpp"
stefank@2314 28 #include "ci/ciMethodHandle.hpp"
stefank@2314 29 #include "classfile/vmSymbols.hpp"
twisti@2687 30 #include "compiler/compileBroker.hpp"
stefank@2314 31 #include "compiler/compileLog.hpp"
stefank@2314 32 #include "interpreter/linkResolver.hpp"
stefank@2314 33 #include "opto/addnode.hpp"
stefank@2314 34 #include "opto/callGenerator.hpp"
stefank@2314 35 #include "opto/cfgnode.hpp"
stefank@2314 36 #include "opto/mulnode.hpp"
stefank@2314 37 #include "opto/parse.hpp"
stefank@2314 38 #include "opto/rootnode.hpp"
stefank@2314 39 #include "opto/runtime.hpp"
stefank@2314 40 #include "opto/subnode.hpp"
stefank@2314 41 #include "prims/nativeLookup.hpp"
stefank@2314 42 #include "runtime/sharedRuntime.hpp"
duke@435 43
duke@435 44 #ifndef PRODUCT
duke@435 45 void trace_type_profile(ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
duke@435 46 if (TraceTypeProfile || PrintInlining || PrintOptoInlining) {
twisti@2687 47 if (!PrintInlining) {
twisti@2687 48 if (!PrintOpto && !PrintCompilation) {
twisti@2687 49 method->print_short_name();
twisti@2687 50 tty->cr();
twisti@2687 51 }
twisti@2687 52 CompileTask::print_inlining(prof_method, depth, bci);
duke@435 53 }
twisti@2687 54 CompileTask::print_inline_indent(depth);
twisti@2687 55 tty->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
duke@435 56 prof_klass->name()->print_symbol();
twisti@2687 57 tty->cr();
duke@435 58 }
duke@435 59 }
duke@435 60 #endif
duke@435 61
jrose@1592 62 CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual,
jrose@1592 63 JVMState* jvms, bool allow_inline,
jrose@1592 64 float prof_factor) {
twisti@2903 65 CallGenerator* cg;
twisti@2903 66 ciMethod* caller = jvms->method();
twisti@2903 67 int bci = jvms->bci();
twisti@2903 68 Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
jrose@2744 69 guarantee(call_method != NULL, "failed method resolution");
duke@435 70
duke@435 71 // Dtrace currently doesn't work unless all calls are vanilla
kvn@1215 72 if (env()->dtrace_method_probes()) {
duke@435 73 allow_inline = false;
duke@435 74 }
duke@435 75
duke@435 76 // Note: When we get profiling during stage-1 compiles, we want to pull
duke@435 77 // from more specific profile data which pertains to this inlining.
duke@435 78 // Right now, ignore the information in jvms->caller(), and do method[bci].
twisti@2903 79 ciCallProfile profile = caller->call_profile_at_bci(bci);
duke@435 80
duke@435 81 // See how many times this site has been invoked.
duke@435 82 int site_count = profile.count();
duke@435 83 int receiver_count = -1;
duke@435 84 if (call_is_virtual && UseTypeProfile && profile.has_receiver(0)) {
duke@435 85 // Receivers in the profile structure are ordered by call counts
duke@435 86 // so that the most called (major) receiver is profile.receiver(0).
duke@435 87 receiver_count = profile.receiver_count(0);
duke@435 88 }
duke@435 89
duke@435 90 CompileLog* log = this->log();
duke@435 91 if (log != NULL) {
duke@435 92 int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
kvn@1686 93 int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
duke@435 94 log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
duke@435 95 log->identify(call_method), site_count, prof_factor);
duke@435 96 if (call_is_virtual) log->print(" virtual='1'");
duke@435 97 if (allow_inline) log->print(" inline='1'");
duke@435 98 if (receiver_count >= 0) {
duke@435 99 log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
duke@435 100 if (profile.has_receiver(1)) {
duke@435 101 log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
duke@435 102 }
duke@435 103 }
duke@435 104 log->end_elem();
duke@435 105 }
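// For illustration only (hypothetical ids and counts), the compile-log element
// emitted above looks like:
//   <call method='123' count='4000' prof_factor='1' virtual='1' inline='1'
//         receiver='124' receiver_count='3800'/>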
duke@435 106
duke@435 107 // Special case the handling of certain common, profitable library
duke@435 108 // methods. If these methods are replaced with specialized code,
duke@435 109 // then we return it as the inlined version of the call.
duke@435 110 // We do this before the strict f.p. check below because the
duke@435 111 // intrinsics handle strict f.p. correctly.
duke@435 112 if (allow_inline) {
duke@435 113 cg = find_intrinsic(call_method, call_is_virtual);
duke@435 114 if (cg != NULL) return cg;
duke@435 115 }
duke@435 116
twisti@3050 117 // Do method handle calls.
twisti@2178 118 // NOTE: This must happen before normal inlining logic below since
twisti@2178 119 // MethodHandle.invoke* are native methods which obviously don't
twisti@2178 120 // have bytecodes and so normal inlining fails.
twisti@2178 121 if (call_method->is_method_handle_invoke()) {
twisti@2898 122 if (bytecode != Bytecodes::_invokedynamic) {
twisti@2178 123 GraphKit kit(jvms);
twisti@2178 124 Node* n = kit.argument(0);
twisti@2178 125
never@2949 126 CallGenerator* cg = CallGenerator::for_method_handle_inline(n, jvms, caller, call_method, profile);
never@2949 127 if (cg != NULL) {
never@2949 128 return cg;
twisti@2178 129 }
twisti@2178 130 return CallGenerator::for_direct_call(call_method);
twisti@2178 131 }
twisti@2178 132 else {
twisti@3050 133 // Get the CallSite object.
twisti@2178 134 ciMethod* caller_method = jvms->method();
twisti@2178 135 ciBytecodeStream str(caller_method);
twisti@2178 136 str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci.
twisti@3050 137 ciCallSite* call_site = str.get_call_site();
twisti@2178 138
twisti@3101 139 CallGenerator* cg = CallGenerator::for_invokedynamic_inline(call_site, jvms, caller, call_method, profile);
twisti@3101 140 if (cg != NULL) {
twisti@3101 141 return cg;
twisti@2178 142 }
twisti@2178 143 // If something failed, generate a normal dynamic call.
twisti@2178 144 return CallGenerator::for_dynamic_call(call_method);
twisti@2178 145 }
twisti@2178 146 }
twisti@2178 147
duke@435 148 // Do not inline strict fp into non-strict code, or the reverse
duke@435 149 bool caller_method_is_strict = jvms->method()->is_strict();
duke@435 150 if( caller_method_is_strict ^ call_method->is_strict() ) {
duke@435 151 allow_inline = false;
duke@435 152 }
duke@435 153
duke@435 154 // Attempt to inline...
duke@435 155 if (allow_inline) {
duke@435 156 // The profile data is only partly attributable to this caller,
duke@435 157 // scale back the call site information.
duke@435 158 float past_uses = jvms->method()->scale_count(site_count, prof_factor);
duke@435 159 // This is the number of times we expect the call code to be used.
duke@435 160 float expected_uses = past_uses;
duke@435 161
duke@435 162 // Try inlining a bytecoded method:
duke@435 163 if (!call_is_virtual) {
duke@435 164 InlineTree* ilt;
duke@435 165 if (UseOldInlining) {
duke@435 166 ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
duke@435 167 } else {
duke@435 168 // Make a disembodied, stateless ILT.
duke@435 169 // TO DO: When UseOldInlining is removed, copy the ILT code elsewhere.
duke@435 170 float site_invoke_ratio = prof_factor;
duke@435 171 // Note: ilt is for the root of this parse, not the present call site.
never@2981 172 ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, MaxInlineLevel);
duke@435 173 }
duke@435 174 WarmCallInfo scratch_ci;
duke@435 175 if (!UseOldInlining)
duke@435 176 scratch_ci.init(jvms, call_method, profile, prof_factor);
duke@435 177 WarmCallInfo* ci = ilt->ok_to_inline(call_method, jvms, profile, &scratch_ci);
duke@435 178 assert(ci != &scratch_ci, "do not let this pointer escape");
duke@435 179 bool allow_inline = (ci != NULL && !ci->is_cold());
duke@435 180 bool require_inline = (allow_inline && ci->is_hot());
duke@435 181
duke@435 182 if (allow_inline) {
duke@435 183 CallGenerator* cg = CallGenerator::for_inline(call_method, expected_uses);
never@1515 184 if (require_inline && cg != NULL && should_delay_inlining(call_method, jvms)) {
never@1515 185 // Delay the inlining of this method to give us the
never@1515 186 // opportunity to perform some high level optimizations
never@1515 187 // first.
never@1515 188 return CallGenerator::for_late_inline(call_method, cg);
never@1515 189 }
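// Three possible outcomes below: no inline generator could be built (fall
// through to the generic tactics at the end of this method), inlining is
// mandatory or warm calls are disabled (return cg directly), or the decision
// is deferred by pairing cg with an out-of-line cold_cg in a warm call priced
// by the WarmCallInfo.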
duke@435 190 if (cg == NULL) {
duke@435 191 // Fall through.
duke@435 192 } else if (require_inline || !InlineWarmCalls) {
duke@435 193 return cg;
duke@435 194 } else {
duke@435 195 CallGenerator* cold_cg = call_generator(call_method, vtable_index, call_is_virtual, jvms, false, prof_factor);
duke@435 196 return CallGenerator::for_warm_call(ci, cold_cg, cg);
duke@435 197 }
duke@435 198 }
duke@435 199 }
duke@435 200
duke@435 201 // Try using the type profile.
duke@435 202 if (call_is_virtual && site_count > 0 && receiver_count > 0) {
duke@435 203 // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
duke@435 204 bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
duke@435 205 ciMethod* receiver_method = NULL;
duke@435 206 if (have_major_receiver || profile.morphism() == 1 ||
duke@435 207 (profile.morphism() == 2 && UseBimorphicInlining)) {
duke@435 208 // receiver_method = profile.method();
duke@435 209 // Profiles do not suggest methods now. Look it up in the major receiver.
duke@435 210 receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
duke@435 211 profile.receiver(0));
duke@435 212 }
duke@435 213 if (receiver_method != NULL) {
duke@435 214 // The single majority receiver sufficiently outweighs the minority.
duke@435 215 CallGenerator* hit_cg = this->call_generator(receiver_method,
duke@435 216 vtable_index, !call_is_virtual, jvms, allow_inline, prof_factor);
duke@435 217 if (hit_cg != NULL) {
duke@435 218 // Look up second receiver.
duke@435 219 CallGenerator* next_hit_cg = NULL;
duke@435 220 ciMethod* next_receiver_method = NULL;
duke@435 221 if (profile.morphism() == 2 && UseBimorphicInlining) {
duke@435 222 next_receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
duke@435 223 profile.receiver(1));
duke@435 224 if (next_receiver_method != NULL) {
duke@435 225 next_hit_cg = this->call_generator(next_receiver_method,
duke@435 226 vtable_index, !call_is_virtual, jvms,
duke@435 227 allow_inline, prof_factor);
duke@435 228 if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
duke@435 229 have_major_receiver && UseOnlyInlinedBimorphic) {
duke@435 230 // Skip if we can't inline second receiver's method
duke@435 231 next_hit_cg = NULL;
duke@435 232 }
duke@435 233 }
duke@435 234 }
duke@435 235 CallGenerator* miss_cg;
kvn@1641 236 Deoptimization::DeoptReason reason = (profile.morphism() == 2) ?
kvn@1641 237 Deoptimization::Reason_bimorphic :
kvn@1641 238 Deoptimization::Reason_class_check;
duke@435 239 if (( profile.morphism() == 1 ||
duke@435 240 (profile.morphism() == 2 && next_hit_cg != NULL) ) &&
kvn@1641 241 !too_many_traps(jvms->method(), jvms->bci(), reason)
duke@435 242 ) {
duke@435 243 // Generate uncommon trap for class check failure path
duke@435 244 // in case of monomorphic or bimorphic virtual call site.
kvn@1641 245 miss_cg = CallGenerator::for_uncommon_trap(call_method, reason,
duke@435 246 Deoptimization::Action_maybe_recompile);
duke@435 247 } else {
duke@435 248 // Generate virtual call for class check failure path
duke@435 249 // in case of polymorphic virtual call site.
duke@435 250 miss_cg = CallGenerator::for_virtual_call(call_method, vtable_index);
duke@435 251 }
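// Stitch the pieces together: the predicted call tests the profiled receiver
// klass(es) inline and only falls back to miss_cg (an uncommon trap or a true
// virtual dispatch) when the runtime receiver matches none of them.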
duke@435 252 if (miss_cg != NULL) {
duke@435 253 if (next_hit_cg != NULL) {
twisti@2687 254 NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1)));
duke@435 255 // We don't need to record dependency on a receiver here and below.
duke@435 256 // Whenever we inline, the dependency is added by Parse::Parse().
duke@435 257 miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
duke@435 258 }
duke@435 259 if (miss_cg != NULL) {
twisti@2687 260 NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count));
duke@435 261 cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
duke@435 262 if (cg != NULL) return cg;
duke@435 263 }
duke@435 264 }
duke@435 265 }
duke@435 266 }
duke@435 267 }
duke@435 268 }
duke@435 269
duke@435 270 // There was no special inlining tactic, or it bailed out.
duke@435 271 // Use a more generic tactic, like a simple call.
duke@435 272 if (call_is_virtual) {
duke@435 273 return CallGenerator::for_virtual_call(call_method, vtable_index);
duke@435 274 } else {
duke@435 275 // Class Hierarchy Analysis or Type Profile reveals a unique target,
duke@435 276 // or it is a static or special call.
never@1515 277 return CallGenerator::for_direct_call(call_method, should_delay_inlining(call_method, jvms));
duke@435 278 }
duke@435 279 }
duke@435 280
never@1515 281 // Return true for methods that shouldn't be inlined early so that
never@1515 282 // they are easier to analyze and optimize as intrinsics.
never@1515 283 bool Compile::should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
never@1515 284 if (has_stringbuilder()) {
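// Keeping StringBuilder/StringBuffer constructor, append and toString calls
// out-of-line here preserves the call chain so that the later
// string-concatenation optimization pass can recognize and replace the whole
// pattern at once.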
never@1515 285
never@1515 286 if ((call_method->holder() == C->env()->StringBuilder_klass() ||
never@1515 287 call_method->holder() == C->env()->StringBuffer_klass()) &&
never@1515 288 (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
never@1515 289 jvms->method()->holder() == C->env()->StringBuffer_klass())) {
never@1515 290 // Delay SB calls only when called from non-SB code
never@1515 291 return false;
never@1515 292 }
never@1515 293
never@1515 294 switch (call_method->intrinsic_id()) {
never@1515 295 case vmIntrinsics::_StringBuilder_void:
never@1515 296 case vmIntrinsics::_StringBuilder_int:
never@1515 297 case vmIntrinsics::_StringBuilder_String:
never@1515 298 case vmIntrinsics::_StringBuilder_append_char:
never@1515 299 case vmIntrinsics::_StringBuilder_append_int:
never@1515 300 case vmIntrinsics::_StringBuilder_append_String:
never@1515 301 case vmIntrinsics::_StringBuilder_toString:
never@1515 302 case vmIntrinsics::_StringBuffer_void:
never@1515 303 case vmIntrinsics::_StringBuffer_int:
never@1515 304 case vmIntrinsics::_StringBuffer_String:
never@1515 305 case vmIntrinsics::_StringBuffer_append_char:
never@1515 306 case vmIntrinsics::_StringBuffer_append_int:
never@1515 307 case vmIntrinsics::_StringBuffer_append_String:
never@1515 308 case vmIntrinsics::_StringBuffer_toString:
never@1515 309 case vmIntrinsics::_Integer_toString:
never@1515 310 return true;
never@1515 311
never@1515 312 case vmIntrinsics::_String_String:
never@1515 313 {
never@1515 314 Node* receiver = jvms->map()->in(jvms->argoff() + 1);
never@1515 315 if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
never@1515 316 CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
never@1515 317 ciMethod* m = csj->method();
never@1515 318 if (m != NULL &&
never@1515 319 (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
never@1515 320 m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
never@1515 321 // Delay String.<init>(new SB())
never@1515 322 return true;
never@1515 323 }
never@1515 324 return false;
never@1515 325 }
never@1515 326
never@1515 327 default:
never@1515 328 return false;
never@1515 329 }
never@1515 330 }
never@1515 331 return false;
never@1515 332 }
never@1515 333
duke@435 334
duke@435 335 // uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
duke@435 336 bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
duke@435 337 // Additional inputs to consider...
duke@435 338 // bc = bc()
duke@435 339 // caller = method()
duke@435 340 // iter().get_method_holder_index()
duke@435 341 assert( dest_method->is_loaded(), "ciTypeFlow should not let us get here" );
duke@435 342 // Interface classes can be loaded & linked and never get around to
duke@435 343 // being initialized. Uncommon-trap for not-initialized static or
duke@435 344 // v-calls. Let interface calls happen.
twisti@1572 345 ciInstanceKlass* holder_klass = dest_method->holder();
never@2000 346 if (!holder_klass->is_being_initialized() &&
never@2000 347 !holder_klass->is_initialized() &&
duke@435 348 !holder_klass->is_interface()) {
duke@435 349 uncommon_trap(Deoptimization::Reason_uninitialized,
duke@435 350 Deoptimization::Action_reinterpret,
duke@435 351 holder_klass);
duke@435 352 return true;
duke@435 353 }
duke@435 354
duke@435 355 assert(dest_method->will_link(method()->holder(), klass, bc()), "dest_method: typeflow responsibility");
duke@435 356 return false;
duke@435 357 }
duke@435 358
duke@435 359
duke@435 360 //------------------------------do_call----------------------------------------
duke@435 361 // Handle your basic call. Inline if we can & want to, else just setup call.
duke@435 362 void Parse::do_call() {
duke@435 363 // It's likely we are going to add debug info soon.
duke@435 364 // Also, if we inline a guy who eventually needs debug info for this JVMS,
duke@435 365 // our contribution to it is cleaned up right here.
duke@435 366 kill_dead_locals();
duke@435 367
duke@435 368 // Set frequently used booleans
duke@435 369 bool is_virtual = bc() == Bytecodes::_invokevirtual;
duke@435 370 bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
duke@435 371 bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;
twisti@1572 372 bool is_invokedynamic = bc() == Bytecodes::_invokedynamic;
duke@435 373
duke@435 374 // Find target being called
duke@435 375 bool will_link;
duke@435 376 ciMethod* dest_method = iter().get_method(will_link);
duke@435 377 ciInstanceKlass* holder_klass = dest_method->holder();
duke@435 378 ciKlass* holder = iter().get_declared_method_holder();
duke@435 379 ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
duke@435 380
twisti@1572 381 int nargs = dest_method->arg_size();
twisti@1572 382 if (is_invokedynamic) nargs -= 1;
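// For invokedynamic the target's arg_size() includes its implicit
// method-handle receiver, which the bytecode does not push on the operand
// stack, hence the adjustment (compare the has_receiver assert below).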
duke@435 383
duke@435 384 // uncommon-trap when callee is unloaded, uninitialized or will not link
duke@435 385 // bailout when too many arguments for register representation
duke@435 386 if (!will_link || can_not_compile_call_site(dest_method, klass)) {
duke@435 387 #ifndef PRODUCT
duke@435 388 if (PrintOpto && (Verbose || WizardMode)) {
duke@435 389 method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
duke@435 390 dest_method->print_name(); tty->cr();
duke@435 391 }
duke@435 392 #endif
duke@435 393 return;
duke@435 394 }
duke@435 395 assert(holder_klass->is_loaded(), "");
twisti@1572 396 assert((dest_method->is_static() || is_invokedynamic) == !has_receiver , "must match bc");
duke@435 397 // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
duke@435 398 // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
duke@435 399 assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
duke@435 400 // Note: In the absence of miranda methods, an abstract class K can perform
duke@435 401 // an invokevirtual directly on an interface method I.m if K implements I.
duke@435 402
duke@435 403 // ---------------------
duke@435 404 // Does Class Hierarchy Analysis reveal only a single target of a v-call?
duke@435 405 // Then we may inline or make a static call, but become dependent on there being only 1 target.
duke@435 406 // Does the call-site type profile reveal only one receiver?
duke@435 407 // Then we may introduce a run-time check and inline on the path where it succeeds.
duke@435 408 // The other path may uncommon_trap, check for another receiver, or do a v-call.
duke@435 409
duke@435 410 // Choose call strategy.
duke@435 411 bool call_is_virtual = is_virtual_or_interface;
duke@435 412 int vtable_index = methodOopDesc::invalid_vtable_index;
duke@435 413 ciMethod* call_method = dest_method;
duke@435 414
duke@435 415 // Try to get the most accurate receiver type
duke@435 416 if (is_virtual_or_interface) {
duke@435 417 Node* receiver_node = stack(sp() - nargs);
duke@435 418 const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
duke@435 419 ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type);
duke@435 420
duke@435 421 // Has the call been sufficiently improved such that it is no longer a virtual?
duke@435 422 if (optimized_virtual_method != NULL) {
duke@435 423 call_method = optimized_virtual_method;
duke@435 424 call_is_virtual = false;
duke@435 425 } else if (!UseInlineCaches && is_virtual && call_method->is_loaded()) {
duke@435 426 // We can make a vtable call at this site
duke@435 427 vtable_index = call_method->resolve_vtable_index(method()->holder(), klass);
duke@435 428 }
duke@435 429 }
duke@435 430
duke@435 431 // Note: It's OK to try to inline a virtual call.
duke@435 432 // The call generator will not attempt to inline a polymorphic call
duke@435 433 // unless it knows how to optimize the receiver dispatch.
duke@435 434 bool try_inline = (C->do_inlining() || InlineAccessors);
duke@435 435
duke@435 436 // ---------------------
duke@435 437 inc_sp(- nargs); // Temporarily pop args for JVM state of call
duke@435 438 JVMState* jvms = sync_jvms();
duke@435 439
duke@435 440 // ---------------------
duke@435 441 // Decide call tactic.
duke@435 442 // This call checks with CHA, the interpreter profile, intrinsics table, etc.
duke@435 443 // It decides whether inlining is desirable or not.
duke@435 444 CallGenerator* cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
duke@435 445
duke@435 446 // ---------------------
duke@435 447 // Round double arguments before call
duke@435 448 round_double_arguments(dest_method);
duke@435 449
duke@435 450 #ifndef PRODUCT
duke@435 451 // bump global counters for calls
duke@435 452 count_compiled_calls(false/*at_method_entry*/, cg->is_inline());
duke@435 453
duke@435 454 // Record first part of parsing work for this call
duke@435 455 parse_histogram()->record_change();
duke@435 456 #endif // not PRODUCT
duke@435 457
duke@435 458 assert(jvms == this->jvms(), "still operating on the right JVMS");
duke@435 459 assert(jvms_in_sync(), "jvms must carry full info into CG");
duke@435 460
duke@435 461 // save across call, for a subsequent cast_not_null.
duke@435 462 Node* receiver = has_receiver ? argument(0) : NULL;
duke@435 463
duke@435 464 // Bump method data counters (We profile *before* the call is made
duke@435 465 // because exceptions don't return to the call site.)
duke@435 466 profile_call(receiver);
duke@435 467
duke@435 468 JVMState* new_jvms;
duke@435 469 if ((new_jvms = cg->generate(jvms)) == NULL) {
duke@435 470 // When inlining attempt fails (e.g., too many arguments),
duke@435 471 // it may contaminate the current compile state, making it
duke@435 472 // impossible to pull back and try again. Once we call
duke@435 473 // cg->generate(), we are committed. If it fails, the whole
duke@435 474 // compilation task is compromised.
duke@435 475 if (failing()) return;
duke@435 476 #ifndef PRODUCT
duke@435 477 if (PrintOpto || PrintOptoInlining || PrintInlining) {
duke@435 478 // Only one fall-back, so if an intrinsic fails, ignore any bytecodes.
duke@435 479 if (cg->is_intrinsic() && call_method->code_size() > 0) {
duke@435 480 tty->print("Bailed out of intrinsic, will not inline: ");
duke@435 481 call_method->print_name(); tty->cr();
duke@435 482 }
duke@435 483 }
duke@435 484 #endif
duke@435 485 // This can happen if a library intrinsic is available, but refuses
duke@435 486 // the call site, perhaps because it did not match a pattern the
duke@435 487 // intrinsic was expecting to optimize. The fallback position is
duke@435 488 // to call out-of-line.
duke@435 489 try_inline = false; // Inline tactic bailed out.
duke@435 490 cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
duke@435 491 if ((new_jvms = cg->generate(jvms)) == NULL) {
duke@435 492 guarantee(failing(), "call failed to generate: calls should work");
duke@435 493 return;
duke@435 494 }
duke@435 495 }
duke@435 496
duke@435 497 if (cg->is_inline()) {
never@502 498 // Accumulate has_loops estimate
never@502 499 C->set_has_loops(C->has_loops() || call_method->has_loops());
duke@435 500 C->env()->notice_inlined_method(call_method);
duke@435 501 }
duke@435 502
duke@435 503 // Reset parser state from [new_]jvms, which now carries results of the call.
duke@435 504 // Return value (if any) is already pushed on the stack by the cg.
duke@435 505 add_exception_states_from(new_jvms);
duke@435 506 if (new_jvms->map()->control() == top()) {
duke@435 507 stop_and_kill_map();
duke@435 508 } else {
duke@435 509 assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
duke@435 510 set_jvms(new_jvms);
duke@435 511 }
duke@435 512
duke@435 513 if (!stopped()) {
duke@435 514 // This was some sort of virtual call, which did a null check for us.
duke@435 515 // Now we can assert receiver-not-null, on the normal return path.
duke@435 516 if (receiver != NULL && cg->is_virtual()) {
duke@435 517 Node* cast = cast_not_null(receiver);
duke@435 518 // %%% assert(receiver == cast, "should already have cast the receiver");
duke@435 519 }
duke@435 520
duke@435 521 // Round double result after a call from strict to non-strict code
duke@435 522 round_double_result(dest_method);
duke@435 523
duke@435 524 // If the return type of the method is not loaded, assert that the
duke@435 525 // value we got is a null. Otherwise, we need to recompile.
duke@435 526 if (!dest_method->return_type()->is_loaded()) {
duke@435 527 #ifndef PRODUCT
duke@435 528 if (PrintOpto && (Verbose || WizardMode)) {
duke@435 529 method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
duke@435 530 dest_method->print_name(); tty->cr();
duke@435 531 }
duke@435 532 #endif
duke@435 533 if (C->log() != NULL) {
duke@435 534 C->log()->elem("assert_null reason='return' klass='%d'",
duke@435 535 C->log()->identify(dest_method->return_type()));
duke@435 536 }
duke@435 537 // If there is going to be a trap, put it at the next bytecode:
duke@435 538 set_bci(iter().next_bci());
duke@435 539 do_null_assert(peek(), T_OBJECT);
duke@435 540 set_bci(iter().cur_bci()); // put it back
duke@435 541 }
duke@435 542 }
duke@435 543
duke@435 544 // Restart record of parsing work after possible inlining of call
duke@435 545 #ifndef PRODUCT
duke@435 546 parse_histogram()->set_initial_state(bc());
duke@435 547 #endif
duke@435 548 }
duke@435 549
duke@435 550 //---------------------------catch_call_exceptions-----------------------------
duke@435 551 // Put Catch and CatchProj nodes behind a just-created call.
duke@435 552 // Send their caught exceptions to the proper handler.
duke@435 553 // This may be used after a call to the rethrow VM stub,
duke@435 554 // when it is needed to process unloaded exception classes.
duke@435 555 void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
duke@435 556 // Exceptions are delivered through this channel:
duke@435 557 Node* i_o = this->i_o();
duke@435 558
duke@435 559 // Add a CatchNode.
duke@435 560 GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
duke@435 561 GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
duke@435 562 GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);
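// bcis/extypes collect the handler bcis and exception types for the CatchNode
// built below; saw_unloaded records bcis whose catch klass is not loaded so
// those paths are later replaced by an uncommon trap (duplicate unloaded bcis
// are skipped to keep the CatchNode small).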
duke@435 563
duke@435 564 for (; !handlers.is_done(); handlers.next()) {
duke@435 565 ciExceptionHandler* h = handlers.handler();
duke@435 566 int h_bci = h->handler_bci();
duke@435 567 ciInstanceKlass* h_klass = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
duke@435 568 // Do not introduce unloaded exception types into the graph:
duke@435 569 if (!h_klass->is_loaded()) {
duke@435 570 if (saw_unloaded->contains(h_bci)) {
duke@435 571 /* We've already seen an unloaded exception with h_bci,
duke@435 572 so don't duplicate. Duplication will cause the CatchNode to be
duke@435 573 unnecessarily large. See 4713716. */
duke@435 574 continue;
duke@435 575 } else {
duke@435 576 saw_unloaded->append(h_bci);
duke@435 577 }
duke@435 578 }
duke@435 579 const Type* h_extype = TypeOopPtr::make_from_klass(h_klass);
duke@435 580 // (We use make_from_klass because it respects UseUniqueSubclasses.)
duke@435 581 h_extype = h_extype->join(TypeInstPtr::NOTNULL);
duke@435 582 assert(!h_extype->empty(), "sanity");
duke@435 583 // Note: It's OK if the BCIs repeat themselves.
duke@435 584 bcis->append(h_bci);
duke@435 585 extypes->append(h_extype);
duke@435 586 }
duke@435 587
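// Build one CatchNode with len+1 projections: one per collected handler bci
// (wired up in the loop below) plus a fall-through projection for the normal
// return path (taken at the end of this method).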
duke@435 588 int len = bcis->length();
duke@435 589 CatchNode *cn = new (C, 2) CatchNode(control(), i_o, len+1);
duke@435 590 Node *catch_ = _gvn.transform(cn);
duke@435 591
duke@435 592 // now branch with the exception state to each of the (potential)
duke@435 593 // handlers
duke@435 594 for(int i=0; i < len; i++) {
duke@435 595 // Setup JVM state to enter the handler.
duke@435 596 PreserveJVMState pjvms(this);
duke@435 597 // Locals are just copied from before the call.
duke@435 598 // Get control from the CatchNode.
duke@435 599 int handler_bci = bcis->at(i);
duke@435 600 Node* ctrl = _gvn.transform( new (C, 1) CatchProjNode(catch_, i+1,handler_bci));
duke@435 601 // This handler cannot happen?
duke@435 602 if (ctrl == top()) continue;
duke@435 603 set_control(ctrl);
duke@435 604
duke@435 605 // Create exception oop
duke@435 606 const TypeInstPtr* extype = extypes->at(i)->is_instptr();
duke@435 607 Node *ex_oop = _gvn.transform(new (C, 2) CreateExNode(extypes->at(i), ctrl, i_o));
duke@435 608
duke@435 609 // Handle unloaded exception classes.
duke@435 610 if (saw_unloaded->contains(handler_bci)) {
duke@435 611 // An unloaded exception type is coming here. Do an uncommon trap.
duke@435 612 #ifndef PRODUCT
duke@435 613 // We do not expect the same handler bci to take both cold unloaded
duke@435 614 // and hot loaded exceptions. But, watch for it.
duke@435 615 if (extype->is_loaded()) {
duke@435 616 tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", handler_bci);
duke@435 617 method()->print_name(); tty->cr();
duke@435 618 } else if (PrintOpto && (Verbose || WizardMode)) {
duke@435 619 tty->print("Bailing out on unloaded exception type ");
duke@435 620 extype->klass()->print_name();
duke@435 621 tty->print(" at bci:%d in ", bci());
duke@435 622 method()->print_name(); tty->cr();
duke@435 623 }
duke@435 624 #endif
duke@435 625 // Emit an uncommon trap instead of processing the block.
duke@435 626 set_bci(handler_bci);
duke@435 627 push_ex_oop(ex_oop);
duke@435 628 uncommon_trap(Deoptimization::Reason_unloaded,
duke@435 629 Deoptimization::Action_reinterpret,
duke@435 630 extype->klass(), "!loaded exception");
duke@435 631 set_bci(iter().cur_bci()); // put it back
duke@435 632 continue;
duke@435 633 }
duke@435 634
duke@435 635 // go to the exception handler
duke@435 636 if (handler_bci < 0) { // merge with corresponding rethrow node
duke@435 637 throw_to_exit(make_exception_state(ex_oop));
duke@435 638 } else { // Else jump to corresponding handler
duke@435 639 push_ex_oop(ex_oop); // Clear stack and push just the oop.
duke@435 640 merge_exception(handler_bci);
duke@435 641 }
duke@435 642 }
duke@435 643
duke@435 644 // The first CatchProj is for the normal return.
duke@435 645 // (Note: If this is a call to rethrow_Java, this node goes dead.)
duke@435 646 set_control(_gvn.transform( new (C, 1) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
duke@435 647 }
duke@435 648
duke@435 649
duke@435 650 //----------------------------catch_inline_exceptions--------------------------
duke@435 651 // Handle all exceptions thrown by an inlined method or individual bytecode.
duke@435 652 // Common case 1: we have no handler, so all exceptions merge right into
duke@435 653 // the rethrow case.
duke@435 654 // Case 2: we have some handlers, with loaded exception klasses that have
duke@435 655 // no subklasses. We do a Deutsch-Schiffman style type-check on the incoming
duke@435 656 // exception oop and branch to the handler directly.
duke@435 657 // Case 3: We have some handlers with subklasses or that are not loaded at
duke@435 658 // compile-time. We have to call the runtime to resolve the exception.
duke@435 659 // So we insert a RethrowCall and all the logic that goes with it.
duke@435 660 void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
duke@435 661 // Caller is responsible for saving away the map for normal control flow!
duke@435 662 assert(stopped(), "call set_map(NULL) first");
duke@435 663 assert(method()->has_exception_handlers(), "don't come here w/o work to do");
duke@435 664
duke@435 665 Node* ex_node = saved_ex_oop(ex_map);
duke@435 666 if (ex_node == top()) {
duke@435 667 // No action needed.
duke@435 668 return;
duke@435 669 }
duke@435 670 const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
duke@435 671 NOT_PRODUCT(if (ex_type==NULL) tty->print_cr("*** Exception not InstPtr"));
duke@435 672 if (ex_type == NULL)
duke@435 673 ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();
duke@435 674
duke@435 675 // determine potential exception handlers
duke@435 676 ciExceptionHandlerStream handlers(method(), bci(),
duke@435 677 ex_type->klass()->as_instance_klass(),
duke@435 678 ex_type->klass_is_exact());
duke@435 679
duke@435 680 // Start executing from the given throw state. (Keep its stack, for now.)
duke@435 681 // Get the exception oop as known at compile time.
duke@435 682 ex_node = use_exception_state(ex_map);
duke@435 683
duke@435 684 // Get the exception oop klass from its header
duke@435 685 Node* ex_klass_node = NULL;
duke@435 686 if (has_ex_handler() && !ex_type->klass_is_exact()) {
duke@435 687 Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
kvn@599 688 ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
duke@435 689
duke@435 690 // Compute the exception klass a little more cleverly.
duke@435 691 // Obvious solution is to simply do a LoadKlass from the 'ex_node'.
duke@435 692 // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
duke@435 693 // each arm of the Phi. If I know something clever about the exceptions
duke@435 694 // I'm loading the class from, I can replace the LoadKlass with the
duke@435 695 // klass constant for the exception oop.
duke@435 696 if( ex_node->is_Phi() ) {
duke@435 697 ex_klass_node = new (C, ex_node->req()) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT );
duke@435 698 for( uint i = 1; i < ex_node->req(); i++ ) {
duke@435 699 Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() );
kvn@599 700 Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
duke@435 701 ex_klass_node->init_req( i, k );
duke@435 702 }
duke@435 703 _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);
duke@435 704
duke@435 705 }
duke@435 706 }
duke@435 707
duke@435 708 // Scan the exception table for applicable handlers.
duke@435 709 // If none, we can call rethrow() and be done!
duke@435 710 // If precise (loaded with no subklasses), insert a D.S. style
duke@435 711 // pointer compare to the correct handler and loop back.
duke@435 712 // If imprecise, switch to the Rethrow VM-call style handling.
duke@435 713
duke@435 714 int remaining = handlers.count_remaining();
duke@435 715
duke@435 716 // iterate through all entries sequentially
duke@435 717 for (;!handlers.is_done(); handlers.next()) {
duke@435 718 ciExceptionHandler* handler = handlers.handler();
duke@435 719
duke@435 720 if (handler->is_rethrow()) {
duke@435 721 // If we fell off the end of the table without finding an imprecise
duke@435 722 // exception klass (and without finding a generic handler) then we
duke@435 723 // know this exception is not handled in this method. We just rethrow
duke@435 724 // the exception into the caller.
duke@435 725 throw_to_exit(make_exception_state(ex_node));
duke@435 726 return;
duke@435 727 }
duke@435 728
duke@435 729 // exception handler bci range covers throw_bci => investigate further
duke@435 730 int handler_bci = handler->handler_bci();
duke@435 731
duke@435 732 if (remaining == 1) {
duke@435 733 push_ex_oop(ex_node); // Push exception oop for handler
duke@435 734 #ifndef PRODUCT
duke@435 735 if (PrintOpto && WizardMode) {
duke@435 736 tty->print_cr(" Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
duke@435 737 }
duke@435 738 #endif
duke@435 739 merge_exception(handler_bci); // jump to handler
duke@435 740 return; // No more handling to be done here!
duke@435 741 }
duke@435 742
never@1779 743 // Get the handler's klass
never@1779 744 ciInstanceKlass* klass = handler->catch_klass();
duke@435 745
never@1779 746 if (!klass->is_loaded()) { // klass is not loaded?
never@1779 747 // fall through into catch_call_exceptions which will emit a
never@1779 748 // handler with an uncommon trap.
never@1779 749 break;
duke@435 750 }
duke@435 751
duke@435 752 if (klass->is_interface()) // should not happen, but...
duke@435 753 break; // bail out
duke@435 754
never@1779 755 // Check the type of the exception against the catch type
duke@435 756 const TypeKlassPtr *tk = TypeKlassPtr::make(klass);
duke@435 757 Node* con = _gvn.makecon(tk);
never@1779 758 Node* not_subtype_ctrl = gen_subtype_check(ex_klass_node, con);
never@1779 759 if (!stopped()) {
never@1779 760 PreserveJVMState pjvms(this);
never@1779 761 const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr();
never@1779 762 assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness");
duke@435 763 Node* ex_oop = _gvn.transform(new (C, 2) CheckCastPPNode(control(), ex_node, tinst));
duke@435 764 push_ex_oop(ex_oop); // Push exception oop for handler
duke@435 765 #ifndef PRODUCT
duke@435 766 if (PrintOpto && WizardMode) {
duke@435 767 tty->print(" Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
duke@435 768 klass->print_name();
duke@435 769 tty->cr();
duke@435 770 }
duke@435 771 #endif
duke@435 772 merge_exception(handler_bci);
duke@435 773 }
never@1779 774 set_control(not_subtype_ctrl);
duke@435 775
duke@435 776 // Come here if exception does not match handler.
duke@435 777 // Carry on with more handler checks.
duke@435 778 --remaining;
duke@435 779 }
duke@435 780
duke@435 781 assert(!stopped(), "you should return if you finish the chain");
duke@435 782
duke@435 783 // Oops, need to call into the VM to resolve the klasses at runtime.
duke@435 784 // Note: This call must not deoptimize, since it is not a real call at this bci!
duke@435 785 kill_dead_locals();
duke@435 786
duke@435 787 make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
duke@435 788 OptoRuntime::rethrow_Type(),
duke@435 789 OptoRuntime::rethrow_stub(),
duke@435 790 NULL, NULL,
duke@435 791 ex_node);
duke@435 792
duke@435 793 // Rethrow is a pure call, no side effects, only a result.
duke@435 794 // The result cannot be allocated, so we use I_O
duke@435 795
duke@435 796 // Catch exceptions from the rethrow
duke@435 797 catch_call_exceptions(handlers);
duke@435 798 }
duke@435 799
duke@435 800
duke@435 801 // (Note: Moved add_debug_info into GraphKit::add_safepoint_edges.)
duke@435 802
duke@435 803
duke@435 804 #ifndef PRODUCT
duke@435 805 void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
duke@435 806 if( CountCompiledCalls ) {
duke@435 807 if( at_method_entry ) {
duke@435 808 // bump invocation counter if top method (for statistics)
duke@435 809 if (CountCompiledCalls && depth() == 1) {
duke@435 810 const TypeInstPtr* addr_type = TypeInstPtr::make(method());
duke@435 811 Node* adr1 = makecon(addr_type);
duke@435 812 Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(methodOopDesc::compiled_invocation_counter_offset()));
duke@435 813 increment_counter(adr2);
duke@435 814 }
duke@435 815 } else if (is_inline) {
duke@435 816 switch (bc()) {
duke@435 817 case Bytecodes::_invokevirtual: increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
duke@435 818 case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
duke@435 819 case Bytecodes::_invokestatic:
jrose@1161 820 case Bytecodes::_invokedynamic:
duke@435 821 case Bytecodes::_invokespecial: increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
duke@435 822 default: fatal("unexpected call bytecode");
duke@435 823 }
duke@435 824 } else {
duke@435 825 switch (bc()) {
duke@435 826 case Bytecodes::_invokevirtual: increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
duke@435 827 case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
duke@435 828 case Bytecodes::_invokestatic:
jrose@1161 829 case Bytecodes::_invokedynamic:
duke@435 830 case Bytecodes::_invokespecial: increment_counter(SharedRuntime::nof_static_calls_addr()); break;
duke@435 831 default: fatal("unexpected call bytecode");
duke@435 832 }
duke@435 833 }
duke@435 834 }
duke@435 835 }
duke@435 836 #endif //PRODUCT
duke@435 837
duke@435 838
duke@435 839 // Identify possible target method and inlining style
duke@435 840 ciMethod* Parse::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
duke@435 841 ciMethod *dest_method, const TypeOopPtr* receiver_type) {
duke@435 842 // only use for virtual or interface calls
duke@435 843
duke@435 844 // If it is obviously final, do not bother to call find_monomorphic_target,
duke@435 845 // because the class hierarchy checks are not needed, and may fail due to
duke@435 846 // incompletely loaded classes. Since we do our own class loading checks
duke@435 847 // in this module, we may confidently bind to any method.
duke@435 848 if (dest_method->can_be_statically_bound()) {
duke@435 849 return dest_method;
duke@435 850 }
duke@435 851
duke@435 852 // Attempt to improve the receiver
duke@435 853 bool actual_receiver_is_exact = false;
duke@435 854 ciInstanceKlass* actual_receiver = klass;
duke@435 855 if (receiver_type != NULL) {
duke@435 856 // Array methods are all inherited from Object, and are monomorphic.
duke@435 857 if (receiver_type->isa_aryptr() &&
duke@435 858 dest_method->holder() == env()->Object_klass()) {
duke@435 859 return dest_method;
duke@435 860 }
duke@435 861
duke@435 862 // All other interesting cases are instance klasses.
duke@435 863 if (!receiver_type->isa_instptr()) {
duke@435 864 return NULL;
duke@435 865 }
duke@435 866
duke@435 867 ciInstanceKlass *ikl = receiver_type->klass()->as_instance_klass();
duke@435 868 if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&
never@802 869 (ikl == actual_receiver || ikl->is_subtype_of(actual_receiver))) {
duke@435 870 // ikl is a same or better type than the original actual_receiver,
duke@435 871 // e.g. static receiver from bytecodes.
duke@435 872 actual_receiver = ikl;
duke@435 873 // Is the actual_receiver exact?
duke@435 874 actual_receiver_is_exact = receiver_type->klass_is_exact();
duke@435 875 }
duke@435 876 }
duke@435 877
duke@435 878 ciInstanceKlass* calling_klass = caller->holder();
duke@435 879 ciMethod* cha_monomorphic_target = dest_method->find_monomorphic_target(calling_klass, klass, actual_receiver);
duke@435 880 if (cha_monomorphic_target != NULL) {
duke@435 881 assert(!cha_monomorphic_target->is_abstract(), "");
duke@435 882 // Look at the method-receiver type. Does it add "too much information"?
duke@435 883 ciKlass* mr_klass = cha_monomorphic_target->holder();
duke@435 884 const Type* mr_type = TypeInstPtr::make(TypePtr::BotPTR, mr_klass);
duke@435 885 if (receiver_type == NULL || !receiver_type->higher_equal(mr_type)) {
duke@435 886 // Calling this method would include an implicit cast to its holder.
duke@435 887 // %%% Not yet implemented. Would throw minor asserts at present.
duke@435 888 // %%% The most common wins are already gained by +UseUniqueSubclasses.
duke@435 889 // To fix, put the higher_equal check at the call of this routine,
duke@435 890 // and add a CheckCastPP to the receiver.
duke@435 891 if (TraceDependencies) {
duke@435 892 tty->print_cr("found unique CHA method, but could not cast up");
duke@435 893 tty->print(" method = ");
duke@435 894 cha_monomorphic_target->print();
duke@435 895 tty->cr();
duke@435 896 }
duke@435 897 if (C->log() != NULL) {
duke@435 898 C->log()->elem("missed_CHA_opportunity klass='%d' method='%d'",
duke@435 899 C->log()->identify(klass),
duke@435 900 C->log()->identify(cha_monomorphic_target));
duke@435 901 }
duke@435 902 cha_monomorphic_target = NULL;
duke@435 903 }
duke@435 904 }
duke@435 905 if (cha_monomorphic_target != NULL) {
duke@435 906 // Hardwiring a virtual.
duke@435 907 // If we inlined because CHA revealed only a single target method,
duke@435 908 // then we are dependent on that target method not getting overridden
duke@435 909 // by dynamic class loading. Be sure to test the "static" receiver
duke@435 910 // dest_method here, as opposed to the actual receiver, which may
duke@435 911 // falsely lead us to believe that the receiver is final or private.
duke@435 912 C->dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
duke@435 913 return cha_monomorphic_target;
duke@435 914 }
duke@435 915
duke@435 916 // If the type is exact, we can still bind the method w/o a vcall.
duke@435 917 // (This case comes after CHA so we can see how much extra work it does.)
duke@435 918 if (actual_receiver_is_exact) {
duke@435 919 // In case of evolution, there is a dependence on every inlined method, since each
duke@435 920 // such method can be changed when its class is redefined.
duke@435 921 ciMethod* exact_method = dest_method->resolve_invoke(calling_klass, actual_receiver);
duke@435 922 if (exact_method != NULL) {
duke@435 923 #ifndef PRODUCT
duke@435 924 if (PrintOpto) {
duke@435 925 tty->print(" Calling method via exact type @%d --- ", bci);
duke@435 926 exact_method->print_name();
duke@435 927 tty->cr();
duke@435 928 }
duke@435 929 #endif
duke@435 930 return exact_method;
duke@435 931 }
duke@435 932 }
duke@435 933
duke@435 934 return NULL;
duke@435 935 }
