1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/src/share/vm/opto/doCall.cpp Sat Dec 01 00:00:00 2007 +0000 1.3 @@ -0,0 +1,862 @@ 1.4 +/* 1.5 + * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved. 1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.7 + * 1.8 + * This code is free software; you can redistribute it and/or modify it 1.9 + * under the terms of the GNU General Public License version 2 only, as 1.10 + * published by the Free Software Foundation. 1.11 + * 1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 1.15 + * version 2 for more details (a copy is included in the LICENSE file that 1.16 + * accompanied this code). 1.17 + * 1.18 + * You should have received a copy of the GNU General Public License version 1.19 + * 2 along with this work; if not, write to the Free Software Foundation, 1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 1.21 + * 1.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 1.23 + * CA 95054 USA or visit www.sun.com if you need additional information or 1.24 + * have any questions. 
1.25 + * 1.26 + */ 1.27 + 1.28 +#include "incls/_precompiled.incl" 1.29 +#include "incls/_doCall.cpp.incl" 1.30 + 1.31 +#ifndef PRODUCT 1.32 +void trace_type_profile(ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) { 1.33 + if (TraceTypeProfile || PrintInlining || PrintOptoInlining) { 1.34 + tty->print(" "); 1.35 + for( int i = 0; i < depth; i++ ) tty->print(" "); 1.36 + if (!PrintOpto) { 1.37 + method->print_short_name(); 1.38 + tty->print(" ->"); 1.39 + } 1.40 + tty->print(" @ %d ", bci); 1.41 + prof_method->print_short_name(); 1.42 + tty->print(" >>TypeProfile (%d/%d counts) = ", receiver_count, site_count); 1.43 + prof_klass->name()->print_symbol(); 1.44 + tty->print_cr(" (%d bytes)", prof_method->code_size()); 1.45 + } 1.46 +} 1.47 +#endif 1.48 + 1.49 +CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float prof_factor) { 1.50 + CallGenerator* cg; 1.51 + 1.52 + // Dtrace currently doesn't work unless all calls are vanilla 1.53 + if (DTraceMethodProbes) { 1.54 + allow_inline = false; 1.55 + } 1.56 + 1.57 + // Note: When we get profiling during stage-1 compiles, we want to pull 1.58 + // from more specific profile data which pertains to this inlining. 1.59 + // Right now, ignore the information in jvms->caller(), and do method[bci]. 1.60 + ciCallProfile profile = jvms->method()->call_profile_at_bci(jvms->bci()); 1.61 + 1.62 + // See how many times this site has been invoked. 1.63 + int site_count = profile.count(); 1.64 + int receiver_count = -1; 1.65 + if (call_is_virtual && UseTypeProfile && profile.has_receiver(0)) { 1.66 + // Receivers in the profile structure are ordered by call counts 1.67 + // so that the most called (major) receiver is profile.receiver(0). 
1.68 + receiver_count = profile.receiver_count(0); 1.69 + } 1.70 + 1.71 + CompileLog* log = this->log(); 1.72 + if (log != NULL) { 1.73 + int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1; 1.74 + int r2id = (profile.morphism() == 2)? log->identify(profile.receiver(1)):-1; 1.75 + log->begin_elem("call method='%d' count='%d' prof_factor='%g'", 1.76 + log->identify(call_method), site_count, prof_factor); 1.77 + if (call_is_virtual) log->print(" virtual='1'"); 1.78 + if (allow_inline) log->print(" inline='1'"); 1.79 + if (receiver_count >= 0) { 1.80 + log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count); 1.81 + if (profile.has_receiver(1)) { 1.82 + log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1)); 1.83 + } 1.84 + } 1.85 + log->end_elem(); 1.86 + } 1.87 + 1.88 + // Special case the handling of certain common, profitable library 1.89 + // methods. If these methods are replaced with specialized code, 1.90 + // then we return it as the inlined version of the call. 1.91 + // We do this before the strict f.p. check below because the 1.92 + // intrinsics handle strict f.p. correctly. 1.93 + if (allow_inline) { 1.94 + cg = find_intrinsic(call_method, call_is_virtual); 1.95 + if (cg != NULL) return cg; 1.96 + } 1.97 + 1.98 + // Do not inline strict fp into non-strict code, or the reverse 1.99 + bool caller_method_is_strict = jvms->method()->is_strict(); 1.100 + if( caller_method_is_strict ^ call_method->is_strict() ) { 1.101 + allow_inline = false; 1.102 + } 1.103 + 1.104 + // Attempt to inline... 1.105 + if (allow_inline) { 1.106 + // The profile data is only partly attributable to this caller, 1.107 + // scale back the call site information. 1.108 + float past_uses = jvms->method()->scale_count(site_count, prof_factor); 1.109 + // This is the number of times we expect the call code to be used. 
1.110 + float expected_uses = past_uses; 1.111 + 1.112 + // Try inlining a bytecoded method: 1.113 + if (!call_is_virtual) { 1.114 + InlineTree* ilt; 1.115 + if (UseOldInlining) { 1.116 + ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method()); 1.117 + } else { 1.118 + // Make a disembodied, stateless ILT. 1.119 + // TO DO: When UseOldInlining is removed, copy the ILT code elsewhere. 1.120 + float site_invoke_ratio = prof_factor; 1.121 + // Note: ilt is for the root of this parse, not the present call site. 1.122 + ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio); 1.123 + } 1.124 + WarmCallInfo scratch_ci; 1.125 + if (!UseOldInlining) 1.126 + scratch_ci.init(jvms, call_method, profile, prof_factor); 1.127 + WarmCallInfo* ci = ilt->ok_to_inline(call_method, jvms, profile, &scratch_ci); 1.128 + assert(ci != &scratch_ci, "do not let this pointer escape"); 1.129 + bool allow_inline = (ci != NULL && !ci->is_cold()); 1.130 + bool require_inline = (allow_inline && ci->is_hot()); 1.131 + 1.132 + if (allow_inline) { 1.133 + CallGenerator* cg = CallGenerator::for_inline(call_method, expected_uses); 1.134 + if (cg == NULL) { 1.135 + // Fall through. 1.136 + } else if (require_inline || !InlineWarmCalls) { 1.137 + return cg; 1.138 + } else { 1.139 + CallGenerator* cold_cg = call_generator(call_method, vtable_index, call_is_virtual, jvms, false, prof_factor); 1.140 + return CallGenerator::for_warm_call(ci, cold_cg, cg); 1.141 + } 1.142 + } 1.143 + } 1.144 + 1.145 + // Try using the type profile. 1.146 + if (call_is_virtual && site_count > 0 && receiver_count > 0) { 1.147 + // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count. 
1.148 + bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent); 1.149 + ciMethod* receiver_method = NULL; 1.150 + if (have_major_receiver || profile.morphism() == 1 || 1.151 + (profile.morphism() == 2 && UseBimorphicInlining)) { 1.152 + // receiver_method = profile.method(); 1.153 + // Profiles do not suggest methods now. Look it up in the major receiver. 1.154 + receiver_method = call_method->resolve_invoke(jvms->method()->holder(), 1.155 + profile.receiver(0)); 1.156 + } 1.157 + if (receiver_method != NULL) { 1.158 + // The single majority receiver sufficiently outweighs the minority. 1.159 + CallGenerator* hit_cg = this->call_generator(receiver_method, 1.160 + vtable_index, !call_is_virtual, jvms, allow_inline, prof_factor); 1.161 + if (hit_cg != NULL) { 1.162 + // Look up second receiver. 1.163 + CallGenerator* next_hit_cg = NULL; 1.164 + ciMethod* next_receiver_method = NULL; 1.165 + if (profile.morphism() == 2 && UseBimorphicInlining) { 1.166 + next_receiver_method = call_method->resolve_invoke(jvms->method()->holder(), 1.167 + profile.receiver(1)); 1.168 + if (next_receiver_method != NULL) { 1.169 + next_hit_cg = this->call_generator(next_receiver_method, 1.170 + vtable_index, !call_is_virtual, jvms, 1.171 + allow_inline, prof_factor); 1.172 + if (next_hit_cg != NULL && !next_hit_cg->is_inline() && 1.173 + have_major_receiver && UseOnlyInlinedBimorphic) { 1.174 + // Skip if we can't inline second receiver's method 1.175 + next_hit_cg = NULL; 1.176 + } 1.177 + } 1.178 + } 1.179 + CallGenerator* miss_cg; 1.180 + if (( profile.morphism() == 1 || 1.181 + (profile.morphism() == 2 && next_hit_cg != NULL) ) && 1.182 + 1.183 + !too_many_traps(Deoptimization::Reason_class_check) 1.184 + 1.185 + // Check only total number of traps per method to allow 1.186 + // the transition from monomorphic to bimorphic case between 1.187 + // compilations without falling into virtual call. 
1.188 + // A monomorphic case may have the class_check trap flag set 1.189 + // due to the time gap between the uncommon trap processing 1.190 + // when flags are set in MDO and the call site bytecode execution 1.191 + // in Interpreter when MDO counters are updated. 1.192 + // There was also class_check trap in monomorphic case due to 1.193 + // the bug 6225440. 1.194 + 1.195 + ) { 1.196 + // Generate uncommon trap for class check failure path 1.197 + // in case of monomorphic or bimorphic virtual call site. 1.198 + miss_cg = CallGenerator::for_uncommon_trap(call_method, 1.199 + Deoptimization::Reason_class_check, 1.200 + Deoptimization::Action_maybe_recompile); 1.201 + } else { 1.202 + // Generate virtual call for class check failure path 1.203 + // in case of polymorphic virtual call site. 1.204 + miss_cg = CallGenerator::for_virtual_call(call_method, vtable_index); 1.205 + } 1.206 + if (miss_cg != NULL) { 1.207 + if (next_hit_cg != NULL) { 1.208 + NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth(), jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1))); 1.209 + // We don't need to record dependency on a receiver here and below. 1.210 + // Whenever we inline, the dependency is added by Parse::Parse(). 1.211 + miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX); 1.212 + } 1.213 + if (miss_cg != NULL) { 1.214 + NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth(), jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count)); 1.215 + cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0)); 1.216 + if (cg != NULL) return cg; 1.217 + } 1.218 + } 1.219 + } 1.220 + } 1.221 + } 1.222 + } 1.223 + 1.224 + // There was no special inlining tactic, or it bailed out. 1.225 + // Use a more generic tactic, like a simple call. 
1.226 + if (call_is_virtual) { 1.227 + return CallGenerator::for_virtual_call(call_method, vtable_index); 1.228 + } else { 1.229 + // Class Hierarchy Analysis or Type Profile reveals a unique target, 1.230 + // or it is a static or special call. 1.231 + return CallGenerator::for_direct_call(call_method); 1.232 + } 1.233 +} 1.234 + 1.235 + 1.236 +// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link 1.237 +bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) { 1.238 + // Additional inputs to consider... 1.239 + // bc = bc() 1.240 + // caller = method() 1.241 + // iter().get_method_holder_index() 1.242 + assert( dest_method->is_loaded(), "ciTypeFlow should not let us get here" ); 1.243 + // Interface classes can be loaded & linked and never get around to 1.244 + // being initialized. Uncommon-trap for not-initialized static or 1.245 + // v-calls. Let interface calls happen. 1.246 + ciInstanceKlass* holder_klass = dest_method->holder(); 1.247 + if (!holder_klass->is_initialized() && 1.248 + !holder_klass->is_interface()) { 1.249 + uncommon_trap(Deoptimization::Reason_uninitialized, 1.250 + Deoptimization::Action_reinterpret, 1.251 + holder_klass); 1.252 + return true; 1.253 + } 1.254 + 1.255 + assert(dest_method->will_link(method()->holder(), klass, bc()), "dest_method: typeflow responsibility"); 1.256 + return false; 1.257 +} 1.258 + 1.259 + 1.260 +//------------------------------do_call---------------------------------------- 1.261 +// Handle your basic call. Inline if we can & want to, else just setup call. 1.262 +void Parse::do_call() { 1.263 + // It's likely we are going to add debug info soon. 1.264 + // Also, if we inline a guy who eventually needs debug info for this JVMS, 1.265 + // our contribution to it is cleaned up right here. 
1.266 + kill_dead_locals(); 1.267 + 1.268 + // Set frequently used booleans 1.269 + bool is_virtual = bc() == Bytecodes::_invokevirtual; 1.270 + bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface; 1.271 + bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial; 1.272 + 1.273 + // Find target being called 1.274 + bool will_link; 1.275 + ciMethod* dest_method = iter().get_method(will_link); 1.276 + ciInstanceKlass* holder_klass = dest_method->holder(); 1.277 + ciKlass* holder = iter().get_declared_method_holder(); 1.278 + ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder); 1.279 + 1.280 + int nargs = dest_method->arg_size(); 1.281 + 1.282 + // uncommon-trap when callee is unloaded, uninitialized or will not link 1.283 + // bailout when too many arguments for register representation 1.284 + if (!will_link || can_not_compile_call_site(dest_method, klass)) { 1.285 +#ifndef PRODUCT 1.286 + if (PrintOpto && (Verbose || WizardMode)) { 1.287 + method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci()); 1.288 + dest_method->print_name(); tty->cr(); 1.289 + } 1.290 +#endif 1.291 + return; 1.292 + } 1.293 + assert(holder_klass->is_loaded(), ""); 1.294 + assert(dest_method->is_static() == !has_receiver, "must match bc"); 1.295 + // Note: this takes into account invokeinterface of methods declared in java/lang/Object, 1.296 + // which should be invokevirtuals but according to the VM spec may be invokeinterfaces 1.297 + assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc"); 1.298 + // Note: In the absence of miranda methods, an abstract class K can perform 1.299 + // an invokevirtual directly on an interface method I.m if K implements I. 1.300 + 1.301 + // --------------------- 1.302 + // Does Class Hierarchy Analysis reveal only a single target of a v-call? 
1.303 + // Then we may inline or make a static call, but become dependent on there being only 1 target. 1.304 + // Does the call-site type profile reveal only one receiver? 1.305 + // Then we may introduce a run-time check and inline on the path where it succeeds. 1.306 + // The other path may uncommon_trap, check for another receiver, or do a v-call. 1.307 + 1.308 + // Choose call strategy. 1.309 + bool call_is_virtual = is_virtual_or_interface; 1.310 + int vtable_index = methodOopDesc::invalid_vtable_index; 1.311 + ciMethod* call_method = dest_method; 1.312 + 1.313 + // Try to get the most accurate receiver type 1.314 + if (is_virtual_or_interface) { 1.315 + Node* receiver_node = stack(sp() - nargs); 1.316 + const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr(); 1.317 + ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type); 1.318 + 1.319 + // Has the call been sufficiently improved such that it is no longer a virtual? 1.320 + if (optimized_virtual_method != NULL) { 1.321 + call_method = optimized_virtual_method; 1.322 + call_is_virtual = false; 1.323 + } else if (!UseInlineCaches && is_virtual && call_method->is_loaded()) { 1.324 + // We can make a vtable call at this site 1.325 + vtable_index = call_method->resolve_vtable_index(method()->holder(), klass); 1.326 + } 1.327 + } 1.328 + 1.329 + // Note: It's OK to try to inline a virtual call. 1.330 + // The call generator will not attempt to inline a polymorphic call 1.331 + // unless it knows how to optimize the receiver dispatch. 1.332 + bool try_inline = (C->do_inlining() || InlineAccessors); 1.333 + 1.334 + // --------------------- 1.335 + inc_sp(- nargs); // Temporarily pop args for JVM state of call 1.336 + JVMState* jvms = sync_jvms(); 1.337 + 1.338 + // --------------------- 1.339 + // Decide call tactic. 1.340 + // This call checks with CHA, the interpreter profile, intrinsics table, etc. 
1.341 + // It decides whether inlining is desirable or not. 1.342 + CallGenerator* cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor()); 1.343 + 1.344 + // --------------------- 1.345 + // Round double arguments before call 1.346 + round_double_arguments(dest_method); 1.347 + 1.348 +#ifndef PRODUCT 1.349 + // bump global counters for calls 1.350 + count_compiled_calls(false/*at_method_entry*/, cg->is_inline()); 1.351 + 1.352 + // Record first part of parsing work for this call 1.353 + parse_histogram()->record_change(); 1.354 +#endif // not PRODUCT 1.355 + 1.356 + assert(jvms == this->jvms(), "still operating on the right JVMS"); 1.357 + assert(jvms_in_sync(), "jvms must carry full info into CG"); 1.358 + 1.359 + // save across call, for a subsequent cast_not_null. 1.360 + Node* receiver = has_receiver ? argument(0) : NULL; 1.361 + 1.362 + // Bump method data counters (We profile *before* the call is made 1.363 + // because exceptions don't return to the call site.) 1.364 + profile_call(receiver); 1.365 + 1.366 + JVMState* new_jvms; 1.367 + if ((new_jvms = cg->generate(jvms)) == NULL) { 1.368 + // When inlining attempt fails (e.g., too many arguments), 1.369 + // it may contaminate the current compile state, making it 1.370 + // impossible to pull back and try again. Once we call 1.371 + // cg->generate(), we are committed. If it fails, the whole 1.372 + // compilation task is compromised. 1.373 + if (failing()) return; 1.374 +#ifndef PRODUCT 1.375 + if (PrintOpto || PrintOptoInlining || PrintInlining) { 1.376 + // Only one fall-back, so if an intrinsic fails, ignore any bytecodes. 
1.377 + if (cg->is_intrinsic() && call_method->code_size() > 0) { 1.378 + tty->print("Bailed out of intrinsic, will not inline: "); 1.379 + call_method->print_name(); tty->cr(); 1.380 + } 1.381 + } 1.382 +#endif 1.383 + // This can happen if a library intrinsic is available, but refuses 1.384 + // the call site, perhaps because it did not match a pattern the 1.385 + // intrinsic was expecting to optimize. The fallback position is 1.386 + // to call out-of-line. 1.387 + try_inline = false; // Inline tactic bailed out. 1.388 + cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor()); 1.389 + if ((new_jvms = cg->generate(jvms)) == NULL) { 1.390 + guarantee(failing(), "call failed to generate: calls should work"); 1.391 + return; 1.392 + } 1.393 + } 1.394 + 1.395 + if (cg->is_inline()) { 1.396 + C->env()->notice_inlined_method(call_method); 1.397 + } 1.398 + 1.399 + // Reset parser state from [new_]jvms, which now carries results of the call. 1.400 + // Return value (if any) is already pushed on the stack by the cg. 1.401 + add_exception_states_from(new_jvms); 1.402 + if (new_jvms->map()->control() == top()) { 1.403 + stop_and_kill_map(); 1.404 + } else { 1.405 + assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged"); 1.406 + set_jvms(new_jvms); 1.407 + } 1.408 + 1.409 + if (!stopped()) { 1.410 + // This was some sort of virtual call, which did a null check for us. 1.411 + // Now we can assert receiver-not-null, on the normal return path. 1.412 + if (receiver != NULL && cg->is_virtual()) { 1.413 + Node* cast = cast_not_null(receiver); 1.414 + // %%% assert(receiver == cast, "should already have cast the receiver"); 1.415 + } 1.416 + 1.417 + // Round double result after a call from strict to non-strict code 1.418 + round_double_result(dest_method); 1.419 + 1.420 + // If the return type of the method is not loaded, assert that the 1.421 + // value we got is a null. Otherwise, we need to recompile. 
1.422 + if (!dest_method->return_type()->is_loaded()) { 1.423 +#ifndef PRODUCT 1.424 + if (PrintOpto && (Verbose || WizardMode)) { 1.425 + method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci()); 1.426 + dest_method->print_name(); tty->cr(); 1.427 + } 1.428 +#endif 1.429 + if (C->log() != NULL) { 1.430 + C->log()->elem("assert_null reason='return' klass='%d'", 1.431 + C->log()->identify(dest_method->return_type())); 1.432 + } 1.433 + // If there is going to be a trap, put it at the next bytecode: 1.434 + set_bci(iter().next_bci()); 1.435 + do_null_assert(peek(), T_OBJECT); 1.436 + set_bci(iter().cur_bci()); // put it back 1.437 + } 1.438 + } 1.439 + 1.440 + // Restart record of parsing work after possible inlining of call 1.441 +#ifndef PRODUCT 1.442 + parse_histogram()->set_initial_state(bc()); 1.443 +#endif 1.444 +} 1.445 + 1.446 +//---------------------------catch_call_exceptions----------------------------- 1.447 +// Put a Catch and CatchProj nodes behind a just-created call. 1.448 +// Send their caught exceptions to the proper handler. 1.449 +// This may be used after a call to the rethrow VM stub, 1.450 +// when it is needed to process unloaded exception classes. 1.451 +void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) { 1.452 + // Exceptions are delivered through this channel: 1.453 + Node* i_o = this->i_o(); 1.454 + 1.455 + // Add a CatchNode. 1.456 + GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1); 1.457 + GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL); 1.458 + GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0); 1.459 + 1.460 + for (; !handlers.is_done(); handlers.next()) { 1.461 + ciExceptionHandler* h = handlers.handler(); 1.462 + int h_bci = h->handler_bci(); 1.463 + ciInstanceKlass* h_klass = h->is_catch_all() ? 
env()->Throwable_klass() : h->catch_klass(); 1.464 + // Do not introduce unloaded exception types into the graph: 1.465 + if (!h_klass->is_loaded()) { 1.466 + if (saw_unloaded->contains(h_bci)) { 1.467 + /* We've already seen an unloaded exception with h_bci, 1.468 + so don't duplicate. Duplication will cause the CatchNode to be 1.469 + unnecessarily large. See 4713716. */ 1.470 + continue; 1.471 + } else { 1.472 + saw_unloaded->append(h_bci); 1.473 + } 1.474 + } 1.475 + const Type* h_extype = TypeOopPtr::make_from_klass(h_klass); 1.476 + // (We use make_from_klass because it respects UseUniqueSubclasses.) 1.477 + h_extype = h_extype->join(TypeInstPtr::NOTNULL); 1.478 + assert(!h_extype->empty(), "sanity"); 1.479 + // Note: It's OK if the BCIs repeat themselves. 1.480 + bcis->append(h_bci); 1.481 + extypes->append(h_extype); 1.482 + } 1.483 + 1.484 + int len = bcis->length(); 1.485 + CatchNode *cn = new (C, 2) CatchNode(control(), i_o, len+1); 1.486 + Node *catch_ = _gvn.transform(cn); 1.487 + 1.488 + // now branch with the exception state to each of the (potential) 1.489 + // handlers 1.490 + for(int i=0; i < len; i++) { 1.491 + // Setup JVM state to enter the handler. 1.492 + PreserveJVMState pjvms(this); 1.493 + // Locals are just copied from before the call. 1.494 + // Get control from the CatchNode. 1.495 + int handler_bci = bcis->at(i); 1.496 + Node* ctrl = _gvn.transform( new (C, 1) CatchProjNode(catch_, i+1,handler_bci)); 1.497 + // This handler cannot happen? 1.498 + if (ctrl == top()) continue; 1.499 + set_control(ctrl); 1.500 + 1.501 + // Create exception oop 1.502 + const TypeInstPtr* extype = extypes->at(i)->is_instptr(); 1.503 + Node *ex_oop = _gvn.transform(new (C, 2) CreateExNode(extypes->at(i), ctrl, i_o)); 1.504 + 1.505 + // Handle unloaded exception classes. 1.506 + if (saw_unloaded->contains(handler_bci)) { 1.507 + // An unloaded exception type is coming here. Do an uncommon trap. 
1.508 +#ifndef PRODUCT 1.509 + // We do not expect the same handler bci to take both cold unloaded 1.510 + // and hot loaded exceptions. But, watch for it. 1.511 + if (extype->is_loaded()) { 1.512 + tty->print_cr("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", handler_bci); 1.513 + method()->print_name(); tty->cr(); 1.514 + } else if (PrintOpto && (Verbose || WizardMode)) { 1.515 + tty->print("Bailing out on unloaded exception type "); 1.516 + extype->klass()->print_name(); 1.517 + tty->print(" at bci:%d in ", bci()); 1.518 + method()->print_name(); tty->cr(); 1.519 + } 1.520 +#endif 1.521 + // Emit an uncommon trap instead of processing the block. 1.522 + set_bci(handler_bci); 1.523 + push_ex_oop(ex_oop); 1.524 + uncommon_trap(Deoptimization::Reason_unloaded, 1.525 + Deoptimization::Action_reinterpret, 1.526 + extype->klass(), "!loaded exception"); 1.527 + set_bci(iter().cur_bci()); // put it back 1.528 + continue; 1.529 + } 1.530 + 1.531 + // go to the exception handler 1.532 + if (handler_bci < 0) { // merge with corresponding rethrow node 1.533 + throw_to_exit(make_exception_state(ex_oop)); 1.534 + } else { // Else jump to corresponding handler 1.535 + push_ex_oop(ex_oop); // Clear stack and push just the oop. 1.536 + merge_exception(handler_bci); 1.537 + } 1.538 + } 1.539 + 1.540 + // The first CatchProj is for the normal return. 1.541 + // (Note: If this is a call to rethrow_Java, this node goes dead.) 1.542 + set_control(_gvn.transform( new (C, 1) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci))); 1.543 +} 1.544 + 1.545 + 1.546 +//----------------------------catch_inline_exceptions-------------------------- 1.547 +// Handle all exceptions thrown by an inlined method or individual bytecode. 1.548 +// Common case 1: we have no handler, so all exceptions merge right into 1.549 +// the rethrow case. 1.550 +// Case 2: we have some handlers, with loaded exception klasses that have 1.551 +// no subklasses. 
We do a Deutsch-Shiffman style type-check on the incoming 1.552 +// exception oop and branch to the handler directly. 1.553 +// Case 3: We have some handlers with subklasses or are not loaded at 1.554 +// compile-time. We have to call the runtime to resolve the exception. 1.555 +// So we insert a RethrowCall and all the logic that goes with it. 1.556 +void Parse::catch_inline_exceptions(SafePointNode* ex_map) { 1.557 + // Caller is responsible for saving away the map for normal control flow! 1.558 + assert(stopped(), "call set_map(NULL) first"); 1.559 + assert(method()->has_exception_handlers(), "don't come here w/o work to do"); 1.560 + 1.561 + Node* ex_node = saved_ex_oop(ex_map); 1.562 + if (ex_node == top()) { 1.563 + // No action needed. 1.564 + return; 1.565 + } 1.566 + const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr(); 1.567 + NOT_PRODUCT(if (ex_type==NULL) tty->print_cr("*** Exception not InstPtr")); 1.568 + if (ex_type == NULL) 1.569 + ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr(); 1.570 + 1.571 + // determine potential exception handlers 1.572 + ciExceptionHandlerStream handlers(method(), bci(), 1.573 + ex_type->klass()->as_instance_klass(), 1.574 + ex_type->klass_is_exact()); 1.575 + 1.576 + // Start executing from the given throw state. (Keep its stack, for now.) 1.577 + // Get the exception oop as known at compile time. 1.578 + ex_node = use_exception_state(ex_map); 1.579 + 1.580 + // Get the exception oop klass from its header 1.581 + Node* ex_klass_node = NULL; 1.582 + if (has_ex_handler() && !ex_type->klass_is_exact()) { 1.583 + Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes()); 1.584 + ex_klass_node = _gvn.transform(new (C, 3) LoadKlassNode(NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT)); 1.585 + 1.586 + // Compute the exception klass a little more cleverly. 1.587 + // Obvious solution is to simple do a LoadKlass from the 'ex_node'. 
1.588 + // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for 1.589 + // each arm of the Phi. If I know something clever about the exceptions 1.590 + // I'm loading the class from, I can replace the LoadKlass with the 1.591 + // klass constant for the exception oop. 1.592 + if( ex_node->is_Phi() ) { 1.593 + ex_klass_node = new (C, ex_node->req()) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT ); 1.594 + for( uint i = 1; i < ex_node->req(); i++ ) { 1.595 + Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() ); 1.596 + Node* k = _gvn.transform(new (C, 3) LoadKlassNode(0, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT)); 1.597 + ex_klass_node->init_req( i, k ); 1.598 + } 1.599 + _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT); 1.600 + 1.601 + } 1.602 + } 1.603 + 1.604 + // Scan the exception table for applicable handlers. 1.605 + // If none, we can call rethrow() and be done! 1.606 + // If precise (loaded with no subklasses), insert a D.S. style 1.607 + // pointer compare to the correct handler and loop back. 1.608 + // If imprecise, switch to the Rethrow VM-call style handling. 1.609 + 1.610 + int remaining = handlers.count_remaining(); 1.611 + 1.612 + // iterate through all entries sequentially 1.613 + for (;!handlers.is_done(); handlers.next()) { 1.614 + // Do nothing if turned off 1.615 + if( !DeutschShiffmanExceptions ) break; 1.616 + ciExceptionHandler* handler = handlers.handler(); 1.617 + 1.618 + if (handler->is_rethrow()) { 1.619 + // If we fell off the end of the table without finding an imprecise 1.620 + // exception klass (and without finding a generic handler) then we 1.621 + // know this exception is not handled in this method. We just rethrow 1.622 + // the exception into the caller. 
1.623 + throw_to_exit(make_exception_state(ex_node)); 1.624 + return; 1.625 + } 1.626 + 1.627 + // exception handler bci range covers throw_bci => investigate further 1.628 + int handler_bci = handler->handler_bci(); 1.629 + 1.630 + if (remaining == 1) { 1.631 + push_ex_oop(ex_node); // Push exception oop for handler 1.632 +#ifndef PRODUCT 1.633 + if (PrintOpto && WizardMode) { 1.634 + tty->print_cr(" Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci); 1.635 + } 1.636 +#endif 1.637 + merge_exception(handler_bci); // jump to handler 1.638 + return; // No more handling to be done here! 1.639 + } 1.640 + 1.641 + // %%% The following logic replicates make_from_klass_unique. 1.642 + // TO DO: Replace by a subroutine call. Then generalize 1.643 + // the type check, as noted in the next "%%%" comment. 1.644 + 1.645 + ciInstanceKlass* klass = handler->catch_klass(); 1.646 + if (UseUniqueSubclasses) { 1.647 + // (We use make_from_klass because it respects UseUniqueSubclasses.) 1.648 + const TypeOopPtr* tp = TypeOopPtr::make_from_klass(klass); 1.649 + klass = tp->klass()->as_instance_klass(); 1.650 + } 1.651 + 1.652 + // Get the handler's klass 1.653 + if (!klass->is_loaded()) // klass is not loaded? 1.654 + break; // Must call Rethrow! 1.655 + if (klass->is_interface()) // should not happen, but... 1.656 + break; // bail out 1.657 + // See if the loaded exception klass has no subtypes 1.658 + if (klass->has_subklass()) 1.659 + break; // Cannot easily do precise test ==> Rethrow 1.660 + 1.661 + // %%% Now that subclass checking is very fast, we need to rewrite 1.662 + // this section and remove the option "DeutschShiffmanExceptions". 1.663 + // The exception processing chain should be a normal typecase pattern, 1.664 + // with a bailout to the interpreter only in the case of unloaded 1.665 + // classes. (The bailout should mark the method non-entrant.) 1.666 + // This rewrite should be placed in GraphKit::, not Parse::. 
    // The precise-match optimization below is only valid if 'klass' has no
    // subclasses; if any subclass is loaded later we need to recompile.
    // %%% should use stronger assert_unique_concrete_subtype instead
    if (!klass->is_final()) {
      C->dependencies()->assert_leaf_type(klass);
    }

    // Implement precise test: branch on (exception klass != handler klass).
    const TypeKlassPtr *tk = TypeKlassPtr::make(klass);
    Node* con = _gvn.makecon(tk);
    Node* cmp = _gvn.transform( new (C, 3) CmpPNode(ex_klass_node, con) );
    Node* bol = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ne) );
    { BuildCutout unless(this, bol, PROB_LIKELY(0.7f));
      // On the match path the exception oop is known to be exactly 'klass';
      // cast it up so the handler sees the sharpened type.
      const TypeInstPtr* tinst = TypeInstPtr::make_exact(TypePtr::NotNull, klass);
      Node* ex_oop = _gvn.transform(new (C, 2) CheckCastPPNode(control(), ex_node, tinst));
      push_ex_oop(ex_oop);      // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print(" Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
        klass->print_name();
        tty->cr();
      }
#endif
      merge_exception(handler_bci);
    }

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  if (remaining == 1) {
    // Further checks do not matter.
    // NOTE(review): intentionally empty — presumably the last remaining
    // handler could be taken without a klass test, but no shortcut is
    // implemented here; confirm this is just a placeholder.
  }

  if (can_rerun_bytecode()) {
    // Do not push_ex_oop here!
    // Re-executing the bytecode will reproduce the throwing condition.
    bool must_throw = true;
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none,
                  (ciKlass*)NULL, (const char*)NULL, // default args
                  must_throw);
    return;
  }

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note: This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    OptoRuntime::rethrow_Type(),
                    OptoRuntime::rethrow_stub(),
                    NULL, NULL,
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}


// (Note: Moved add_debug_info into GraphKit::add_safepoint_edges.)


#ifndef PRODUCT
// Bump call-site statistics counters (debug builds only; all counting is
// gated on -XX:+CountCompiledCalls).
//   at_method_entry - we are at the entry of the top-level compiled method:
//                     bump its compiled_invocation_counter (only at depth 1).
//   is_inline       - the call at bc() was inlined: use the nof_inlined_*
//                     counters; otherwise the plain per-bytecode counters.
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
  if( CountCompiledCalls ) {
    if( at_method_entry ) {
      // bump invocation counter if top method (for statistics)
      // NOTE(review): the CountCompiledCalls re-check below is redundant
      // (already tested by the enclosing if); harmless.
      if (CountCompiledCalls && depth() == 1) {
        const TypeInstPtr* addr_type = TypeInstPtr::make(method());
        Node* adr1 = makecon(addr_type);
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(methodOopDesc::compiled_invocation_counter_offset()));
        increment_counter(adr2);
      }
    } else if (is_inline) {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    } else {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    }
  }
}
#endif //PRODUCT


// Identify possible target method and inlining style.
// For a virtual or interface call at caller/bci with statically-known
// receiver class 'klass', resolved callee 'dest_method', and (optionally)
// a sharper receiver type from the graph, attempt to devirtualize:
//   1. trivially, if dest_method can be statically bound;
//   2. for array receivers calling Object methods (always monomorphic);
//   3. via CHA (find_monomorphic_target), recording a dependency;
//   4. via an exact receiver type (resolve_invoke).
// Returns the unique target method on success, else NULL (NOTE(review):
// NULL presumably means "leave the call virtual" — confirm against caller).
ciMethod* Parse::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                   ciMethod *dest_method, const TypeOopPtr* receiver_type) {
  // only use for virtual or interface calls

  // If it is obviously final, do not bother to call find_monomorphic_target,
  // because the class hierarchy checks are not needed, and may fail due to
  // incompletely loaded classes. Since we do our own class loading checks
  // in this module, we may confidently bind to any method.
  if (dest_method->can_be_statically_bound()) {
    return dest_method;
  }

  // Attempt to improve the receiver using the graph's receiver_type,
  // which may be sharper than the bytecode's static receiver 'klass'.
  bool actual_receiver_is_exact = false;
  ciInstanceKlass* actual_receiver = klass;
  if (receiver_type != NULL) {
    // Array methods are all inherited from Object, and are monomorphic.
    if (receiver_type->isa_aryptr() &&
        dest_method->holder() == env()->Object_klass()) {
      return dest_method;
    }

    // All other interesting cases are instance klasses.
    if (!receiver_type->isa_instptr()) {
      return NULL;
    }

    ciInstanceKlass *ikl = receiver_type->klass()->as_instance_klass();
    if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&
        (ikl == actual_receiver || ikl->is_subclass_of(actual_receiver))) {
      // ikl is a same or better type than the original actual_receiver,
      // e.g. static receiver from bytecodes.
      actual_receiver = ikl;
      // Is the actual_receiver exact?
      actual_receiver_is_exact = receiver_type->klass_is_exact();
    }
  }

  // Ask class-hierarchy analysis for a unique concrete implementation.
  ciInstanceKlass* calling_klass = caller->holder();
  ciMethod* cha_monomorphic_target = dest_method->find_monomorphic_target(calling_klass, klass, actual_receiver);
  if (cha_monomorphic_target != NULL) {
    assert(!cha_monomorphic_target->is_abstract(), "");
    // Look at the method-receiver type. Does it add "too much information"?
    ciKlass* mr_klass = cha_monomorphic_target->holder();
    const Type* mr_type = TypeInstPtr::make(TypePtr::BotPTR, mr_klass);
    if (receiver_type == NULL || !receiver_type->higher_equal(mr_type)) {
      // Calling this method would include an implicit cast to its holder.
      // %%% Not yet implemented. Would throw minor asserts at present.
      // %%% The most common wins are already gained by +UseUniqueSubclasses.
      // To fix, put the higher_equal check at the call of this routine,
      // and add a CheckCastPP to the receiver.
      if (TraceDependencies) {
        tty->print_cr("found unique CHA method, but could not cast up");
        tty->print(" method = ");
        cha_monomorphic_target->print();
        tty->cr();
      }
      if (C->log() != NULL) {
        C->log()->elem("missed_CHA_opportunity klass='%d' method='%d'",
                       C->log()->identify(klass),
                       C->log()->identify(cha_monomorphic_target));
      }
      cha_monomorphic_target = NULL;
    }
  }
  if (cha_monomorphic_target != NULL) {
    // Hardwiring a virtual.
    // If we inlined because CHA revealed only a single target method,
    // then we are dependent on that target method not getting overridden
    // by dynamic class loading. Be sure to test the "static" receiver
    // dest_method here, as opposed to the actual receiver, which may
    // falsely lead us to believe that the receiver is final or private.
    // NOTE(review): the comment above says to use the "static" receiver,
    // yet the dependency is recorded against actual_receiver — confirm
    // this matches the context passed to find_monomorphic_target.
    C->dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
    return cha_monomorphic_target;
  }

  // If the type is exact, we can still bind the method w/o a vcall.
  // (This case comes after CHA so we can see how much extra work it does.)
  if (actual_receiver_is_exact) {
    // In case of evolution, there is a dependence on every inlined method, since each
    // such method can be changed when its class is redefined.
    ciMethod* exact_method = dest_method->resolve_invoke(calling_klass, actual_receiver);
    if (exact_method != NULL) {
#ifndef PRODUCT
      if (PrintOpto) {
        tty->print(" Calling method via exact type @%d --- ", bci);
        exact_method->print_name();
        tty->cr();
      }
#endif
      return exact_method;
    }
  }

  // No devirtualization opportunity found.
  return NULL;
}