Tue, 13 Dec 2016 14:37:04 -0500
8168699: Validate special case invocations
Reviewed-by: kevinw, vlivanov
/*
 * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/cfgnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/sharedRuntime.hpp"
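
// trace_type_profile reports the receiver type observed at a profiled call
// site.  For a hypothetical site where 700 of 1000 recorded calls saw an
// ArrayList receiver, the output has the shape:
//   \-> TypeProfile (700/1000 counts) = java/util/ArrayList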
void trace_type_profile(Compile* C, ciMethod* method, int depth, int bci, ciMethod* prof_method, ciKlass* prof_klass, int site_count, int receiver_count) {
  if (TraceTypeProfile || C->print_inlining()) {
    outputStream* out = tty;
    if (!C->print_inlining()) {
      if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
        method->print_short_name();
        tty->cr();
      }
      CompileTask::print_inlining(prof_method, depth, bci);
    } else {
      out = C->print_inlining_stream();
    }
    CompileTask::print_inline_indent(depth, out);
    out->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
    stringStream ss;
    prof_klass->name()->print_symbol_on(&ss);
    out->print("%s", ss.as_string());
    out->cr();
  }
}

CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
                                       JVMState* jvms, bool allow_inline,
                                       float prof_factor, ciKlass* speculative_receiver_type,
                                       bool allow_intrinsics, bool delayed_forbidden) {
  ciMethod*       caller   = jvms->method();
  int             bci      = jvms->bci();
  Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
  guarantee(callee != NULL, "failed method resolution");

  // Dtrace currently doesn't work unless all calls are vanilla
  if (env()->dtrace_method_probes()) {
    allow_inline = false;
  }

  // Note: When we get profiling during stage-1 compiles, we want to pull
  // from more specific profile data which pertains to this inlining.
  // Right now, ignore the information in jvms->caller(), and do method[bci].
  ciCallProfile profile = caller->call_profile_at_bci(bci);

  // See how many times this site has been invoked.
  int site_count = profile.count();
  int receiver_count = -1;
  if (call_does_dispatch && UseTypeProfile && profile.has_receiver(0)) {
    // Receivers in the profile structure are ordered by call counts
    // so that the most called (major) receiver is profile.receiver(0).
    receiver_count = profile.receiver_count(0);
  }

  CompileLog* log = this->log();
  if (log != NULL) {
    int rid  = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
    int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
    log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
                    log->identify(callee), site_count, prof_factor);
    if (call_does_dispatch)  log->print(" virtual='1'");
    if (allow_inline)        log->print(" inline='1'");
    if (receiver_count >= 0) {
      log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
      if (profile.has_receiver(1)) {
        log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
      }
    }
    log->end_elem();
  }

  // Special case the handling of certain common, profitable library
  // methods.  If such a method is replaced with specialized code, we
  // return the specialized code as the inlined version of the call.
  // We do this before the strict f.p. check below because the
  // intrinsics handle strict f.p. correctly.
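  // (For example, a call to Math.sqrt(double) can be matched by
  // find_intrinsic() below and expanded inline as a SqrtD node instead
  // of going through a real call.)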
  CallGenerator* cg_intrinsic = NULL;
  if (allow_inline && allow_intrinsics) {
    CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
    if (cg != NULL) {
      if (cg->is_predicated()) {
        // Code without the intrinsic but, hopefully, inlined.
        CallGenerator* inline_cg = this->call_generator(callee,
              vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false);
        if (inline_cg != NULL) {
          cg = CallGenerator::for_predicated_intrinsic(cg, inline_cg);
        }
      }

      // If the intrinsic does the virtual dispatch, we try to use the type profile
      // first, and hopefully inline it as the regular virtual call below.
      // We will retry the intrinsic afterwards if nothing has claimed it.
      if (cg->does_virtual_dispatch()) {
        cg_intrinsic = cg;
        cg = NULL;
      } else {
        return cg;
      }
    }
  }

  // Do method handle calls.
  // NOTE: This must happen before normal inlining logic below since
  // MethodHandle.invoke* are native methods which obviously don't
  // have bytecodes and so normal inlining fails.
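  // (The signature-polymorphic MethodHandle.invokeExact and
  // MethodHandle.invoke are the canonical examples of such native,
  // bytecode-less methods.)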
  if (callee->is_method_handle_intrinsic()) {
    CallGenerator* cg = CallGenerator::for_method_handle_call(jvms, caller, callee, delayed_forbidden);
    assert(cg == NULL || !delayed_forbidden || !cg->is_late_inline() || cg->is_mh_late_inline(), "unexpected CallGenerator");
    return cg;
  }

  // Do not inline strict fp into non-strict code, or the reverse
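  // (e.g. a strictfp method must not be inlined into a non-strict caller,
  // since strictness constrains the rounding of intermediate FP results)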
  if (caller->is_strict() ^ callee->is_strict()) {
    allow_inline = false;
  }

  // Attempt to inline...
  if (allow_inline) {
    // The profile data is only partly attributable to this caller;
    // scale back the call site information.
    float past_uses = jvms->method()->scale_count(site_count, prof_factor);
    // This is the number of times we expect the call code to be used.
    float expected_uses = past_uses;

    // Try inlining a bytecoded method:
    if (!call_does_dispatch) {
      InlineTree* ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
      WarmCallInfo scratch_ci;
      bool should_delay = false;
      WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci, should_delay);
      assert(ci != &scratch_ci, "do not let this pointer escape");
      bool allow_inline   = (ci != NULL && !ci->is_cold());
      bool require_inline = (allow_inline && ci->is_hot());

      if (allow_inline) {
        CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);

        if (require_inline && cg != NULL) {
          // Delay the inlining of this method to give us the
          // opportunity to perform some high level optimizations
          // first.
          if (should_delay_string_inlining(callee, jvms)) {
            assert(!delayed_forbidden, "strange");
            return CallGenerator::for_string_late_inline(callee, cg);
          } else if (should_delay_boxing_inlining(callee, jvms)) {
            assert(!delayed_forbidden, "strange");
            return CallGenerator::for_boxing_late_inline(callee, cg);
          } else if ((should_delay || AlwaysIncrementalInline) && !delayed_forbidden) {
            return CallGenerator::for_late_inline(callee, cg);
          }
        }
        if (cg == NULL || should_delay) {
          // Fall through.
        } else if (require_inline || !InlineWarmCalls) {
          return cg;
        } else {
          CallGenerator* cold_cg = call_generator(callee, vtable_index, call_does_dispatch, jvms, false, prof_factor);
          return CallGenerator::for_warm_call(ci, cold_cg, cg);
        }
      }
    }

    // Try using the type profile.
    if (call_does_dispatch && site_count > 0 && receiver_count > 0) {
      // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
      bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
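      // (e.g. with TypeProfileMajorReceiverPercent at its default of 90,
      // a receiver seen on only 70% of the profiled calls at this site
      // would not count as a major receiver)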
      ciMethod* receiver_method = NULL;

      int morphism = profile.morphism();
      if (speculative_receiver_type != NULL) {
        // We have a speculative type; we should be able to resolve the
        // call.  We do that before looking at the profiling at this
        // invoke because the speculative type may let us avoid the
        // bimorphic inlining that the profile would otherwise suggest.
        receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                 speculative_receiver_type);
        if (receiver_method == NULL) {
          speculative_receiver_type = NULL;
        } else {
          morphism = 1;
        }
      }
      if (receiver_method == NULL &&
          (have_major_receiver || morphism == 1 ||
           (morphism == 2 && UseBimorphicInlining))) {
        // receiver_method = profile.method();
        // Profiles do not suggest methods now.  Look it up in the major receiver.
        receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                 profile.receiver(0));
      }
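      // Illustrative profile shapes (hypothetical counts):
      //   morphism == 1: receiver(0) = ArrayList, 1000/1000 counts
      //   morphism == 2: receiver(0) = ArrayList  (700 counts),
      //                  receiver(1) = LinkedList (300 counts)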
      if (receiver_method != NULL) {
        // The single majority receiver sufficiently outweighs the minority.
        CallGenerator* hit_cg = this->call_generator(receiver_method,
              vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);
        if (hit_cg != NULL) {
          // Look up second receiver.
          CallGenerator* next_hit_cg = NULL;
          ciMethod* next_receiver_method = NULL;
          if (morphism == 2 && UseBimorphicInlining) {
            next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                          profile.receiver(1));
            if (next_receiver_method != NULL) {
              next_hit_cg = this->call_generator(next_receiver_method,
                                  vtable_index, !call_does_dispatch, jvms,
                                  allow_inline, prof_factor);
              if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
                  have_major_receiver && UseOnlyInlinedBimorphic) {
                // Skip if we can't inline the second receiver's method
                next_hit_cg = NULL;
              }
            }
          }
          CallGenerator* miss_cg;
          Deoptimization::DeoptReason reason = morphism == 2 ?
            Deoptimization::Reason_bimorphic :
            (speculative_receiver_type == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check);
          if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
              !too_many_traps(jvms->method(), jvms->bci(), reason)
             ) {
            // Generate uncommon trap for class check failure path
            // in case of monomorphic or bimorphic virtual call site.
            miss_cg = CallGenerator::for_uncommon_trap(callee, reason,
                        Deoptimization::Action_maybe_recompile);
          } else {
            // Generate virtual call for class check failure path
            // in case of polymorphic virtual call site.
            miss_cg = CallGenerator::for_virtual_call(callee, vtable_index);
          }
          if (miss_cg != NULL) {
            if (next_hit_cg != NULL) {
              assert(speculative_receiver_type == NULL, "shouldn't end up here if we used speculation");
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
              // We don't need to record dependency on a receiver here and below.
              // Whenever we inline, the dependency is added by Parse::Parse().
              miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
            }
            if (miss_cg != NULL) {
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
              ciKlass* k = speculative_receiver_type != NULL ? speculative_receiver_type : profile.receiver(0);
              float hit_prob = speculative_receiver_type != NULL ? 1.0 : profile.receiver_prob(0);
              CallGenerator* cg = CallGenerator::for_predicted_call(k, miss_cg, hit_cg, hit_prob);
              if (cg != NULL)  return cg;
            }
          }
        }
      }
    }
  }

  // Nothing claimed the intrinsic; go with straightforward inlining
  // of the already discovered intrinsic.
  if (allow_inline && allow_intrinsics && cg_intrinsic != NULL) {
    assert(cg_intrinsic->does_virtual_dispatch(), "sanity");
    return cg_intrinsic;
  }

  // There was no special inlining tactic, or it bailed out.
  // Use a more generic tactic, like a simple call.
  if (call_does_dispatch) {
    return CallGenerator::for_virtual_call(callee, vtable_index);
  } else {
    // Class Hierarchy Analysis or Type Profile reveals a unique target,
    // or it is a static or special call.
    return CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms));
  }
}

// Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics.
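// The canonical shape produced by javac for string concatenation is
//   String s = new StringBuilder().append(a).append(b).toString();
// and delaying the StringBuilder/StringBuffer inlining keeps the whole
// chain recognizable for PhaseStringOpts.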
bool Compile::should_delay_string_inlining(ciMethod* call_method, JVMState* jvms) {
  if (has_stringbuilder()) {

    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
         call_method->holder() == C->env()->StringBuffer_klass()) &&
        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
      // Delay SB calls only when called from non-SB code
      return false;
    }

    switch (call_method->intrinsic_id()) {
      case vmIntrinsics::_StringBuilder_void:
      case vmIntrinsics::_StringBuilder_int:
      case vmIntrinsics::_StringBuilder_String:
      case vmIntrinsics::_StringBuilder_append_char:
      case vmIntrinsics::_StringBuilder_append_int:
      case vmIntrinsics::_StringBuilder_append_String:
      case vmIntrinsics::_StringBuilder_toString:
      case vmIntrinsics::_StringBuffer_void:
      case vmIntrinsics::_StringBuffer_int:
      case vmIntrinsics::_StringBuffer_String:
      case vmIntrinsics::_StringBuffer_append_char:
      case vmIntrinsics::_StringBuffer_append_int:
      case vmIntrinsics::_StringBuffer_append_String:
      case vmIntrinsics::_StringBuffer_toString:
      case vmIntrinsics::_Integer_toString:
        return true;

      case vmIntrinsics::_String_String:
        {
          Node* receiver = jvms->map()->in(jvms->argoff() + 1);
          if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
            CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
            ciMethod* m = csj->method();
            if (m != NULL &&
                (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
                 m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
              // Delay String.<init>(new SB())
              return true;
          }
          return false;
        }

      default:
        return false;
    }
  }
  return false;
}

bool Compile::should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms) {
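  // A boxing method is e.g. Integer.valueOf(int), as emitted for
  // autoboxing such as "Integer i = x;".  Delaying its inlining gives
  // the later passes a chance to eliminate the boxing entirely.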
  if (eliminate_boxing() && call_method->is_boxing_method()) {
    set_has_boxed_value(true);
    return aggressive_unboxing();
  }
  return false;
}

// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod* dest_method, ciInstanceKlass* klass) {
  // Additional inputs to consider...
  // bc     = bc()
  // caller = method()
  // iter().get_method_holder_index()
  assert(dest_method->is_loaded(), "ciTypeFlow should not let us get here");
  // Interface classes can be loaded & linked and never get around to
  // being initialized.  Uncommon-trap for not-initialized static or
  // v-calls.  Let interface calls happen.
  ciInstanceKlass* holder_klass = dest_method->holder();
  if (!holder_klass->is_being_initialized() &&
      !holder_klass->is_initialized() &&
      !holder_klass->is_interface()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  holder_klass);
    return true;
  }

  assert(dest_method->is_loaded(), "dest_method: typeflow responsibility");
  return false;
}


//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a method that eventually needs debug info for this
  // JVMS, our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  const bool is_virtual = bc() == Bytecodes::_invokevirtual;
  const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  const bool has_receiver = Bytecodes::has_receiver(bc());

  // Find target being called
  bool             will_link;
  ciSignature*     declared_signature = NULL;
  ciMethod*        orig_callee  = iter().get_method(will_link, &declared_signature);  // callee in the bytecode
  ciInstanceKlass* holder_klass = orig_callee->holder();
  ciKlass*         holder       = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
  assert(declared_signature != NULL, "cannot be null");

  // Bump max node limit for JSR292 users
  if (bc() == Bytecodes::_invokedynamic || orig_callee->is_method_handle_intrinsic()) {
    C->set_max_node_limit(3*MaxNodeLimit);
  }

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(orig_callee, klass)) {
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
      orig_callee->print_name(); tty->cr();
    }
#endif
    return;
  }
  assert(holder_klass->is_loaded(), "");
  //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc");  // XXX invokehandle (cur_bc_raw)
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.
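  // (e.g. an invokeinterface of toString() can resolve to a method whose
  // holder is java/lang/Object; the assert above tolerates this via the
  // holder_klass->super() == NULL test, which matches only Object)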

  // orig_callee is the resolved callee whose signature includes the
  // appendix argument.
  const int nargs = orig_callee->arg_size();
  const bool is_signature_polymorphic = MethodHandles::is_signature_polymorphic(orig_callee->intrinsic_id());

  // Push appendix argument (MethodType, CallSite, etc.), if there is one.
  if (iter().has_appendix()) {
    ciObject* appendix_arg = iter().get_appendix();
    const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg);
    Node* appendix_arg_node = _gvn.makecon(appendix_arg_type);
    push(appendix_arg_node);
  }

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Try to get the most accurate receiver type
  ciMethod* callee             = orig_callee;
  int       vtable_index       = Method::invalid_vtable_index;
  bool      call_does_dispatch = false;

  // Speculative type of the receiver if any
  ciKlass* speculative_receiver_type = NULL;
  if (is_virtual_or_interface) {
    Node* receiver_node             = stack(sp() - nargs);
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
    // For arrays, klass below is Object. When vtable calls are used,
    // resolving the call with Object would allow an illegal call to
    // finalize() on an array. We use holder instead: illegal calls to
    // finalize() won't be compiled as vtable calls (IC call
    // resolution will catch the illegal call) and the few legal calls
    // on array types won't be either.
    callee = C->optimize_virtual_call(method(), bci(), klass, holder, orig_callee,
                                      receiver_type, is_virtual,
                                      call_does_dispatch, vtable_index);  // out-parameters
    speculative_receiver_type = receiver_type != NULL ? receiver_type->speculative_type() : NULL;
  }

  // invoke-super-special
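  // (JDK-8168699) Illustrative case: an invokespecial of a superinterface
  // default method issued from an interface, e.g.
  //   interface I { default void m() { ... } }
  //   interface J extends I { default void m() { I.super.m(); } }
  // The JVMS requires the receiver to be a subtype of the current class
  // (here J); since that cannot always be proven statically when the
  // sender is an interface, the block below emits a dynamic type check.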
  if (iter().cur_bc_raw() == Bytecodes::_invokespecial && !orig_callee->is_object_initializer()) {
    ciInstanceKlass* calling_klass = method()->holder();
    ciInstanceKlass* sender_klass =
        calling_klass->is_anonymous() ? calling_klass->host_klass() :
                                        calling_klass;
    if (sender_klass->is_interface()) {
      Node* receiver_node = stack(sp() - nargs);
      Node* cls_node = makecon(TypeKlassPtr::make(sender_klass));
      Node* bad_type_ctrl = NULL;
      Node* casted_receiver = gen_checkcast(receiver_node, cls_node, &bad_type_ctrl);
      if (bad_type_ctrl != NULL) {
        PreserveJVMState pjvms(this);
        set_control(bad_type_ctrl);
        uncommon_trap(Deoptimization::Reason_class_check,
                      Deoptimization::Action_none);
      }
      if (stopped()) {
        return; // MUST uncommon-trap?
      }
      set_stack(sp() - nargs, casted_receiver);
    }
  }

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors);

  // ---------------------
  dec_sp(nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type);

  // NOTE:  Don't use orig_callee and callee after this point!  Use cg->method() instead.
  orig_callee = callee = NULL;

  // ---------------------
  // Round double arguments before call
  round_double_arguments(cg->method());

  // Feed profiling data for arguments to the type system so it can
  // propagate it as speculative types
  record_profiled_arguments_for_speculation(cg->method(), bc());

#ifndef PRODUCT
  // bump global counters for calls
  count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());

  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(), "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  // The extra CheckCastPP for speculative types messes with PhaseStringOpts
  if (receiver != NULL && !call_does_dispatch && !cg->is_string_late_inline()) {
    // Feed profiling data for a single receiver to the type system so
    // it can propagate it as a speculative type
    receiver = record_profiled_receiver_for_speculation(receiver);
  }

  // Bump method data counters (We profile *before* the call is made
  // because exceptions don't return to the call site.)
  profile_call(receiver);

  JVMState* new_jvms = cg->generate(jvms);
  if (new_jvms == NULL) {
    // When the inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;

    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize.  It should always be possible
    // to get a normal Java call that may inline in that case.
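    // (e.g. a predicated crypto intrinsic can refuse a call site whose
    // receiver is not the exact class the intrinsic was specialized for)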
    cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false);
    if ((new_jvms = cg->generate(jvms)) == NULL) {
      guarantee(failing(), "call failed to generate: calls should work");
      return;
    }
  }

  if (cg->is_inline()) {
    // Accumulate has_loops estimate
    C->set_has_loops(C->has_loops() || cg->method()->has_loops());
    C->env()->notice_inlined_method(cg->method());
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(cg->method());

    ciType* rtype = cg->method()->return_type();
    ciType* ctype = declared_signature->return_type();

    if (Bytecodes::has_optional_appendix(iter().cur_bc_raw()) || is_signature_polymorphic) {
      // Be careful here with return types.
      if (ctype != rtype) {
        BasicType rt = rtype->basic_type();
        BasicType ct = ctype->basic_type();
        if (ct == T_VOID) {
          // It's OK for a method to return a value that is discarded.
          // The discarding does not require any special action from the caller.
          // The Java code knows this, at VerifyType.isNullConversion.
          pop_node(rt);  // whatever it was, pop it
        } else if (rt == T_INT || is_subword_type(rt)) {
          // Nothing.  These cases are handled in lambda form bytecode.
          assert(ct == T_INT || is_subword_type(ct), err_msg_res("must match: rt=%s, ct=%s", type2name(rt), type2name(ct)));
        } else if (rt == T_OBJECT || rt == T_ARRAY) {
          assert(ct == T_OBJECT || ct == T_ARRAY, err_msg_res("rt=%s, ct=%s", type2name(rt), type2name(ct)));
          if (ctype->is_loaded()) {
            const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass());
            const Type*       sig_type = TypeOopPtr::make_from_klass(ctype->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* retnode = pop();
              Node* cast_obj = _gvn.transform(new (C) CheckCastPPNode(control(), retnode, sig_type));
              push(cast_obj);
            }
          }
        } else {
          assert(rt == ct, err_msg_res("unexpected mismatch: rt=%s, ct=%s", type2name(rt), type2name(ct)));
          // push a zero; it's better than getting an oop/int mismatch
          pop_node(rt);
          Node* retnode = zerocon(ct);
          push_node(ct, retnode);
        }
        // Now that the value is well-behaved, continue with the call-site type.
        rtype = ctype;
      }
    } else {
      // Symbolic resolution enforces the types to be the same.
      // NOTE: We must relax the assert for unloaded types because two
      // different ciType instances of the same unloaded class type
      // can appear to be "loaded" by different loaders (depending on
      // the accessing class).
      assert(!rtype->is_loaded() || !ctype->is_loaded() || rtype == ctype,
             err_msg_res("mismatched return types: rtype=%s, ctype=%s", rtype->name(), ctype->name()));
    }

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!rtype->is_loaded()) {
#ifndef PRODUCT
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
        cg->method()->print_name(); tty->cr();
      }
#endif
      if (C->log() != NULL) {
        C->log()->elem("assert_null reason='return' klass='%d'",
                       C->log()->identify(rtype));
      }
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      null_assert(peek());
      set_bci(iter().cur_bci()); // put it back
    }
    BasicType ct = ctype->basic_type();
    if (ct == T_OBJECT || ct == T_ARRAY) {
      ciKlass* better_type = method()->return_profiled_type(bci());
      if (UseTypeSpeculation && better_type != NULL) {
        // If profiling reports a single type for the return value,
        // feed it to the type system so it can propagate it as a
        // speculative type
        record_profile_for_speculation(stack(sp()-1), better_type);
      }
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}

//---------------------------catch_call_exceptions-----------------------------
// Put Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
  GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
  GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
  GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);

  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h       = handlers.handler();
    int                 h_bci   = h->handler_bci();
    ciInstanceKlass*    h_klass = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded->contains(h_bci)) {
        /* We've already seen an unloaded exception with h_bci,
           so don't duplicate.  Duplication will cause the CatchNode to be
           unnecessarily large.  See 4713716. */
        continue;
      } else {
        saw_unloaded->append(h_bci);
      }
    }
    const Type* h_extype = TypeOopPtr::make_from_klass(h_klass);
    // (We use make_from_klass because it respects UseUniqueSubclasses.)
    h_extype = h_extype->join(TypeInstPtr::NOTNULL);
    assert(!h_extype->empty(), "sanity");
    // Note:  It's OK if the BCIs repeat themselves.
    bcis->append(h_bci);
    extypes->append(h_extype);
  }

  int len = bcis->length();
  CatchNode* cn = new (C) CatchNode(control(), i_o, len+1);
  Node* catch_ = _gvn.transform(cn);

  // now branch with the exception state to each of the (potential)
  // handlers
  for (int i = 0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis->at(i);
    Node* ctrl = _gvn.transform(new (C) CatchProjNode(catch_, i+1, handler_bci));
    // This handler cannot happen?
    if (ctrl == top())  continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes->at(i)->is_instptr();
    Node* ex_oop = _gvn.transform(new (C) CreateExNode(extypes->at(i), ctrl, i_o));

    // Handle unloaded exception classes.
    if (saw_unloaded->contains(handler_bci)) {
      // An unloaded exception type is coming here.  Do an uncommon trap.
#ifndef PRODUCT
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions.  But, watch for it.
      if ((Verbose || WizardMode) && extype->is_loaded()) {
        tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", bci());
        method()->print_name(); tty->cr();
      } else if (PrintOpto && (Verbose || WizardMode)) {
        tty->print("Bailing out on unloaded exception type ");
        extype->klass()->print_name();
        tty->print(" at bci:%d in ", bci());
        method()->print_name(); tty->cr();
      }
#endif
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded,
                    Deoptimization::Action_reinterpret,
                    extype->klass(), "!loaded exception");
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) {        // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {                      // Else jump to corresponding handle
      push_ex_oop(ex_oop);        // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note:  If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform(new (C) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}


//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses.  We do a Deutsch-Shiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: We have some handlers with subklasses or are not loaded at
// compile-time.  We have to call the runtime to resolve the exception.
// So we insert a RethrowCall and all the logic that goes with it.
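// For a handler chain like (hypothetical)
//   catch (ArithmeticException e)   { ... }  // loaded, no subklasses -> case 2
//   catch (SomeUnloadedException e) { ... }  // not loaded            -> case 3
// the loop below compiles a subtype check for each loaded handler klass
// and falls back to the runtime rethrow path otherwise.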
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(NULL) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
  NOT_PRODUCT(if (ex_type == NULL) tty->print_cr("*** Exception not InstPtr"));
  if (ex_type == NULL)
    ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->klass()->as_instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state.  (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  Node* ex_klass_node = NULL;
  if (has_ex_handler() && !ex_type->klass_is_exact()) {
    Node* p = basic_plus_adr(ex_node, ex_node, oopDesc::klass_offset_in_bytes());
    ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));

    // Compute the exception klass a little more cleverly.
    // The obvious solution is to simply do a LoadKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
    // each arm of the Phi.  If I know something clever about the exceptions
    // I'm loading the class from, I can replace the LoadKlass with the
    // klass constant for the exception oop.
    if (ex_node->is_Phi()) {
      ex_klass_node = new (C) PhiNode(ex_node->in(0), TypeKlassPtr::OBJECT);
      for (uint i = 1; i < ex_node->req(); i++) {
        Node* ex_in = ex_node->in(i);
        if (ex_in == top() || ex_in == NULL) {
          // This path was not taken.
          ex_klass_node->init_req(i, top());
          continue;
        }
        Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
        Node* k = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));
        ex_klass_node->init_req(i, k);
      }
      _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);

    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.

  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method.  We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      push_ex_oop(ex_node);         // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print_cr("  Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
      }
#endif
      merge_exception(handler_bci); // jump to handler
      return;                       // No more handling to be done here!
    }

    // Get the handler's klass
    ciInstanceKlass* klass = handler->catch_klass();

    if (!klass->is_loaded()) {  // klass is not loaded?
      // fall through into catch_call_exceptions which will emit a
      // handler with an uncommon trap.
      break;
    }

    if (klass->is_interface())  // should not happen, but...
      break;                    // bail out

    // Check the type of the exception against the catch type
    const TypeKlassPtr* tk = TypeKlassPtr::make(klass);
    Node* con = _gvn.makecon(tk);
    Node* not_subtype_ctrl = gen_subtype_check(ex_klass_node, con);
    if (!stopped()) {
      PreserveJVMState pjvms(this);
      const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr();
      assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness");
      Node* ex_oop = _gvn.transform(new (C) CheckCastPPNode(control(), ex_node, tinst));
      push_ex_oop(ex_oop);      // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print("  Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
        klass->print_name();
        tty->cr();
      }
#endif
      merge_exception(handler_bci);
    }
    set_control(not_subtype_ctrl);

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note:  This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    OptoRuntime::rethrow_Type(),
                    OptoRuntime::rethrow_stub(),
                    NULL, NULL,
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}


// (Note:  Moved add_debug_info into GraphKit::add_safepoint_edges.)


#ifndef PRODUCT
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
  if (CountCompiledCalls) {
    if (at_method_entry) {
      // bump invocation counter if top method (for statistics)
      if (CountCompiledCalls && depth() == 1) {
        const TypePtr* addr_type = TypeMetadataPtr::make(method());
        Node* adr1 = makecon(addr_type);
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(Method::compiled_invocation_counter_offset()));
        increment_counter(adr2);
      }
    } else if (is_inline) {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    } else {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    }
  }
}
#endif //PRODUCT


ciMethod* Compile::optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                         ciKlass* holder, ciMethod* callee,
                                         const TypeOopPtr* receiver_type, bool is_virtual,
                                         bool& call_does_dispatch, int& vtable_index,
                                         bool check_access) {
  // Set default values for out-parameters.
  call_does_dispatch = true;
  vtable_index       = Method::invalid_vtable_index;

  // Choose call strategy.
  ciMethod* optimized_virtual_method = optimize_inlining(caller, bci, klass, callee,
                                                         receiver_type, check_access);

  // Has the call been sufficiently improved that it is no longer virtual?
  if (optimized_virtual_method != NULL) {
    callee             = optimized_virtual_method;
    call_does_dispatch = false;
  } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
    // We can make a vtable call at this site
    vtable_index = callee->resolve_vtable_index(caller->holder(), holder);
  }
  return callee;
}

// Identify possible target method and inlining style
ciMethod* Compile::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                     ciMethod* callee, const TypeOopPtr* receiver_type,
                                     bool check_access) {
  // only use for virtual or interface calls

  // If it is obviously final, do not bother to call find_monomorphic_target,
  // because the class hierarchy checks are not needed, and may fail due to
  // incompletely loaded classes.  Since we do our own class loading checks
  // in this module, we may confidently bind to any method.
  if (callee->can_be_statically_bound()) {
    return callee;
  }

  // Attempt to improve the receiver
  bool actual_receiver_is_exact = false;
  ciInstanceKlass* actual_receiver = klass;
  if (receiver_type != NULL) {
    // Array methods are all inherited from Object, and are monomorphic.
    // finalize() call on array is not allowed.
    if (receiver_type->isa_aryptr() &&
        callee->holder() == env()->Object_klass() &&
        callee->name() != ciSymbol::finalize_method_name()) {
      return callee;
    }

    // All other interesting cases are instance klasses.
    if (!receiver_type->isa_instptr()) {
      return NULL;
    }

    ciInstanceKlass* ikl = receiver_type->klass()->as_instance_klass();
    if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&
        (ikl == actual_receiver || ikl->is_subtype_of(actual_receiver))) {
      // ikl is the same as, or a better type than, the original actual_receiver,
      // e.g. static receiver from bytecodes.
      actual_receiver = ikl;
      // Is the actual_receiver exact?
      actual_receiver_is_exact = receiver_type->klass_is_exact();
    }
  }

  ciInstanceKlass* calling_klass = caller->holder();
  ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver, check_access);
  if (cha_monomorphic_target != NULL) {
    assert(!cha_monomorphic_target->is_abstract(), "");
    // Look at the method-receiver type.  Does it add "too much information"?
    ciKlass*    mr_klass = cha_monomorphic_target->holder();
    const Type* mr_type  = TypeInstPtr::make(TypePtr::BotPTR, mr_klass);
    if (receiver_type == NULL || !receiver_type->higher_equal(mr_type)) {
      // Calling this method would include an implicit cast to its holder.
      // %%% Not yet implemented.  Would throw minor asserts at present.
      // %%% The most common wins are already gained by +UseUniqueSubclasses.
      // To fix, put the higher_equal check at the call of this routine,
      // and add a CheckCastPP to the receiver.
      if (TraceDependencies) {
        tty->print_cr("found unique CHA method, but could not cast up");
        tty->print("  method  = ");
        cha_monomorphic_target->print();
        tty->cr();
      }
      if (log() != NULL) {
        log()->elem("missed_CHA_opportunity klass='%d' method='%d'",
                    log()->identify(klass),
                    log()->identify(cha_monomorphic_target));
      }
      cha_monomorphic_target = NULL;
    }
  }
  if (cha_monomorphic_target != NULL) {
    // Hardwiring a virtual.
    // If we inlined because CHA revealed only a single target method,
    // then we are dependent on that target method not getting overridden
    // by dynamic class loading.  Be sure to test the "static" receiver
    // dest_method here, as opposed to the actual receiver, which may
    // falsely lead us to believe that the receiver is final or private.
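    // (e.g. if CHA finds that interface method I.m() has a single concrete
    // implementor C.m() reachable from this site, the call is bound
    // directly to C.m(), and the recorded dependency invalidates this
    // nmethod if class loading later introduces another implementor)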
    dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
    return cha_monomorphic_target;
  }

  // If the type is exact, we can still bind the method w/o a vcall.
  // (This case comes after CHA so we can see how much extra work it does.)
  if (actual_receiver_is_exact) {
    // In case of evolution, there is a dependence on every inlined method, since each
    // such method can be changed when its class is redefined.
    ciMethod* exact_method = callee->resolve_invoke(calling_klass, actual_receiver);
    if (exact_method != NULL) {
#ifndef PRODUCT
      if (PrintOpto) {
        tty->print("  Calling method via exact type @%d --- ", bci);
        exact_method->print_name();
        tty->cr();
      }
#endif
      return exact_method;
    }
  }

  return NULL;
}