Fri, 27 Jul 2012 16:14:15 -0700
7187290: nightly failures after JSR 292 lazy method handle update
Reviewed-by: kvn, twisti
/*
 * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "ci/ciCPCache.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/cfgnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/sharedRuntime.hpp"

#ifndef PRODUCT
void trace_type_profile(ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
  if (TraceTypeProfile || PrintInlining || PrintOptoInlining) {
    if (!PrintInlining) {
      if (!PrintOpto && !PrintCompilation) {
        method->print_short_name();
        tty->cr();
      }
      CompileTask::print_inlining(prof_method, depth, bci);
    }
    CompileTask::print_inline_indent(depth);
    tty->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
    prof_klass->name()->print_symbol();
    tty->cr();
  }
}
#endif
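
// Illustrative only (values made up): with -XX:+TraceTypeProfile the code
// above emits one line per profiled call site, of roughly this shape:
//
//    \-> TypeProfile (2700/3000 counts) = java/lang/String
//
// i.e. the major receiver accounted for receiver_count of site_count calls.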

CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_is_virtual,
                                       JVMState* jvms, bool allow_inline,
                                       float prof_factor, bool allow_intrinsics) {
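  // Roadmap (mirrors the order of the checks below): library intrinsics are
  // tried first, then method handle intrinsics, then plain bytecode inlining,
  // and then type-profile-guided (mono-/bimorphic) inlining; if all of these
  // bail out, we fall back to a generic virtual or direct call.
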
  ciMethod* caller = jvms->method();
  int bci = jvms->bci();
  Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
  guarantee(callee != NULL, "failed method resolution");

  // Dtrace currently doesn't work unless all calls are vanilla
  if (env()->dtrace_method_probes()) {
    allow_inline = false;
  }

  // Note: When we get profiling during stage-1 compiles, we want to pull
  // from more specific profile data which pertains to this inlining.
  // Right now, ignore the information in jvms->caller(), and do method[bci].
  ciCallProfile profile = caller->call_profile_at_bci(bci);

  // See how many times this site has been invoked.
  int site_count = profile.count();
  int receiver_count = -1;
  if (call_is_virtual && UseTypeProfile && profile.has_receiver(0)) {
    // Receivers in the profile structure are ordered by call counts
    // so that the most called (major) receiver is profile.receiver(0).
    receiver_count = profile.receiver_count(0);
  }

  CompileLog* log = this->log();
  if (log != NULL) {
    int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
    int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
    log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
                    log->identify(callee), site_count, prof_factor);
    if (call_is_virtual)  log->print(" virtual='1'");
    if (allow_inline)     log->print(" inline='1'");
    if (receiver_count >= 0) {
      log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
      if (profile.has_receiver(1)) {
        log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
      }
    }
    log->end_elem();
  }
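
  // Illustrative only (ids and counts made up): the element written above
  // comes out in the compile log roughly as
  //   <call method='123' count='3000' prof_factor='1' virtual='1' inline='1'
  //         receiver='124' receiver_count='2700'/>
  // where the numeric ids are assigned by log->identify().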

  // Special case the handling of certain common, profitable library
  // methods.  If these methods are replaced with specialized code,
  // then we return it as the inlined version of the call.
  // We do this before the strict f.p. check below because the
  // intrinsics handle strict f.p. correctly.
  if (allow_inline && allow_intrinsics) {
    CallGenerator* cg = find_intrinsic(callee, call_is_virtual);
    if (cg != NULL)  return cg;
  }

  // Do method handle calls.
  // NOTE: This must happen before normal inlining logic below since
  // MethodHandle.invoke* are native methods which obviously don't
  // have bytecodes and so normal inlining fails.
  if (callee->is_method_handle_intrinsic()) {
    return CallGenerator::for_method_handle_call(jvms, caller, callee);
  }

  // Do not inline strict fp into non-strict code, or the reverse
  if (caller->is_strict() ^ callee->is_strict()) {
    allow_inline = false;
  }
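
  // Why mixed strictness blocks inlining: a strictfp method must round
  // every intermediate FP result to standard precision, while a non-strict
  // method may keep extended precision, so folding one into the other
  // could change observable floating-point results.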

  // Attempt to inline...
  if (allow_inline) {
    // The profile data is only partly attributable to this caller,
    // scale back the call site information.
    float past_uses = jvms->method()->scale_count(site_count, prof_factor);
    // This is the number of times we expect the call code to be used.
    float expected_uses = past_uses;

    // Try inlining a bytecoded method:
    if (!call_is_virtual) {
      InlineTree* ilt;
      if (UseOldInlining) {
        ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
      } else {
        // Make a disembodied, stateless ILT.
        // TO DO:  When UseOldInlining is removed, copy the ILT code elsewhere.
        float site_invoke_ratio = prof_factor;
        // Note:  ilt is for the root of this parse, not the present call site.
        ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, MaxInlineLevel);
      }
      WarmCallInfo scratch_ci;
      if (!UseOldInlining)
        scratch_ci.init(jvms, callee, profile, prof_factor);
      WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci);
      assert(ci != &scratch_ci, "do not let this pointer escape");
      bool allow_inline   = (ci != NULL && !ci->is_cold());
      bool require_inline = (allow_inline && ci->is_hot());

      if (allow_inline) {
        CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
        if (require_inline && cg != NULL && should_delay_inlining(callee, jvms)) {
          // Delay the inlining of this method to give us the
          // opportunity to perform some high level optimizations
          // first.
          return CallGenerator::for_late_inline(callee, cg);
        }
        if (cg == NULL) {
          // Fall through.
        } else if (require_inline || !InlineWarmCalls) {
          return cg;
        } else {
          CallGenerator* cold_cg = call_generator(callee, vtable_index, call_is_virtual, jvms, false, prof_factor);
          return CallGenerator::for_warm_call(ci, cold_cg, cg);
        }
      }
    }

    // Try using the type profile.
    if (call_is_virtual && site_count > 0 && receiver_count > 0) {
      // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
      bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
      ciMethod* receiver_method = NULL;
      if (have_major_receiver || profile.morphism() == 1 ||
          (profile.morphism() == 2 && UseBimorphicInlining)) {
        // receiver_method = profile.method();
        // Profiles do not suggest methods now.  Look it up in the major receiver.
        receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                 profile.receiver(0));
      }
      if (receiver_method != NULL) {
        // The single majority receiver sufficiently outweighs the minority.
        CallGenerator* hit_cg = this->call_generator(receiver_method,
              vtable_index, !call_is_virtual, jvms, allow_inline, prof_factor);
        if (hit_cg != NULL) {
          // Look up second receiver.
          CallGenerator* next_hit_cg = NULL;
          ciMethod* next_receiver_method = NULL;
          if (profile.morphism() == 2 && UseBimorphicInlining) {
            next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                          profile.receiver(1));
            if (next_receiver_method != NULL) {
              next_hit_cg = this->call_generator(next_receiver_method,
                                  vtable_index, !call_is_virtual, jvms,
                                  allow_inline, prof_factor);
              if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
                  have_major_receiver && UseOnlyInlinedBimorphic) {
                // Skip if we can't inline second receiver's method
                next_hit_cg = NULL;
              }
            }
          }
          CallGenerator* miss_cg;
          Deoptimization::DeoptReason reason = (profile.morphism() == 2) ?
                                    Deoptimization::Reason_bimorphic :
                                    Deoptimization::Reason_class_check;
          if ((profile.morphism() == 1 ||
               (profile.morphism() == 2 && next_hit_cg != NULL)) &&
              !too_many_traps(jvms->method(), jvms->bci(), reason)) {
            // Generate uncommon trap for class check failure path
            // in case of monomorphic or bimorphic virtual call site.
            miss_cg = CallGenerator::for_uncommon_trap(callee, reason,
                        Deoptimization::Action_maybe_recompile);
          } else {
            // Generate virtual call for class check failure path
            // in case of polymorphic virtual call site.
            miss_cg = CallGenerator::for_virtual_call(callee, vtable_index);
          }
          if (miss_cg != NULL) {
            if (next_hit_cg != NULL) {
              NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1)));
              // We don't need to record dependency on a receiver here and below.
              // Whenever we inline, the dependency is added by Parse::Parse().
              miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
            }
            if (miss_cg != NULL) {
              NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count));
              CallGenerator* cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
              if (cg != NULL)  return cg;
            }
          }
        }
      }
    }
  }

  // There was no special inlining tactic, or it bailed out.
  // Use a more generic tactic, like a simple call.
  if (call_is_virtual) {
    return CallGenerator::for_virtual_call(callee, vtable_index);
  } else {
    // Class Hierarchy Analysis or Type Profile reveals a unique target,
    // or it is a static or special call.
    return CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms));
  }
}

// Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics.
bool Compile::should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
  if (has_stringbuilder()) {

    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
         call_method->holder() == C->env()->StringBuffer_klass()) &&
        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
      // Delay SB calls only when called from non-SB code
      return false;
    }

    switch (call_method->intrinsic_id()) {
      case vmIntrinsics::_StringBuilder_void:
      case vmIntrinsics::_StringBuilder_int:
      case vmIntrinsics::_StringBuilder_String:
      case vmIntrinsics::_StringBuilder_append_char:
      case vmIntrinsics::_StringBuilder_append_int:
      case vmIntrinsics::_StringBuilder_append_String:
      case vmIntrinsics::_StringBuilder_toString:
      case vmIntrinsics::_StringBuffer_void:
      case vmIntrinsics::_StringBuffer_int:
      case vmIntrinsics::_StringBuffer_String:
      case vmIntrinsics::_StringBuffer_append_char:
      case vmIntrinsics::_StringBuffer_append_int:
      case vmIntrinsics::_StringBuffer_append_String:
      case vmIntrinsics::_StringBuffer_toString:
      case vmIntrinsics::_Integer_toString:
        return true;

      case vmIntrinsics::_String_String:
        {
          Node* receiver = jvms->map()->in(jvms->argoff() + 1);
          if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
            CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
            ciMethod* m = csj->method();
            if (m != NULL &&
                (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
                 m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
              // Delay String.<init>(new SB())
              return true;
          }
          return false;
        }

      default:
        return false;
    }
  }
  return false;
}
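
// Illustrative only: the shape being protected here is the pattern javac
// emits for string concatenation, e.g.
//
//   String s = new StringBuilder().append(a).append(b).toString();
//
// Delaying the inlining of the StringBuilder/StringBuffer calls keeps the
// whole chain visible to the later string-concat optimization pass.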

// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
  // Additional inputs to consider...
  // bc      = bc()
  // caller  = method()
  // iter().get_method_holder_index()
  assert( dest_method->is_loaded(), "ciTypeFlow should not let us get here" );
  // Interface classes can be loaded & linked and never get around to
  // being initialized.  Uncommon-trap for not-initialized static or
  // v-calls.  Let interface calls happen.
  ciInstanceKlass* holder_klass = dest_method->holder();
  if (!holder_klass->is_being_initialized() &&
      !holder_klass->is_initialized() &&
      !holder_klass->is_interface()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  holder_klass);
    return true;
  }

  assert(dest_method->will_link(method()->holder(), klass, bc()), "dest_method: typeflow responsibility");
  return false;
}


//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  bool is_virtual = bc() == Bytecodes::_invokevirtual;
  bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;
  bool is_invokedynamic = bc() == Bytecodes::_invokedynamic;

  // Find target being called
  bool will_link;
  ciMethod* bc_callee = iter().get_method(will_link);  // actual callee from bytecode
  ciInstanceKlass* holder_klass = bc_callee->holder();
  ciKlass* holder = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(bc_callee, klass)) {
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
      bc_callee->print_name(); tty->cr();
    }
#endif
    return;
  }
  assert(holder_klass->is_loaded(), "");
  //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc");  // XXX invokehandle (cur_bc_raw)
  // Note:  this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note: In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  const int nargs = bc_callee->arg_size();

  // Push appendix argument (MethodType, CallSite, etc.), if one is present.
  if (iter().has_appendix()) {
    ciObject* appendix_arg = iter().get_appendix();
    const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg);
    Node* appendix_arg_node = _gvn.makecon(appendix_arg_type);
    push(appendix_arg_node);
  }
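
  // Note (JSR 292): during linkage the JVM may associate an extra "appendix"
  // object with an invokedynamic/invokehandle site -- e.g. the resolved
  // MethodType or the bound CallSite.  It travels as a hidden trailing
  // argument, which is why it is pushed here before a call strategy is
  // chosen.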

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Choose call strategy.
  bool call_is_virtual = is_virtual_or_interface;
  int vtable_index = methodOopDesc::invalid_vtable_index;
  ciMethod* callee = bc_callee;

  // Try to get the most accurate receiver type
  if (is_virtual_or_interface) {
    Node* receiver_node = stack(sp() - nargs);
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, bc_callee, receiver_type);

    // Has the call been sufficiently improved such that it is no longer virtual?
    if (optimized_virtual_method != NULL) {
      callee          = optimized_virtual_method;
      call_is_virtual = false;
    } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
      // We can make a vtable call at this site
      vtable_index = callee->resolve_vtable_index(method()->holder(), klass);
    }
  }

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors);

  // ---------------------
  dec_sp(nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(callee, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());

  bc_callee = callee = NULL;  // don't use bc_callee and callee after this point

  // ---------------------
  // Round double arguments before call
  round_double_arguments(cg->method());

#ifndef PRODUCT
  // bump global counters for calls
  count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());

  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  // Bump method data counters (We profile *before* the call is made
  // because exceptions don't return to the call site.)
  profile_call(receiver);

  JVMState* new_jvms = cg->generate(jvms);
  if (new_jvms == NULL) {
    // When inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;

    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize.  It should always be possible
    // to get a normal java call that may inline in that case.
    cg = C->call_generator(cg->method(), vtable_index, call_is_virtual, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false);
    if ((new_jvms = cg->generate(jvms)) == NULL) {
      guarantee(failing(), "call failed to generate:  calls should work");
      return;
    }
  }

  if (cg->is_inline()) {
    // Accumulate has_loops estimate
    C->set_has_loops(C->has_loops() || cg->method()->has_loops());
    C->env()->notice_inlined_method(cg->method());
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(cg->method());

    ciType* rtype = cg->method()->return_type();
    if (iter().cur_bc_raw() == Bytecodes::_invokehandle || is_invokedynamic) {
      // Be careful here with return types.
      ciType* ctype = iter().get_declared_method_signature()->return_type();
      if (ctype != rtype) {
        BasicType rt = rtype->basic_type();
        BasicType ct = ctype->basic_type();
        Node* retnode = peek();
        if (ct == T_VOID) {
          // It's OK for a method to return a value that is discarded.
          // The discarding does not require any special action from the caller.
          // The Java code knows this, at VerifyType.isNullConversion.
          pop_node(rt);  // whatever it was, pop it
          retnode = top();
        } else if (rt == T_INT || is_subword_type(rt)) {
          // FIXME: This logic should be factored out.
          if (ct == T_BOOLEAN) {
            retnode = _gvn.transform( new (C, 3) AndINode(retnode, intcon(0x1)) );
          } else if (ct == T_CHAR) {
            retnode = _gvn.transform( new (C, 3) AndINode(retnode, intcon(0xFFFF)) );
          } else if (ct == T_BYTE) {
            retnode = _gvn.transform( new (C, 3) LShiftINode(retnode, intcon(24)) );
            retnode = _gvn.transform( new (C, 3) RShiftINode(retnode, intcon(24)) );
          } else if (ct == T_SHORT) {
            retnode = _gvn.transform( new (C, 3) LShiftINode(retnode, intcon(16)) );
            retnode = _gvn.transform( new (C, 3) RShiftINode(retnode, intcon(16)) );
          } else {
            assert(ct == T_INT, err_msg("rt=%d, ct=%d", rt, ct));
          }
        } else if (rt == T_OBJECT) {
          assert(ct == T_OBJECT, err_msg("rt=T_OBJECT, ct=%d", ct));
          if (ctype->is_loaded()) {
            Node* if_fail = top();
            retnode = gen_checkcast(retnode, makecon(TypeKlassPtr::make(ctype->as_klass())), &if_fail);
            if (if_fail != top()) {
              PreserveJVMState pjvms(this);
              set_control(if_fail);
              builtin_throw(Deoptimization::Reason_class_check);
            }
            pop();
            push(retnode);
          }
        } else {
          assert(ct == rt, err_msg("unexpected mismatch rt=%d, ct=%d", rt, ct));
          // push a zero; it's better than getting an oop/int mismatch
          retnode = pop_node(rt);
          retnode = zerocon(ct);
          push_node(ct, retnode);
        }
        // Now that the value is well-behaved, continue with the call-site type.
        rtype = ctype;
      }
    }
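
    // Illustrative only: if this invokehandle site is declared to return
    // char but the linked target actually returns int, the coercion above
    // narrows the result (retnode & 0xFFFF) so the caller never observes
    // out-of-range bits; boolean, byte and short get the analogous mask or
    // sign-extending shift pair.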

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!rtype->is_loaded()) {
#ifndef PRODUCT
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
        cg->method()->print_name(); tty->cr();
      }
#endif
      if (C->log() != NULL) {
        C->log()->elem("assert_null reason='return' klass='%d'",
                       C->log()->identify(rtype));
      }
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      do_null_assert(peek(), T_OBJECT);
      set_bci(iter().cur_bci()); // put it back
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}


//---------------------------catch_call_exceptions-----------------------------
// Put Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
  GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
  GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
  GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);

  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h       = handlers.handler();
    int                 h_bci   = h->handler_bci();
    ciInstanceKlass*    h_klass = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded->contains(h_bci)) {
        /* We've already seen an unloaded exception with h_bci,
           so don't duplicate.  Duplication will cause the CatchNode to be
           unnecessarily large.  See 4713716. */
        continue;
      } else {
        saw_unloaded->append(h_bci);
      }
    }
    const Type* h_extype = TypeOopPtr::make_from_klass(h_klass);
    // (We use make_from_klass because it respects UseUniqueSubclasses.)
    h_extype = h_extype->join(TypeInstPtr::NOTNULL);
    assert(!h_extype->empty(), "sanity");
    // Note:  It's OK if the BCIs repeat themselves.
    bcis->append(h_bci);
    extypes->append(h_extype);
  }

  int len = bcis->length();
  CatchNode *cn = new (C, 2) CatchNode(control(), i_o, len+1);
  Node *catch_ = _gvn.transform(cn);

  // now branch with the exception state to each of the (potential)
  // handlers
  for(int i=0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis->at(i);
    Node* ctrl = _gvn.transform( new (C, 1) CatchProjNode(catch_, i+1, handler_bci));
    // This handler cannot happen?
    if (ctrl == top())  continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes->at(i)->is_instptr();
    Node *ex_oop = _gvn.transform(new (C, 2) CreateExNode(extypes->at(i), ctrl, i_o));

    // Handle unloaded exception classes.
    if (saw_unloaded->contains(handler_bci)) {
      // An unloaded exception type is coming here.  Do an uncommon trap.
#ifndef PRODUCT
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions.  But, watch for it.
      if ((Verbose || WizardMode) && extype->is_loaded()) {
        tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", bci());
        method()->print_name(); tty->cr();
      } else if (PrintOpto && (Verbose || WizardMode)) {
        tty->print("Bailing out on unloaded exception type ");
        extype->klass()->print_name();
        tty->print(" at bci:%d in ", bci());
        method()->print_name(); tty->cr();
      }
#endif
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded,
                    Deoptimization::Action_reinterpret,
                    extype->klass(), "!loaded exception");
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) {     // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {                   // Else jump to corresponding handler
      push_ex_oop(ex_oop);     // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note:  If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform( new (C, 1) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}


//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses.  We do a Deutsch-Shiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: We have some handlers with subklasses or that are not loaded at
// compile-time.  We have to call the runtime to resolve the exception.
// So we insert a RethrowCall and all the logic that goes with it.
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(NULL) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
  NOT_PRODUCT(if (ex_type==NULL) tty->print_cr("*** Exception not InstPtr"));
  if (ex_type == NULL)
    ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->klass()->as_instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state.  (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  Node* ex_klass_node = NULL;
  if (has_ex_handler() && !ex_type->klass_is_exact()) {
    Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
    ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );

    // Compute the exception klass a little more cleverly.
    // Obvious solution is to simply do a LoadKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
    // each arm of the Phi.  If I know something clever about the exceptions
    // I'm loading the class from, I can replace the LoadKlass with the
    // klass constant for the exception oop.
    if( ex_node->is_Phi() ) {
      ex_klass_node = new (C, ex_node->req()) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT );
      for( uint i = 1; i < ex_node->req(); i++ ) {
        Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() );
        Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
        ex_klass_node->init_req( i, k );
      }
      _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);

    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.

  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  for (;!handlers.is_done(); handlers.next()) {
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method.  We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      push_ex_oop(ex_node);        // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print_cr("  Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
      }
#endif
      merge_exception(handler_bci); // jump to handler
      return;                       // No more handling to be done here!
    }

    // Get the handler's klass
    ciInstanceKlass* klass = handler->catch_klass();

    if (!klass->is_loaded()) {    // klass is not loaded?
      // fall through into catch_call_exceptions which will emit a
      // handler with an uncommon trap.
      break;
    }

    if (klass->is_interface())  // should not happen, but...
      break;                    // bail out

    // Check the type of the exception against the catch type
    const TypeKlassPtr *tk = TypeKlassPtr::make(klass);
    Node* con = _gvn.makecon(tk);
    Node* not_subtype_ctrl = gen_subtype_check(ex_klass_node, con);
    if (!stopped()) {
      PreserveJVMState pjvms(this);
      const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr();
      assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness");
      Node* ex_oop = _gvn.transform(new (C, 2) CheckCastPPNode(control(), ex_node, tinst));
      push_ex_oop(ex_oop);      // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print("  Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
        klass->print_name();
        tty->cr();
      }
#endif
      merge_exception(handler_bci);
    }
    set_control(not_subtype_ctrl);

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note:  This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    OptoRuntime::rethrow_Type(),
                    OptoRuntime::rethrow_stub(),
                    NULL, NULL,
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}


// (Note:  Moved add_debug_info into GraphKit::add_safepoint_edges.)


#ifndef PRODUCT
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
  if( CountCompiledCalls ) {
    if( at_method_entry ) {
      // bump invocation counter if top method (for statistics)
      if (CountCompiledCalls && depth() == 1) {
        const TypeOopPtr* addr_type = TypeOopPtr::make_from_constant(method());
        Node* adr1 = makecon(addr_type);
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(methodOopDesc::compiled_invocation_counter_offset()));
        increment_counter(adr2);
      }
    } else if (is_inline) {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    } else {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    }
  }
}
#endif //PRODUCT


// Identify possible target method and inlining style
ciMethod* Parse::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                   ciMethod *dest_method, const TypeOopPtr* receiver_type) {
  // only use for virtual or interface calls

  // If it is obviously final, do not bother to call find_monomorphic_target,
  // because the class hierarchy checks are not needed, and may fail due to
  // incompletely loaded classes.  Since we do our own class loading checks
  // in this module, we may confidently bind to any method.
  if (dest_method->can_be_statically_bound()) {
    return dest_method;
  }

  // Attempt to improve the receiver
  bool actual_receiver_is_exact = false;
  ciInstanceKlass* actual_receiver = klass;
  if (receiver_type != NULL) {
    // Array methods are all inherited from Object, and are monomorphic.
    if (receiver_type->isa_aryptr() &&
        dest_method->holder() == env()->Object_klass()) {
      return dest_method;
    }

    // All other interesting cases are instance klasses.
    if (!receiver_type->isa_instptr()) {
      return NULL;
    }

    ciInstanceKlass *ikl = receiver_type->klass()->as_instance_klass();
    if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&
        (ikl == actual_receiver || ikl->is_subtype_of(actual_receiver))) {
      // ikl is the same or a more specific type than the original actual_receiver,
      // e.g. static receiver from bytecodes.
      actual_receiver = ikl;
      // Is the actual_receiver exact?
      actual_receiver_is_exact = receiver_type->klass_is_exact();
    }
  }

  ciInstanceKlass* calling_klass = caller->holder();
  ciMethod* cha_monomorphic_target = dest_method->find_monomorphic_target(calling_klass, klass, actual_receiver);
  if (cha_monomorphic_target != NULL) {
    assert(!cha_monomorphic_target->is_abstract(), "");
    // Look at the method-receiver type.  Does it add "too much information"?
    ciKlass*    mr_klass = cha_monomorphic_target->holder();
    const Type* mr_type  = TypeInstPtr::make(TypePtr::BotPTR, mr_klass);
    if (receiver_type == NULL || !receiver_type->higher_equal(mr_type)) {
      // Calling this method would include an implicit cast to its holder.
      // %%% Not yet implemented.  Would throw minor asserts at present.
      // %%% The most common wins are already gained by +UseUniqueSubclasses.
      // To fix, put the higher_equal check at the call of this routine,
      // and add a CheckCastPP to the receiver.
      if (TraceDependencies) {
        tty->print_cr("found unique CHA method, but could not cast up");
        tty->print("  method  = ");
        cha_monomorphic_target->print();
        tty->cr();
      }
      if (C->log() != NULL) {
        C->log()->elem("missed_CHA_opportunity klass='%d' method='%d'",
                       C->log()->identify(klass),
                       C->log()->identify(cha_monomorphic_target));
      }
      cha_monomorphic_target = NULL;
    }
  }
  if (cha_monomorphic_target != NULL) {
    // Hardwiring a virtual.
    // If we inlined because CHA revealed only a single target method,
    // then we are dependent on that target method not getting overridden
    // by dynamic class loading.  Be sure to test the "static" receiver
    // dest_method here, as opposed to the actual receiver, which may
    // falsely lead us to believe that the receiver is final or private.
    C->dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
    return cha_monomorphic_target;
  }

  // If the type is exact, we can still bind the method w/o a vcall.
  // (This case comes after CHA so we can see how much extra work it does.)
  if (actual_receiver_is_exact) {
    // In case of evolution, there is a dependence on every inlined method, since each
    // such method can be changed when its class is redefined.
    ciMethod* exact_method = dest_method->resolve_invoke(calling_klass, actual_receiver);
    if (exact_method != NULL) {
#ifndef PRODUCT
      if (PrintOpto) {
        tty->print("  Calling method via exact type @%d --- ", bci);
        exact_method->print_name();
        tty->cr();
      }
#endif
      return exact_method;
    }
  }

  return NULL;
}