Fri, 18 Oct 2013 10:50:17 +0200
8022783: Nashorn test fails with: assert(!def_outside->member(r))
Summary: Enables private copies of inputs for recent spill copies as well
Reviewed-by: kvn, twisti
/*
 * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/cfgnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/sharedRuntime.hpp"

void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
  if (TraceTypeProfile || C->print_inlining()) {
    outputStream* out = tty;
    if (!C->print_inlining()) {
      if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
        method->print_short_name();
        tty->cr();
      }
      CompileTask::print_inlining(prof_method, depth, bci);
    } else {
      out = C->print_inlining_stream();
    }
    CompileTask::print_inline_indent(depth, out);
    out->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
    stringStream ss;
    prof_klass->name()->print_symbol_on(&ss);
    out->print("%s", ss.as_string());
    out->cr();
  }
}

CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool call_does_dispatch,
                                       JVMState* jvms, bool allow_inline,
                                       float prof_factor, bool allow_intrinsics, bool delayed_forbidden) {
  ciMethod*       caller   = jvms->method();
  int             bci      = jvms->bci();
  Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
  guarantee(callee != NULL, "failed method resolution");

  // Dtrace currently doesn't work unless all calls are vanilla
  if (env()->dtrace_method_probes()) {
    allow_inline = false;
  }

  // Note: When we get profiling during stage-1 compiles, we want to pull
  // from more specific profile data which pertains to this inlining.
  // Right now, ignore the information in jvms->caller(), and do method[bci].
  ciCallProfile profile = caller->call_profile_at_bci(bci);

  // See how many times this site has been invoked.
  int site_count = profile.count();
  int receiver_count = -1;
  if (call_does_dispatch && UseTypeProfile && profile.has_receiver(0)) {
    // Receivers in the profile structure are ordered by call counts
    // so that the most called (major) receiver is profile.receiver(0).
    receiver_count = profile.receiver_count(0);
  }
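
  // For illustration (hypothetical numbers, not from a real profile): at a
  // bimorphic site invoked 1000 times, profile.count() == 1000 and the two
  // recorded receivers might be (ArrayList, 800) and (LinkedList, 200),
  // giving profile.receiver_prob(0) == 0.8.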

  CompileLog* log = this->log();
  if (log != NULL) {
    int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
    int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
    log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
                    log->identify(callee), site_count, prof_factor);
    if (call_does_dispatch)  log->print(" virtual='1'");
    if (allow_inline)        log->print(" inline='1'");
    if (receiver_count >= 0) {
      log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
      if (profile.has_receiver(1)) {
        log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
      }
    }
    log->end_elem();
  }

  // Special case the handling of certain common, profitable library
  // methods.  If these methods are replaced with specialized code,
  // then we return it as the inlined version of the call.
  // We do this before the strict f.p. check below because the
  // intrinsics handle strict f.p. correctly.
  CallGenerator* cg_intrinsic = NULL;
  if (allow_inline && allow_intrinsics) {
    CallGenerator* cg = find_intrinsic(callee, call_does_dispatch);
    if (cg != NULL) {
      if (cg->is_predicted()) {
        // Code without intrinsic but, hopefully, inlined.
        CallGenerator* inline_cg = this->call_generator(callee,
              vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, false);
        if (inline_cg != NULL) {
          cg = CallGenerator::for_predicted_intrinsic(cg, inline_cg);
        }
      }
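
      // The predicated intrinsic built above behaves, roughly, like
      //   if (<intrinsic predicate holds>)  <intrinsic code>
      //   else                              <inline_cg code>
      // so the slow path still gets ordinary inlining instead of a full call.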

      // If intrinsic does the virtual dispatch, we try to use the type profile
      // first, and hopefully inline it as the regular virtual call below.
      // We will retry the intrinsic if nothing else has claimed it afterwards.
      if (cg->does_virtual_dispatch()) {
        cg_intrinsic = cg;
        cg = NULL;
      } else {
        return cg;
      }
    }
  }

  // Do method handle calls.
  // NOTE: This must happen before normal inlining logic below since
  // MethodHandle.invoke* are native methods which obviously don't
  // have bytecodes and so normal inlining fails.
  if (callee->is_method_handle_intrinsic()) {
    CallGenerator* cg = CallGenerator::for_method_handle_call(jvms, caller, callee, delayed_forbidden);
    assert(cg == NULL || !delayed_forbidden || !cg->is_late_inline() || cg->is_mh_late_inline(), "unexpected CallGenerator");
    return cg;
  }

  // Do not inline strict fp into non-strict code, or the reverse
  if (caller->is_strict() ^ callee->is_strict()) {
    allow_inline = false;
  }
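  // (Mixing the two would erase the strict/non-strict boundary: e.g. a
  // strictfp callee relies on intermediate results being rounded to IEEE
  // double range, which a non-strict caller's code is free not to do.)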

  // Attempt to inline...
  if (allow_inline) {
    // The profile data is only partly attributable to this caller,
    // scale back the call site information.
    float past_uses = jvms->method()->scale_count(site_count, prof_factor);
    // This is the number of times we expect the call code to be used.
    float expected_uses = past_uses;
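    // (scale_count attenuates the raw site count by prof_factor; for
    // illustration, a site recorded 1000 times but reached through a caller
    // credited with prof_factor == 0.1 counts as roughly 100 expected uses.)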

    // Try inlining a bytecoded method:
    if (!call_does_dispatch) {
      InlineTree* ilt;
      if (UseOldInlining) {
        ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
      } else {
        // Make a disembodied, stateless ILT.
        // TO DO:  When UseOldInlining is removed, copy the ILT code elsewhere.
        float site_invoke_ratio = prof_factor;
        // Note:  ilt is for the root of this parse, not the present call site.
        ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, MaxInlineLevel);
      }
      WarmCallInfo scratch_ci;
      if (!UseOldInlining)
        scratch_ci.init(jvms, callee, profile, prof_factor);
      bool should_delay = false;
      WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci, should_delay);
      assert(ci != &scratch_ci, "do not let this pointer escape");
      bool allow_inline   = (ci != NULL && !ci->is_cold());
      bool require_inline = (allow_inline && ci->is_hot());

      if (allow_inline) {
        CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);

        if (require_inline && cg != NULL) {
          // Delay the inlining of this method to give us the
          // opportunity to perform some high level optimizations
          // first.
          if (should_delay_string_inlining(callee, jvms)) {
            assert(!delayed_forbidden, "strange");
            return CallGenerator::for_string_late_inline(callee, cg);
          } else if (should_delay_boxing_inlining(callee, jvms)) {
            assert(!delayed_forbidden, "strange");
            return CallGenerator::for_boxing_late_inline(callee, cg);
          } else if ((should_delay || AlwaysIncrementalInline) && !delayed_forbidden) {
            return CallGenerator::for_late_inline(callee, cg);
          }
        }
        if (cg == NULL || should_delay) {
          // Fall through.
        } else if (require_inline || !InlineWarmCalls) {
          return cg;
        } else {
          CallGenerator* cold_cg = call_generator(callee, vtable_index, call_does_dispatch, jvms, false, prof_factor);
          return CallGenerator::for_warm_call(ci, cold_cg, cg);
        }
      }
    }

    // Try using the type profile.
    if (call_does_dispatch && site_count > 0 && receiver_count > 0) {
      // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
      bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
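      // (For illustration: with TypeProfileMajorReceiverPercent at its usual
      // default of 90, a receiver seen on 950 of 1000 recorded calls
      // (prob 0.95) qualifies as major; one seen on 800 of 1000 does not.)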
      ciMethod* receiver_method = NULL;
      if (have_major_receiver || profile.morphism() == 1 ||
          (profile.morphism() == 2 && UseBimorphicInlining)) {
        // receiver_method = profile.method();
        // Profiles do not suggest methods now.  Look it up in the major receiver.
        receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                 profile.receiver(0));
      }
      if (receiver_method != NULL) {
        // The single majority receiver sufficiently outweighs the minority.
        CallGenerator* hit_cg = this->call_generator(receiver_method,
              vtable_index, !call_does_dispatch, jvms, allow_inline, prof_factor);
        if (hit_cg != NULL) {
          // Look up second receiver.
          CallGenerator* next_hit_cg = NULL;
          ciMethod* next_receiver_method = NULL;
          if (profile.morphism() == 2 && UseBimorphicInlining) {
            next_receiver_method = callee->resolve_invoke(jvms->method()->holder(),
                                                          profile.receiver(1));
            if (next_receiver_method != NULL) {
              next_hit_cg = this->call_generator(next_receiver_method,
                                  vtable_index, !call_does_dispatch, jvms,
                                  allow_inline, prof_factor);
              if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
                  have_major_receiver && UseOnlyInlinedBimorphic) {
                  // Skip if we can't inline second receiver's method
                  next_hit_cg = NULL;
              }
            }
          }
          CallGenerator* miss_cg;
          Deoptimization::DeoptReason reason = (profile.morphism() == 2) ?
                                    Deoptimization::Reason_bimorphic :
                                    Deoptimization::Reason_class_check;
          if ((profile.morphism() == 1 ||
               (profile.morphism() == 2 && next_hit_cg != NULL)) &&
              !too_many_traps(jvms->method(), jvms->bci(), reason)
             ) {
            // Generate uncommon trap for class check failure path
            // in case of monomorphic or bimorphic virtual call site.
            miss_cg = CallGenerator::for_uncommon_trap(callee, reason,
                        Deoptimization::Action_maybe_recompile);
          } else {
            // Generate virtual call for class check failure path
            // in case of polymorphic virtual call site.
            miss_cg = CallGenerator::for_virtual_call(callee, vtable_index);
          }
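          // The chain of predicted calls built below amounts, in effect, to:
          //   if (receiver.klass == profile.receiver(0))       <hit_cg>
          //   else if (receiver.klass == profile.receiver(1))  <next_hit_cg>
          //   else                                             <miss_cg>  (trap or v-call)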
          if (miss_cg != NULL) {
            if (next_hit_cg != NULL) {
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
              // We don't need to record dependency on a receiver here and below.
              // Whenever we inline, the dependency is added by Parse::Parse().
              miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
            }
            if (miss_cg != NULL) {
              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
              CallGenerator* cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
              if (cg != NULL)  return cg;
            }
          }
        }
      }
    }
  }

  // Nothing claimed the intrinsic; fall back to straightforward inlining
  // of the already discovered intrinsic.
  if (allow_inline && allow_intrinsics && cg_intrinsic != NULL) {
    assert(cg_intrinsic->does_virtual_dispatch(), "sanity");
    return cg_intrinsic;
  }

  // There was no special inlining tactic, or it bailed out.
  // Use a more generic tactic, like a simple call.
  if (call_does_dispatch) {
    return CallGenerator::for_virtual_call(callee, vtable_index);
  } else {
    // Class Hierarchy Analysis or Type Profile reveals a unique target,
    // or it is a static or special call.
    return CallGenerator::for_direct_call(callee, should_delay_inlining(callee, jvms));
  }
}

// Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics.
bool Compile::should_delay_string_inlining(ciMethod* call_method, JVMState* jvms) {
  if (has_stringbuilder()) {

    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
         call_method->holder() == C->env()->StringBuffer_klass()) &&
        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
      // Delay SB calls only when called from non-SB code
      return false;
    }

    switch (call_method->intrinsic_id()) {
      case vmIntrinsics::_StringBuilder_void:
      case vmIntrinsics::_StringBuilder_int:
      case vmIntrinsics::_StringBuilder_String:
      case vmIntrinsics::_StringBuilder_append_char:
      case vmIntrinsics::_StringBuilder_append_int:
      case vmIntrinsics::_StringBuilder_append_String:
      case vmIntrinsics::_StringBuilder_toString:
      case vmIntrinsics::_StringBuffer_void:
      case vmIntrinsics::_StringBuffer_int:
      case vmIntrinsics::_StringBuffer_String:
      case vmIntrinsics::_StringBuffer_append_char:
      case vmIntrinsics::_StringBuffer_append_int:
      case vmIntrinsics::_StringBuffer_append_String:
      case vmIntrinsics::_StringBuffer_toString:
      case vmIntrinsics::_Integer_toString:
        return true;

      case vmIntrinsics::_String_String:
        {
          Node* receiver = jvms->map()->in(jvms->argoff() + 1);
          if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
            CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
            ciMethod* m = csj->method();
            if (m != NULL &&
                (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
                 m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
              // Delay String.<init>(new SB())
              return true;
          }
          return false;
        }

      default:
        return false;
    }
  }
  return false;
}
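
// Delaying the inlining of boxing methods (Integer.valueOf() and friends)
// lets the late boxing-elimination pass see matched box/unbox pairs before
// they dissolve into ordinary allocation IR.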
bool Compile::should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms) {
  if (eliminate_boxing() && call_method->is_boxing_method()) {
    set_has_boxed_value(true);
    return true;
  }
  return false;
}

// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
  // Additional inputs to consider...
  // bc      = bc()
  // caller  = method()
  // iter().get_method_holder_index()
  assert( dest_method->is_loaded(), "ciTypeFlow should not let us get here" );
  // Interface classes can be loaded & linked and never get around to
  // being initialized.  Uncommon-trap for not-initialized static or
  // v-calls.  Let interface calls happen.
  ciInstanceKlass* holder_klass = dest_method->holder();
  if (!holder_klass->is_being_initialized() &&
      !holder_klass->is_initialized() &&
      !holder_klass->is_interface()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  holder_klass);
    return true;
  }

  assert(dest_method->is_loaded(), "dest_method: typeflow responsibility");
  return false;
}


//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  const bool is_virtual = bc() == Bytecodes::_invokevirtual;
  const bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  const bool has_receiver = Bytecodes::has_receiver(bc());

  // Find target being called
  bool             will_link;
  ciSignature*     declared_signature = NULL;
  ciMethod*        orig_callee  = iter().get_method(will_link, &declared_signature);  // callee in the bytecode
  ciInstanceKlass* holder_klass = orig_callee->holder();
  ciKlass*         holder       = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
  assert(declared_signature != NULL, "cannot be null");

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(orig_callee, klass)) {
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
      orig_callee->print_name(); tty->cr();
    }
#endif
    return;
  }
  assert(holder_klass->is_loaded(), "");
  //assert((bc_callee->is_static() || is_invokedynamic) == !has_receiver , "must match bc");  // XXX invokehandle (cur_bc_raw)
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // orig_callee is the resolved callee, whose signature includes the
  // appendix argument.
  const int nargs = orig_callee->arg_size();
  const bool is_signature_polymorphic = MethodHandles::is_signature_polymorphic(orig_callee->intrinsic_id());

  // Push appendix argument (MethodType, CallSite, etc.), if one.
  if (iter().has_appendix()) {
    ciObject* appendix_arg = iter().get_appendix();
    const TypeOopPtr* appendix_arg_type = TypeOopPtr::make_from_constant(appendix_arg);
    Node* appendix_arg_node = _gvn.makecon(appendix_arg_type);
    push(appendix_arg_node);
  }
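  // (For an invokedynamic the appendix is typically the linked CallSite or
  // its target MethodHandle; for a MethodHandle.invoke() call site it is
  // typically the resolved MethodType.)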

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Try to get the most accurate receiver type
  ciMethod* callee             = orig_callee;
  int       vtable_index       = Method::invalid_vtable_index;
  bool      call_does_dispatch = false;

  if (is_virtual_or_interface) {
    Node*             receiver_node = stack(sp() - nargs);
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
    callee = C->optimize_virtual_call(method(), bci(), klass, orig_callee, receiver_type,
                                      is_virtual,
                                      call_does_dispatch, vtable_index);  // out-parameters
  }

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors);

  // ---------------------
  dec_sp(nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(callee, vtable_index, call_does_dispatch, jvms, try_inline, prof_factor());

  // NOTE:  Don't use orig_callee and callee after this point!  Use cg->method() instead.
  orig_callee = callee = NULL;

  // ---------------------
  // Round double arguments before call
  round_double_arguments(cg->method());

#ifndef PRODUCT
  // bump global counters for calls
  count_compiled_calls(/*at_method_entry*/ false, cg->is_inline());

  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  // Bump method data counters (We profile *before* the call is made
  // because exceptions don't return to the call site.)
  profile_call(receiver);

  JVMState* new_jvms = cg->generate(jvms);
  if (new_jvms == NULL) {
    // When inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;

    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize.  Should always be possible to
    // get a normal java call that may inline in that case
    cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), /* allow_intrinsics= */ false);
    if ((new_jvms = cg->generate(jvms)) == NULL) {
      guarantee(failing(), "call failed to generate:  calls should work");
      return;
    }
  }

  if (cg->is_inline()) {
    // Accumulate has_loops estimate
    C->set_has_loops(C->has_loops() || cg->method()->has_loops());
    C->env()->notice_inlined_method(cg->method());
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(cg->method());

    ciType* rtype = cg->method()->return_type();
    ciType* ctype = declared_signature->return_type();

    if (Bytecodes::has_optional_appendix(iter().cur_bc_raw()) || is_signature_polymorphic) {
      // Be careful here with return types.
      if (ctype != rtype) {
        BasicType rt = rtype->basic_type();
        BasicType ct = ctype->basic_type();
        if (ct == T_VOID) {
          // It's OK for a method to return a value that is discarded.
          // The discarding does not require any special action from the caller.
          // The Java code knows this, at VerifyType.isNullConversion.
          pop_node(rt);  // whatever it was, pop it
        } else if (rt == T_INT || is_subword_type(rt)) {
          // Nothing.  These cases are handled in lambda form bytecode.
          assert(ct == T_INT || is_subword_type(ct), err_msg_res("must match: rt=%s, ct=%s", type2name(rt), type2name(ct)));
        } else if (rt == T_OBJECT || rt == T_ARRAY) {
          assert(ct == T_OBJECT || ct == T_ARRAY, err_msg_res("rt=%s, ct=%s", type2name(rt), type2name(ct)));
          if (ctype->is_loaded()) {
            const TypeOopPtr* arg_type = TypeOopPtr::make_from_klass(rtype->as_klass());
            const Type*       sig_type = TypeOopPtr::make_from_klass(ctype->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* retnode = pop();
              Node* cast_obj = _gvn.transform(new (C) CheckCastPPNode(control(), retnode, sig_type));
              push(cast_obj);
            }
          }
        } else {
          assert(rt == ct, err_msg_res("unexpected mismatch: rt=%s, ct=%s", type2name(rt), type2name(ct)));
          // push a zero; it's better than getting an oop/int mismatch
          pop_node(rt);
          Node* retnode = zerocon(ct);
          push_node(ct, retnode);
        }
        // Now that the value is well-behaved, continue with the call-site type.
        rtype = ctype;
      }
    } else {
      // Symbolic resolution enforces the types to be the same.
      // NOTE: We must relax the assert for unloaded types because two
      // different ciType instances of the same unloaded class type
      // can appear to be "loaded" by different loaders (depending on
      // the accessing class).
      assert(!rtype->is_loaded() || !ctype->is_loaded() || rtype == ctype,
             err_msg_res("mismatched return types: rtype=%s, ctype=%s", rtype->name(), ctype->name()));
    }

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!rtype->is_loaded()) {
#ifndef PRODUCT
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
        cg->method()->print_name(); tty->cr();
      }
#endif
      if (C->log() != NULL) {
        C->log()->elem("assert_null reason='return' klass='%d'",
                       C->log()->identify(rtype));
      }
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      null_assert(peek());
      set_bci(iter().cur_bci()); // put it back
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}

//---------------------------catch_call_exceptions-----------------------------
// Put Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
  GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
  GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
  GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);

  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h       = handlers.handler();
    int                 h_bci   = h->handler_bci();
    ciInstanceKlass*    h_klass = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded->contains(h_bci)) {
        /* We've already seen an unloaded exception with h_bci,
           so don't duplicate. Duplication will cause the CatchNode to be
           unnecessarily large. See 4713716. */
        continue;
      } else {
        saw_unloaded->append(h_bci);
      }
    }
    const Type* h_extype = TypeOopPtr::make_from_klass(h_klass);
    // (We use make_from_klass because it respects UseUniqueSubclasses.)
    h_extype = h_extype->join(TypeInstPtr::NOTNULL);
    assert(!h_extype->empty(), "sanity");
    // Note:  It's OK if the BCIs repeat themselves.
    bcis->append(h_bci);
    extypes->append(h_extype);
  }

  int len = bcis->length();
  CatchNode *cn = new (C) CatchNode(control(), i_o, len+1);
  Node *catch_ = _gvn.transform(cn);
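
  // Projection 0 of the Catch is the fall-through (normal return) path;
  // handler i gets projection i+1, matching the order in which bcis and
  // extypes were filled in above.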

  // now branch with the exception state to each of the (potential)
  // handlers
  for(int i=0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis->at(i);
    Node* ctrl = _gvn.transform( new (C) CatchProjNode(catch_, i+1,handler_bci));
    // This handler cannot happen?
    if (ctrl == top())  continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes->at(i)->is_instptr();
    Node *ex_oop = _gvn.transform(new (C) CreateExNode(extypes->at(i), ctrl, i_o));

    // Handle unloaded exception classes.
    if (saw_unloaded->contains(handler_bci)) {
      // An unloaded exception type is coming here.  Do an uncommon trap.
#ifndef PRODUCT
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions.  But, watch for it.
      if ((Verbose || WizardMode) && extype->is_loaded()) {
        tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", bci());
        method()->print_name(); tty->cr();
      } else if (PrintOpto && (Verbose || WizardMode)) {
        tty->print("Bailing out on unloaded exception type ");
        extype->klass()->print_name();
        tty->print(" at bci:%d in ", bci());
        method()->print_name(); tty->cr();
      }
#endif
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded,
                    Deoptimization::Action_reinterpret,
                    extype->klass(), "!loaded exception");
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) {        // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {                      // Else jump to corresponding handler
      push_ex_oop(ex_oop);        // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note: If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform( new (C) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}


//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses.  We do a Deutsch-Shiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: We have some handlers with subklasses or are not loaded at
// compile-time.  We have to call the runtime to resolve the exception.
// So we insert a RethrowCall and all the logic that goes with it.
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(NULL) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
  NOT_PRODUCT(if (ex_type==NULL) tty->print_cr("*** Exception not InstPtr"));
  if (ex_type == NULL)
    ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->klass()->as_instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state.  (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  Node* ex_klass_node = NULL;
  if (has_ex_handler() && !ex_type->klass_is_exact()) {
    Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
    ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );

    // Compute the exception klass a little more cleverly.
    // Obvious solution is to simply do a LoadKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
    // each arm of the Phi.  If I know something clever about the exceptions
    // I'm loading the class from, I can replace the LoadKlass with the
    // klass constant for the exception oop.
    if( ex_node->is_Phi() ) {
      ex_klass_node = new (C) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT );
      for( uint i = 1; i < ex_node->req(); i++ ) {
        Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() );
        Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
        ex_klass_node->init_req( i, k );
      }
      _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);

    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.

  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  for (;!handlers.is_done(); handlers.next()) {
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method.  We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      push_ex_oop(ex_node);        // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print_cr("  Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
      }
#endif
      merge_exception(handler_bci); // jump to handler
      return;                   // No more handling to be done here!
    }

    // Get the handler's klass
    ciInstanceKlass* klass = handler->catch_klass();

    if (!klass->is_loaded()) {  // klass is not loaded?
      // fall through into catch_call_exceptions which will emit a
      // handler with an uncommon trap.
      break;
    }

    if (klass->is_interface())  // should not happen, but...
      break;                    // bail out

    // Check the type of the exception against the catch type
    const TypeKlassPtr *tk = TypeKlassPtr::make(klass);
    Node* con = _gvn.makecon(tk);
    Node* not_subtype_ctrl = gen_subtype_check(ex_klass_node, con);
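    // gen_subtype_check splits control in two: on the fall-through path the
    // exception is known to be a subtype of 'klass'; on not_subtype_ctrl the
    // check failed and the next handler must be consulted.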
    if (!stopped()) {
      PreserveJVMState pjvms(this);
      const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr();
      assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness");
      Node* ex_oop = _gvn.transform(new (C) CheckCastPPNode(control(), ex_node, tinst));
      push_ex_oop(ex_oop);      // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print("  Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
        klass->print_name();
        tty->cr();
      }
#endif
      merge_exception(handler_bci);
    }
    set_control(not_subtype_ctrl);

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note:  This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    OptoRuntime::rethrow_Type(),
                    OptoRuntime::rethrow_stub(),
                    NULL, NULL,
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}


// (Note:  Moved add_debug_info into GraphKit::add_safepoint_edges.)


#ifndef PRODUCT
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
  if( CountCompiledCalls ) {
    if( at_method_entry ) {
      // bump invocation counter if top method (for statistics)
      if (CountCompiledCalls && depth() == 1) {
        const TypePtr* addr_type = TypeMetadataPtr::make(method());
        Node* adr1 = makecon(addr_type);
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(Method::compiled_invocation_counter_offset()));
        increment_counter(adr2);
      }
    } else if (is_inline) {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    } else {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    }
  }
}
#endif //PRODUCT


ciMethod* Compile::optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                         ciMethod* callee, const TypeOopPtr* receiver_type,
                                         bool is_virtual,
                                         bool& call_does_dispatch, int& vtable_index) {
  // Set default values for out-parameters.
  call_does_dispatch = true;
  vtable_index       = Method::invalid_vtable_index;

  // Choose call strategy.
  ciMethod* optimized_virtual_method = optimize_inlining(caller, bci, klass, callee, receiver_type);

  // Has the call been sufficiently improved such that it is no longer virtual?
  if (optimized_virtual_method != NULL) {
    callee             = optimized_virtual_method;
    call_does_dispatch = false;
  } else if (!UseInlineCaches && is_virtual && callee->is_loaded()) {
    // We can make a vtable call at this site
    vtable_index = callee->resolve_vtable_index(caller->holder(), klass);
  }
  return callee;
}

// Identify possible target method and inlining style
ciMethod* Compile::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                     ciMethod* callee, const TypeOopPtr* receiver_type) {
  // only use for virtual or interface calls

  // If it is obviously final, do not bother to call find_monomorphic_target,
  // because the class hierarchy checks are not needed, and may fail due to
  // incompletely loaded classes.  Since we do our own class loading checks
  // in this module, we may confidently bind to any method.
  if (callee->can_be_statically_bound()) {
    return callee;
  }

  // Attempt to improve the receiver
  bool actual_receiver_is_exact = false;
  ciInstanceKlass* actual_receiver = klass;
  if (receiver_type != NULL) {
    // Array methods are all inherited from Object, and are monomorphic.
    if (receiver_type->isa_aryptr() &&
        callee->holder() == env()->Object_klass()) {
      return callee;
    }

    // All other interesting cases are instance klasses.
    if (!receiver_type->isa_instptr()) {
      return NULL;
    }

    ciInstanceKlass *ikl = receiver_type->klass()->as_instance_klass();
    if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&
        (ikl == actual_receiver || ikl->is_subtype_of(actual_receiver))) {
      // ikl is a same or better type than the original actual_receiver,
      // e.g. static receiver from bytecodes.
      actual_receiver = ikl;
      // Is the actual_receiver exact?
      actual_receiver_is_exact = receiver_type->klass_is_exact();
    }
  }
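
  // (For illustration: for "List l = new ArrayList(); l.size()" the declared
  // holder klass is the interface List, but the parser's receiver_type is the
  // exact instance klass ArrayList, so actual_receiver improves to ArrayList
  // and the call below can often be bound without a dispatch.)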

  ciInstanceKlass*   calling_klass = caller->holder();
  ciMethod* cha_monomorphic_target = callee->find_monomorphic_target(calling_klass, klass, actual_receiver);
  if (cha_monomorphic_target != NULL) {
    assert(!cha_monomorphic_target->is_abstract(), "");
    // Look at the method-receiver type.  Does it add "too much information"?
    ciKlass*    mr_klass = cha_monomorphic_target->holder();
    const Type* mr_type  = TypeInstPtr::make(TypePtr::BotPTR, mr_klass);
    if (receiver_type == NULL || !receiver_type->higher_equal(mr_type)) {
      // Calling this method would include an implicit cast to its holder.
      // %%% Not yet implemented.  Would throw minor asserts at present.
      // %%% The most common wins are already gained by +UseUniqueSubclasses.
      // To fix, put the higher_equal check at the call of this routine,
      // and add a CheckCastPP to the receiver.
      if (TraceDependencies) {
        tty->print_cr("found unique CHA method, but could not cast up");
        tty->print("  method  = ");
        cha_monomorphic_target->print();
        tty->cr();
      }
      if (log() != NULL) {
        log()->elem("missed_CHA_opportunity klass='%d' method='%d'",
                    log()->identify(klass),
                    log()->identify(cha_monomorphic_target));
      }
      cha_monomorphic_target = NULL;
    }
  }
  if (cha_monomorphic_target != NULL) {
    // Hardwiring a virtual.
    // If we inlined because CHA revealed only a single target method,
    // then we are dependent on that target method not getting overridden
    // by dynamic class loading.  Be sure to test the "static" receiver
    // dest_method here, as opposed to the actual receiver, which may
    // falsely lead us to believe that the receiver is final or private.
    dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
    return cha_monomorphic_target;
  }

  // If the type is exact, we can still bind the method w/o a vcall.
  // (This case comes after CHA so we can see how much extra work it does.)
  if (actual_receiver_is_exact) {
    // In case of evolution, there is a dependence on every inlined method, since each
    // such method can be changed when its class is redefined.
    ciMethod* exact_method = callee->resolve_invoke(calling_klass, actual_receiver);
    if (exact_method != NULL) {
#ifndef PRODUCT
      if (PrintOpto) {
        tty->print("  Calling method via exact type @%d --- ", bci);
        exact_method->print_name();
        tty->cr();
      }
#endif
      return exact_method;
    }
  }

  return NULL;
}