Wed, 06 Jan 2010 14:22:39 -0800
6914300: ciEnv should export all well known classes
Reviewed-by: kvn, twisti
/*
 * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_doCall.cpp.incl"

#ifndef PRODUCT
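// Debug-only helper: prints one line summarizing a type-profile-based
// decision at a call site (caller, inlining depth, bci, profiled callee,
// and the receiver klass with its counts).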
void trace_type_profile(ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
  if (TraceTypeProfile || PrintInlining || PrintOptoInlining) {
    tty->print(" ");
    for( int i = 0; i < depth; i++ ) tty->print(" ");
    if (!PrintOpto) {
      method->print_short_name();
      tty->print(" ->");
    }
    tty->print(" @ %d ", bci);
    prof_method->print_short_name();
    tty->print(" >>TypeProfile (%d/%d counts) = ", receiver_count, site_count);
    prof_klass->name()->print_symbol();
    tty->print_cr(" (%d bytes)", prof_method->code_size());
  }
}
#endif
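
// Decide how code will be generated for this call site. The strategies
// below are tried in order: an intrinsic, inlining of the bytecoded
// method (immediate, delayed, or warm), a type-profile-based predicted
// call, method handle adapters, and finally a plain virtual or direct call.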
CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float prof_factor) {
  CallGenerator* cg;

  // Dtrace currently doesn't work unless all calls are vanilla
  if (env()->dtrace_method_probes()) {
    allow_inline = false;
  }

  // Note: When we get profiling during stage-1 compiles, we want to pull
  // from more specific profile data which pertains to this inlining.
  // Right now, ignore the information in jvms->caller(), and do method[bci].
  ciCallProfile profile = jvms->method()->call_profile_at_bci(jvms->bci());

  // See how many times this site has been invoked.
  int site_count = profile.count();
  int receiver_count = -1;
  if (call_is_virtual && UseTypeProfile && profile.has_receiver(0)) {
    // Receivers in the profile structure are ordered by call counts
    // so that the most called (major) receiver is profile.receiver(0).
    receiver_count = profile.receiver_count(0);
  }

  CompileLog* log = this->log();
  if (log != NULL) {
    int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
    int r2id = (profile.morphism() == 2)? log->identify(profile.receiver(1)):-1;
    log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
                    log->identify(call_method), site_count, prof_factor);
    if (call_is_virtual) log->print(" virtual='1'");
    if (allow_inline) log->print(" inline='1'");
    if (receiver_count >= 0) {
      log->print(" receiver='%d' receiver_count='%d'", rid, receiver_count);
      if (profile.has_receiver(1)) {
        log->print(" receiver2='%d' receiver2_count='%d'", r2id, profile.receiver_count(1));
      }
    }
    log->end_elem();
  }

  // Special case the handling of certain common, profitable library
  // methods. If these methods are replaced with specialized code,
  // then we return it as the inlined version of the call.
  // We do this before the strict f.p. check below because the
  // intrinsics handle strict f.p. correctly.
  if (allow_inline) {
    cg = find_intrinsic(call_method, call_is_virtual);
    if (cg != NULL) return cg;
  }

  // Do not inline strict fp into non-strict code, or the reverse
  bool caller_method_is_strict = jvms->method()->is_strict();
  if( caller_method_is_strict ^ call_method->is_strict() ) {
    allow_inline = false;
  }

  // Attempt to inline...
  if (allow_inline) {
    // The profile data is only partly attributable to this caller,
    // scale back the call site information.
    float past_uses = jvms->method()->scale_count(site_count, prof_factor);
    // This is the number of times we expect the call code to be used.
    float expected_uses = past_uses;

    // Try inlining a bytecoded method:
    if (!call_is_virtual) {
      InlineTree* ilt;
      if (UseOldInlining) {
        ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
      } else {
        // Make a disembodied, stateless ILT.
        // TO DO: When UseOldInlining is removed, copy the ILT code elsewhere.
        float site_invoke_ratio = prof_factor;
        // Note: ilt is for the root of this parse, not the present call site.
        ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio);
      }
      WarmCallInfo scratch_ci;
      if (!UseOldInlining)
        scratch_ci.init(jvms, call_method, profile, prof_factor);
      WarmCallInfo* ci = ilt->ok_to_inline(call_method, jvms, profile, &scratch_ci);
      assert(ci != &scratch_ci, "do not let this pointer escape");
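      // Classify the site from the WarmCallInfo: a cold site is not
      // inlined at all, a hot one must be inlined (or deliberately
      // delayed), and anything in between may be wrapped in a warm-call
      // generator below.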
      bool allow_inline = (ci != NULL && !ci->is_cold());
      bool require_inline = (allow_inline && ci->is_hot());

      if (allow_inline) {
        CallGenerator* cg = CallGenerator::for_inline(call_method, expected_uses);
        if (require_inline && cg != NULL && should_delay_inlining(call_method, jvms)) {
          // Delay the inlining of this method to give us the
          // opportunity to perform some high level optimizations
          // first.
          return CallGenerator::for_late_inline(call_method, cg);
        }
        if (cg == NULL) {
          // Fall through.
        } else if (require_inline || !InlineWarmCalls) {
          return cg;
        } else {
          CallGenerator* cold_cg = call_generator(call_method, vtable_index, call_is_virtual, jvms, false, prof_factor);
          return CallGenerator::for_warm_call(ci, cold_cg, cg);
        }
      }
    }

    // Try using the type profile.
    if (call_is_virtual && site_count > 0 && receiver_count > 0) {
      // The major receiver's count >= TypeProfileMajorReceiverPercent of site_count.
      bool have_major_receiver = (100.*profile.receiver_prob(0) >= (float)TypeProfileMajorReceiverPercent);
      ciMethod* receiver_method = NULL;
      if (have_major_receiver || profile.morphism() == 1 ||
          (profile.morphism() == 2 && UseBimorphicInlining)) {
        // receiver_method = profile.method();
        // Profiles do not suggest methods now. Look it up in the major receiver.
        receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
                                                      profile.receiver(0));
      }
      if (receiver_method != NULL) {
        // The single majority receiver sufficiently outweighs the minority.
        CallGenerator* hit_cg = this->call_generator(receiver_method,
              vtable_index, !call_is_virtual, jvms, allow_inline, prof_factor);
        if (hit_cg != NULL) {
          // Look up second receiver.
          CallGenerator* next_hit_cg = NULL;
          ciMethod* next_receiver_method = NULL;
          if (profile.morphism() == 2 && UseBimorphicInlining) {
            next_receiver_method = call_method->resolve_invoke(jvms->method()->holder(),
                                                               profile.receiver(1));
            if (next_receiver_method != NULL) {
              next_hit_cg = this->call_generator(next_receiver_method,
                                  vtable_index, !call_is_virtual, jvms,
                                  allow_inline, prof_factor);
              if (next_hit_cg != NULL && !next_hit_cg->is_inline() &&
                  have_major_receiver && UseOnlyInlinedBimorphic) {
                // Skip if we can't inline second receiver's method
                next_hit_cg = NULL;
              }
            }
          }
          CallGenerator* miss_cg;
          if (( profile.morphism() == 1 ||
                (profile.morphism() == 2 && next_hit_cg != NULL) ) &&

              !too_many_traps(Deoptimization::Reason_class_check)
              // Check only the total number of traps per method to allow
              // the transition from the monomorphic to the bimorphic case between
              // compilations without falling into a virtual call.
              // A monomorphic case may have the class_check trap flag set
              // due to the time gap between the uncommon trap processing
              // when flags are set in the MDO and the call site bytecode execution
              // in the Interpreter when MDO counters are updated.
              // There was also a class_check trap in the monomorphic case due to
              // bug 6225440.

             ) {
            // Generate uncommon trap for class check failure path
            // in case of monomorphic or bimorphic virtual call site.
            miss_cg = CallGenerator::for_uncommon_trap(call_method,
                        Deoptimization::Reason_class_check,
                        Deoptimization::Action_maybe_recompile);
          } else {
            // Generate virtual call for class check failure path
            // in case of polymorphic virtual call site.
            miss_cg = CallGenerator::for_virtual_call(call_method, vtable_index);
          }
          if (miss_cg != NULL) {
            if (next_hit_cg != NULL) {
              NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth(), jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1)));
              // We don't need to record dependency on a receiver here and below.
              // Whenever we inline, the dependency is added by Parse::Parse().
              miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
            }
            if (miss_cg != NULL) {
              NOT_PRODUCT(trace_type_profile(jvms->method(), jvms->depth(), jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count));
              cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
              if (cg != NULL) return cg;
            }
          }
        }
      }
    }
  }
  // Do MethodHandle calls.
  if (call_method->is_method_handle_invoke()) {
    if (jvms->method()->java_code_at_bci(jvms->bci()) != Bytecodes::_invokedynamic) {
      GraphKit kit(jvms);
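      // For a non-invokedynamic method handle invoke, the MethodHandle
      // itself is the first argument of the call.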
      Node* n = kit.argument(0);

      if (n->Opcode() == Op_ConP) {
        const TypeOopPtr* oop_ptr = n->bottom_type()->is_oopptr();
        ciObject* const_oop = oop_ptr->const_oop();
        ciMethodHandle* method_handle = const_oop->as_method_handle();

        // Set the actually called method to have access to the class
        // and signature in the MethodHandleCompiler.
        method_handle->set_callee(call_method);

        // Get an adapter for the MethodHandle.
        ciMethod* target_method = method_handle->get_method_handle_adapter();

        CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
        if (hit_cg != NULL && hit_cg->is_inline())
          return hit_cg;
      }

      return CallGenerator::for_direct_call(call_method);
    } else {
      // Get the MethodHandle from the CallSite.
      ciMethod* caller_method = jvms->method();
      ciBytecodeStream str(caller_method);
      str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
      ciCallSite* call_site = str.get_call_site();
      ciMethodHandle* method_handle = call_site->get_target();

      // Set the actually called method to have access to the class
      // and signature in the MethodHandleCompiler.
      method_handle->set_callee(call_method);

      // Get an adapter for the MethodHandle.
      ciMethod* target_method = method_handle->get_invokedynamic_adapter();

      CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
      if (hit_cg != NULL && hit_cg->is_inline()) {
        CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method);
        return CallGenerator::for_predicted_dynamic_call(method_handle, miss_cg, hit_cg, prof_factor);
      }

      // If something failed, generate a normal dynamic call.
      return CallGenerator::for_dynamic_call(call_method);
    }
  }

  // There was no special inlining tactic, or it bailed out.
  // Use a more generic tactic, like a simple call.
  if (call_is_virtual) {
    return CallGenerator::for_virtual_call(call_method, vtable_index);
  } else {
    // Class Hierarchy Analysis or Type Profile reveals a unique target,
    // or it is a static or special call.
    return CallGenerator::for_direct_call(call_method, should_delay_inlining(call_method, jvms));
  }
}

// Return true for methods that shouldn't be inlined early so that
// they are easier to analyze and optimize as intrinsics.
bool Compile::should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
  if (has_stringbuilder()) {

    if ((call_method->holder() == C->env()->StringBuilder_klass() ||
         call_method->holder() == C->env()->StringBuffer_klass()) &&
        (jvms->method()->holder() == C->env()->StringBuilder_klass() ||
         jvms->method()->holder() == C->env()->StringBuffer_klass())) {
      // Delay SB calls only when called from non-SB code
      return false;
    }

    switch (call_method->intrinsic_id()) {
      case vmIntrinsics::_StringBuilder_void:
      case vmIntrinsics::_StringBuilder_int:
      case vmIntrinsics::_StringBuilder_String:
      case vmIntrinsics::_StringBuilder_append_char:
      case vmIntrinsics::_StringBuilder_append_int:
      case vmIntrinsics::_StringBuilder_append_String:
      case vmIntrinsics::_StringBuilder_toString:
      case vmIntrinsics::_StringBuffer_void:
      case vmIntrinsics::_StringBuffer_int:
      case vmIntrinsics::_StringBuffer_String:
      case vmIntrinsics::_StringBuffer_append_char:
      case vmIntrinsics::_StringBuffer_append_int:
      case vmIntrinsics::_StringBuffer_append_String:
      case vmIntrinsics::_StringBuffer_toString:
      case vmIntrinsics::_Integer_toString:
        return true;

      case vmIntrinsics::_String_String:
        {
          Node* receiver = jvms->map()->in(jvms->argoff() + 1);
          if (receiver->is_Proj() && receiver->in(0)->is_CallStaticJava()) {
            CallStaticJavaNode* csj = receiver->in(0)->as_CallStaticJava();
            ciMethod* m = csj->method();
            if (m != NULL &&
                (m->intrinsic_id() == vmIntrinsics::_StringBuffer_toString ||
                 m->intrinsic_id() == vmIntrinsics::_StringBuilder_toString))
              // Delay String.<init>(new SB())
              return true;
          }
          return false;
        }

      default:
        return false;
    }
  }
  return false;
}

// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
  // Additional inputs to consider...
  // bc = bc()
  // caller = method()
  // iter().get_method_holder_index()
  assert( dest_method->is_loaded(), "ciTypeFlow should not let us get here" );
  // Interface classes can be loaded & linked and never get around to
  // being initialized. Uncommon-trap for not-initialized static or
  // v-calls. Let interface calls happen.
  ciInstanceKlass* holder_klass = dest_method->holder();
  if (!holder_klass->is_initialized() &&
      !holder_klass->is_interface()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  holder_klass);
    return true;
  }

  assert(dest_method->will_link(method()->holder(), klass, bc()), "dest_method: typeflow responsibility");
  return false;
}

//------------------------------do_call----------------------------------------
// Handle your basic call. Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  bool is_virtual = bc() == Bytecodes::_invokevirtual;
  bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;
  bool is_invokedynamic = bc() == Bytecodes::_invokedynamic;

  // Find target being called
  bool will_link;
  ciMethod* dest_method = iter().get_method(will_link);
  ciInstanceKlass* holder_klass = dest_method->holder();
  ciKlass* holder = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

  int nargs = dest_method->arg_size();
  if (is_invokedynamic) nargs -= 1;

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(dest_method, klass)) {
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" can not compile call at bci %d to:", bci());
      dest_method->print_name(); tty->cr();
    }
#endif
    return;
  }
  assert(holder_klass->is_loaded(), "");
  assert((dest_method->is_static() || is_invokedynamic) == !has_receiver, "must match bc");
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Choose call strategy.
  bool call_is_virtual = is_virtual_or_interface;
  int vtable_index = methodOopDesc::invalid_vtable_index;
  ciMethod* call_method = dest_method;

  // Try to get the most accurate receiver type
  if (is_virtual_or_interface) {
    Node* receiver_node = stack(sp() - nargs);
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type);

    // Has the call been sufficiently improved such that it is no longer a virtual?
    if (optimized_virtual_method != NULL) {
      call_method = optimized_virtual_method;
      call_is_virtual = false;
    } else if (!UseInlineCaches && is_virtual && call_method->is_loaded()) {
      // We can make a vtable call at this site
      vtable_index = call_method->resolve_vtable_index(method()->holder(), klass);
    }
  }

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors);

  // ---------------------
  inc_sp(- nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());

  // ---------------------
  // Round double arguments before call
  round_double_arguments(dest_method);

#ifndef PRODUCT
  // bump global counters for calls
  count_compiled_calls(false/*at_method_entry*/, cg->is_inline());

  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(), "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  // Bump method data counters (We profile *before* the call is made
  // because exceptions don't return to the call site.)
  profile_call(receiver);

  JVMState* new_jvms;
  if ((new_jvms = cg->generate(jvms)) == NULL) {
    // When inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again. Once we call
    // cg->generate(), we are committed. If it fails, the whole
    // compilation task is compromised.
    if (failing()) return;
#ifndef PRODUCT
    if (PrintOpto || PrintOptoInlining || PrintInlining) {
      // Only one fall-back, so if an intrinsic fails, ignore any bytecodes.
      if (cg->is_intrinsic() && call_method->code_size() > 0) {
        tty->print("Bailed out of intrinsic, will not inline: ");
        call_method->print_name(); tty->cr();
      }
    }
#endif
    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize. The fallback position is
    // to call out-of-line.
    try_inline = false;  // Inline tactic bailed out.
    cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor());
    if ((new_jvms = cg->generate(jvms)) == NULL) {
      guarantee(failing(), "call failed to generate: calls should work");
      return;
    }
  }

  if (cg->is_inline()) {
    // Accumulate has_loops estimate
    C->set_has_loops(C->has_loops() || call_method->has_loops());
    C->env()->notice_inlined_method(call_method);
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(dest_method);

    // If the return type of the method is not loaded, assert that the
    // value we got is a null. Otherwise, we need to recompile.
    if (!dest_method->return_type()->is_loaded()) {
#ifndef PRODUCT
      if (PrintOpto && (Verbose || WizardMode)) {
        method()->print_name(); tty->print_cr(" asserting nullness of result at bci: %d", bci());
        dest_method->print_name(); tty->cr();
      }
#endif
      if (C->log() != NULL) {
        C->log()->elem("assert_null reason='return' klass='%d'",
                       C->log()->identify(dest_method->return_type()));
      }
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      do_null_assert(peek(), T_OBJECT);
      set_bci(iter().cur_bci()); // put it back
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}

//---------------------------catch_call_exceptions-----------------------------
// Put Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
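  // Three parallel arrays collect, per handler: its bci, its exception
  // type, and (separately) the bcis of handlers whose exception class
  // is not loaded.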
  GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
  GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
  GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);

  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h = handlers.handler();
    int h_bci = h->handler_bci();
    ciInstanceKlass* h_klass = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded->contains(h_bci)) {
        /* We've already seen an unloaded exception with h_bci,
           so don't duplicate. Duplication will cause the CatchNode to be
           unnecessarily large. See 4713716. */
        continue;
      } else {
        saw_unloaded->append(h_bci);
      }
    }
    const Type* h_extype = TypeOopPtr::make_from_klass(h_klass);
    // (We use make_from_klass because it respects UseUniqueSubclasses.)
    h_extype = h_extype->join(TypeInstPtr::NOTNULL);
    assert(!h_extype->empty(), "sanity");
    // Note:  It's OK if the BCIs repeat themselves.
    bcis->append(h_bci);
    extypes->append(h_extype);
  }

  int len = bcis->length();
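  // len+1 projections: one per handler, plus the fall-through projection
  // for the normal (non-exceptional) return from the call.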
  CatchNode *cn = new (C, 2) CatchNode(control(), i_o, len+1);
  Node *catch_ = _gvn.transform(cn);

  // now branch with the exception state to each of the (potential)
  // handlers
  for(int i=0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis->at(i);
    Node* ctrl = _gvn.transform( new (C, 1) CatchProjNode(catch_, i+1, handler_bci));
    // This handler cannot happen?
    if (ctrl == top()) continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes->at(i)->is_instptr();
    Node *ex_oop = _gvn.transform(new (C, 2) CreateExNode(extypes->at(i), ctrl, i_o));

    // Handle unloaded exception classes.
    if (saw_unloaded->contains(handler_bci)) {
      // An unloaded exception type is coming here. Do an uncommon trap.
#ifndef PRODUCT
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions. But, watch for it.
      if (extype->is_loaded()) {
        tty->print("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", handler_bci);
        method()->print_name(); tty->cr();
      } else if (PrintOpto && (Verbose || WizardMode)) {
        tty->print("Bailing out on unloaded exception type ");
        extype->klass()->print_name();
        tty->print(" at bci:%d in ", bci());
        method()->print_name(); tty->cr();
      }
#endif
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded,
                    Deoptimization::Action_reinterpret,
                    extype->klass(), "!loaded exception");
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) {        // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {                      // Else jump to corresponding handler
      push_ex_oop(ex_oop);        // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note:  If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform( new (C, 1) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}

//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses. We do a Deutsch-Shiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: We have some handlers with subklasses or whose klasses are not
// loaded at compile-time. We have to call the runtime to resolve the
// exception. So we insert a RethrowCall and all the logic that goes with it.
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(NULL) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->isa_instptr();
  NOT_PRODUCT(if (ex_type==NULL) tty->print_cr("*** Exception not InstPtr"));
  if (ex_type == NULL)
    ex_type = TypeOopPtr::make_from_klass(env()->Throwable_klass())->is_instptr();

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->klass()->as_instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state. (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  Node* ex_klass_node = NULL;
  if (has_ex_handler() && !ex_type->klass_is_exact()) {
    Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
    ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );

    // Compute the exception klass a little more cleverly.
    // The obvious solution is to simply do a LoadKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a LoadKlass for
    // each arm of the Phi. If I know something clever about the exceptions
    // I'm loading the class from, I can replace the LoadKlass with the
    // klass constant for the exception oop.
    if( ex_node->is_Phi() ) {
      ex_klass_node = new (C, ex_node->req()) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT );
      for( uint i = 1; i < ex_node->req(); i++ ) {
        Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() );
        Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
        ex_klass_node->init_req( i, k );
      }
      _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);

    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.

  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  for (;!handlers.is_done(); handlers.next()) {
    // Do nothing if turned off
    if( !DeutschShiffmanExceptions ) break;
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method. We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      push_ex_oop(ex_node);       // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print_cr("  Catching every inline exception bci:%d -> handler_bci:%d", bci(), handler_bci);
      }
#endif
      merge_exception(handler_bci); // jump to handler
      return;                     // No more handling to be done here!
    }

    // %%% The following logic replicates make_from_klass_unique.
    // TO DO:  Replace by a subroutine call. Then generalize
    // the type check, as noted in the next "%%%" comment.

    ciInstanceKlass* klass = handler->catch_klass();
    if (UseUniqueSubclasses) {
      // (We use make_from_klass because it respects UseUniqueSubclasses.)
      const TypeOopPtr* tp = TypeOopPtr::make_from_klass(klass);
      klass = tp->klass()->as_instance_klass();
    }

    // Get the handler's klass
    if (!klass->is_loaded())    // klass is not loaded?
      break;                    // Must call Rethrow!
    if (klass->is_interface())  // should not happen, but...
      break;                    // bail out
    // See if the loaded exception klass has no subtypes
    if (klass->has_subklass())
      break;                    // Cannot easily do precise test ==> Rethrow

    // %%% Now that subclass checking is very fast, we need to rewrite
    // this section and remove the option "DeutschShiffmanExceptions".
    // The exception processing chain should be a normal typecase pattern,
    // with a bailout to the interpreter only in the case of unloaded
    // classes. (The bailout should mark the method non-entrant.)
    // This rewrite should be placed in GraphKit::, not Parse::.

    // Add a dependence; if any subclass added we need to recompile
    // %%% should use stronger assert_unique_concrete_subtype instead
    if (!klass->is_final()) {
      C->dependencies()->assert_leaf_type(klass);
    }

    // Implement precise test
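    // Compare the exception's klass against the handler's klass constant.
    // The 'ne' path falls through to check the next handler; the matching
    // (equal) path is built inside the cutout below and merges into the
    // handler.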
    const TypeKlassPtr *tk = TypeKlassPtr::make(klass);
    Node* con = _gvn.makecon(tk);
    Node* cmp = _gvn.transform( new (C, 3) CmpPNode(ex_klass_node, con) );
    Node* bol = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ne) );
    { BuildCutout unless(this, bol, PROB_LIKELY(0.7f));
      const TypeInstPtr* tinst = TypeInstPtr::make_exact(TypePtr::NotNull, klass);
      Node* ex_oop = _gvn.transform(new (C, 2) CheckCastPPNode(control(), ex_node, tinst));
      push_ex_oop(ex_oop);      // Push exception oop for handler
#ifndef PRODUCT
      if (PrintOpto && WizardMode) {
        tty->print("  Catching inline exception bci:%d -> handler_bci:%d -- ", bci(), handler_bci);
        klass->print_name();
        tty->cr();
      }
#endif
      merge_exception(handler_bci);
    }

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  if (remaining == 1) {
    // Further checks do not matter.
  }

  if (can_rerun_bytecode()) {
    // Do not push_ex_oop here!
    // Re-executing the bytecode will reproduce the throwing condition.
    bool must_throw = true;
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none,
                  (ciKlass*)NULL, (const char*)NULL, // default args
                  must_throw);
    return;
  }

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note:  This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    OptoRuntime::rethrow_Type(),
                    OptoRuntime::rethrow_stub(),
                    NULL, NULL,
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O.

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}

// (Note:  Moved add_debug_info into GraphKit::add_safepoint_edges.)

#ifndef PRODUCT
void Parse::count_compiled_calls(bool at_method_entry, bool is_inline) {
  if( CountCompiledCalls ) {
    if( at_method_entry ) {
      // bump invocation counter if top method (for statistics)
      if (CountCompiledCalls && depth() == 1) {
        const TypeInstPtr* addr_type = TypeInstPtr::make(method());
        Node* adr1 = makecon(addr_type);
        Node* adr2 = basic_plus_adr(adr1, adr1, in_bytes(methodOopDesc::compiled_invocation_counter_offset()));
        increment_counter(adr2);
      }
    } else if (is_inline) {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_inlined_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_inlined_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_inlined_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    } else {
      switch (bc()) {
      case Bytecodes::_invokevirtual:   increment_counter(SharedRuntime::nof_normal_calls_addr()); break;
      case Bytecodes::_invokeinterface: increment_counter(SharedRuntime::nof_interface_calls_addr()); break;
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokedynamic:
      case Bytecodes::_invokespecial:   increment_counter(SharedRuntime::nof_static_calls_addr()); break;
      default: fatal("unexpected call bytecode");
      }
    }
  }
}
#endif //PRODUCT

// Identify possible target method and inlining style
ciMethod* Parse::optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                   ciMethod *dest_method, const TypeOopPtr* receiver_type) {
  // only use for virtual or interface calls

  // If it is obviously final, do not bother to call find_monomorphic_target,
  // because the class hierarchy checks are not needed, and may fail due to
  // incompletely loaded classes. Since we do our own class loading checks
  // in this module, we may confidently bind to any method.
  if (dest_method->can_be_statically_bound()) {
    return dest_method;
  }

  // Attempt to improve the receiver
  bool actual_receiver_is_exact = false;
  ciInstanceKlass* actual_receiver = klass;
  if (receiver_type != NULL) {
    // Array methods are all inherited from Object, and are monomorphic.
    if (receiver_type->isa_aryptr() &&
        dest_method->holder() == env()->Object_klass()) {
      return dest_method;
    }

    // All other interesting cases are instance klasses.
    if (!receiver_type->isa_instptr()) {
      return NULL;
    }

    ciInstanceKlass *ikl = receiver_type->klass()->as_instance_klass();
    if (ikl->is_loaded() && ikl->is_initialized() && !ikl->is_interface() &&
        (ikl == actual_receiver || ikl->is_subtype_of(actual_receiver))) {
      // ikl is the same as or a better type than the original actual_receiver,
      // e.g. a static receiver from bytecodes.
      actual_receiver = ikl;
      // Is the actual_receiver exact?
      actual_receiver_is_exact = receiver_type->klass_is_exact();
    }
  }

  ciInstanceKlass* calling_klass = caller->holder();
  ciMethod* cha_monomorphic_target = dest_method->find_monomorphic_target(calling_klass, klass, actual_receiver);
  if (cha_monomorphic_target != NULL) {
    assert(!cha_monomorphic_target->is_abstract(), "");
    // Look at the method-receiver type. Does it add "too much information"?
    ciKlass* mr_klass = cha_monomorphic_target->holder();
    const Type* mr_type = TypeInstPtr::make(TypePtr::BotPTR, mr_klass);
    if (receiver_type == NULL || !receiver_type->higher_equal(mr_type)) {
      // Calling this method would include an implicit cast to its holder.
      // %%% Not yet implemented. Would throw minor asserts at present.
      // %%% The most common wins are already gained by +UseUniqueSubclasses.
      // To fix, put the higher_equal check at the call of this routine,
      // and add a CheckCastPP to the receiver.
      if (TraceDependencies) {
        tty->print_cr("found unique CHA method, but could not cast up");
        tty->print("  method  = ");
        cha_monomorphic_target->print();
        tty->cr();
      }
      if (C->log() != NULL) {
        C->log()->elem("missed_CHA_opportunity klass='%d' method='%d'",
                       C->log()->identify(klass),
                       C->log()->identify(cha_monomorphic_target));
      }
      cha_monomorphic_target = NULL;
    }
  }
  if (cha_monomorphic_target != NULL) {
    // Hardwiring a virtual.
    // If we inlined because CHA revealed only a single target method,
    // then we are dependent on that target method not getting overridden
    // by dynamic class loading. Be sure to test the "static" receiver
    // dest_method here, as opposed to the actual receiver, which may
    // falsely lead us to believe that the receiver is final or private.
    C->dependencies()->assert_unique_concrete_method(actual_receiver, cha_monomorphic_target);
    return cha_monomorphic_target;
  }

  // If the type is exact, we can still bind the method w/o a vcall.
  // (This case comes after CHA so we can see how much extra work it does.)
  if (actual_receiver_is_exact) {
    // In case of evolution, there is a dependence on every inlined method, since each
    // such method can be changed when its class is redefined.
    ciMethod* exact_method = dest_method->resolve_invoke(calling_klass, actual_receiver);
    if (exact_method != NULL) {
#ifndef PRODUCT
      if (PrintOpto) {
        tty->print("  Calling method via exact type @%d --- ", bci);
        exact_method->print_name();
        tty->cr();
      }
#endif
      return exact_method;
    }
  }

  return NULL;
}