Mon, 29 Aug 2011 05:07:35 -0700
7083184: JSR 292: don't store context class argument with call site dependencies
Reviewed-by: jrose, never
/*
 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciCPCache.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

CallGenerator::CallGenerator(ciMethod* method) {
  _method = method;
}

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(can_parse(method, is_osr), "parse must be possible");
  }

  // Can we build either an OSR or a regular parser for this method?
  static bool can_parse(ciMethod* method, int is_osr = false);

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), target, method(), kit.bci());
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_invoke()) {
      call->set_method_handle_invoke(true);
      kit.C->set_has_method_handle_invokes(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  _call_node = call;  // Save the call node in case we need it later
  return kit.transfer_exceptions_into_jvms();
}

//---------------------------DynamicCallGenerator-----------------------------
// Internal class which handles all out-of-line invokedynamic calls.
class DynamicCallGenerator : public CallGenerator {
public:
  DynamicCallGenerator(ciMethod* method)
    : CallGenerator(method)
  {
  }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* DynamicCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("dynamic_call bci='%d'", jvms->bci());
  }

  // Get the constant pool cache from the caller class.
  ciMethod* caller_method = jvms->method();
  ciBytecodeStream str(caller_method);
  str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
  assert(str.cur_bc() == Bytecodes::_invokedynamic, "wrong place to issue a dynamic call!");
  ciCPCache* cpcache = str.get_cpcache();

  // Get the offset of the CallSite from the constant pool cache
  // pointer.
  int index = str.get_method_index();
  size_t call_site_offset = cpcache->get_f1_offset(index);

  // Load the CallSite object from the constant pool cache.
  const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
  Node* cpcache_adr = kit.makecon(cpcache_ptr);
  Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
  Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);

  // Load the target MethodHandle from the CallSite object.
  Node* target_mh_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes());
  Node* target_mh = kit.make_load(kit.control(), target_mh_adr, TypeInstPtr::BOTTOM, T_OBJECT);
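
  // In Java-level terms, the two loads above amount to the following
  // illustrative sketch only (the 'f1' slot name comes from the
  // constant pool cache entry layout assumed via get_f1_offset):
  //   CallSite     site   = cpcache.entry[index].f1;  // resolved CallSite
  //   MethodHandle target = site.target;              // current target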

  address resolve_stub = SharedRuntime::get_resolve_opt_virtual_call_stub();

  CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), resolve_stub, method(), kit.bci());
  // invokedynamic is treated as an optimized invokevirtual.
  call->set_optimized_virtual(true);
  // Take extra care (in the presence of argument motion) not to trash the SP:
  call->set_method_handle_invoke(true);
  kit.C->set_has_method_handle_invokes(true);

  // Pass the target MethodHandle as first argument and shift the
  // other arguments.
  call->init_req(0 + TypeFunc::Parms, target_mh);
  uint nargs = call->method()->arg_size();
  for (uint i = 1; i < nargs; i++) {
    Node* arg = kit.argument(i - 1);
    call->init_req(i + TypeFunc::Parms, arg);
  }
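  // Resulting shape, as an illustrative example only: for an
  // invokedynamic with descriptor (II)V, arg_size() counts the target
  // MethodHandle plus the two declared ints, so the outgoing call gets
  //   Parms+0: target_mh,  Parms+1: first int,  Parms+2: second int
  // i.e. every declared argument shifts up by one slot.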

  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == methodOopDesc::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    kit.inc_sp(method()->arg_size());  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However, currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == methodOopDesc::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

bool ParseGenerator::can_parse(ciMethod* m, int entry_bci) {
  // Certain methods cannot be parsed at all:
  if (!m->can_be_compiled())             return false;
  if (!m->has_balanced_monitors())       return false;
  if (m->get_flow_analysis()->failing()) return false;

  // (Methods may bail out for other reasons, after the parser is run.
  // We try to avoid this, but if forced, we must return (Node*)NULL.
  // The user of the CallGenerator must check for this condition.)
  return true;
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (!ParseGenerator::can_parse(m)) return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (!ParseGenerator::can_parse(m, true)) return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
  assert(m->is_method_handle_invoke() || m->is_method_handle_adapter(), "for_dynamic_call mismatch");
  return new DynamicCallGenerator(m);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_invoke(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
  CallGenerator* _inline_cg;

public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  JVMState* generate(JVMState* jvms) {
    // Record that this call site should be revisited once the main
    // parse is finished.
    Compile::current()->add_late_inline(this);

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

};
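
// Typical use, as an illustrative sketch only (for_late_inline is
// defined below; 'm' and 'expected_uses' are placeholders): wrap an
// inlining generator so the decision is deferred until the main parse
// has finished:
//   CallGenerator* cg =
//       CallGenerator::for_late_inline(m, CallGenerator::for_inline(m, expected_uses));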

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  if (call_node() == NULL || call_node()->outcnt() == 0 ||
      call_node()->in(0) == NULL || call_node()->in(0)->is_top())
    return;

  CallStaticJavaNode* call = call_node();

  // Make a clone of the JVMState that is appropriate to use for driving a parse
  Compile* C = Compile::current();
  JVMState* jvms = call->jvms()->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new (C, size) SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
  }

  // Make enough space for the expression stack and transfer the incoming arguments
  int nargs = method()->arg_size();
  jvms->set_map(map);
  map->ensure_stack(jvms, jvms->method()->max_stack());
  if (nargs > 0) {
    for (int i1 = 0; i1 < nargs; i1++) {
      map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
    }
  }

  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("late_inline method='%d'", log->identify(method()));
    JVMState* p = jvms;
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("late_inline");
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->default_node_notes();
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL) return;  // no change
  if (C->failing())     return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  kit.replace_call(call, result);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}


//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;  // caches virtuality of if_cold
  bool            _is_inline;   // caches inline-ness of if_hot

public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool is_inline()   const { return _is_inline; }
  virtual bool is_virtual()  const { return _is_virtual; }
  virtual bool is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0); else m = C->top();
    if (m->is_Catch())     m = m->in(0); else m = C->top();
    if (m->is_Proj())      m = m->in(0); else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX) hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN) hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool is_virtual()  const { return true; }
  virtual bool is_inline()   const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}
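
// Typical use, as an illustrative sketch only ('exact_klass', 'm',
// 'vtable_index', 'expected_uses' and 'hit_prob' are placeholders):
// guard a profiled monomorphic receiver with a class check, inlining
// on a hit and falling back to a virtual call on a miss:
//   CallGenerator* cg = CallGenerator::for_predicted_call(
//       exact_klass,
//       CallGenerator::for_virtual_call(m, vtable_index),  // if_missed
//       CallGenerator::for_inline(m, expected_uses),       // if_hit
//       hit_prob);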

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      assert(slow_jvms != NULL, "miss path must not fail to generate");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Fall through if the instance exactly matches the desired type.
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new (kit.C, 3) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


//------------------------PredictedDynamicCallGenerator-----------------------
// Internal class which handles all out-of-line dynamic calls guarding
// on the call site's target MethodHandle.
class PredictedDynamicCallGenerator : public CallGenerator {
  ciMethodHandle* _predicted_method_handle;
  CallGenerator*  _if_missed;
  CallGenerator*  _if_hit;
  float           _hit_prob;

public:
  PredictedDynamicCallGenerator(ciMethodHandle* predicted_method_handle,
                                CallGenerator* if_missed,
                                CallGenerator* if_hit,
                                float hit_prob)
    : CallGenerator(if_missed->method()),
      _predicted_method_handle(predicted_method_handle),
      _if_missed(if_missed),
      _if_hit(if_hit),
      _hit_prob(hit_prob)
  {}

  virtual bool is_inline()   const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
                                                         CallGenerator* if_missed,
                                                         CallGenerator* if_hit,
                                                         float hit_prob) {
  return new PredictedDynamicCallGenerator(predicted_method_handle, if_missed, if_hit, hit_prob);
}


CallGenerator* CallGenerator::for_method_handle_inline(Node* method_handle, JVMState* jvms,
                                                       ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
  if (method_handle->Opcode() == Op_ConP) {
    const TypeOopPtr* oop_ptr = method_handle->bottom_type()->is_oopptr();
    ciObject* const_oop = oop_ptr->const_oop();
    ciMethodHandle* method_handle = const_oop->as_method_handle();

    // Set the callee to have access to the class and signature in
    // the MethodHandleCompiler.
    method_handle->set_callee(callee);
    method_handle->set_caller(caller);
    method_handle->set_call_profile(profile);

    // Get an adapter for the MethodHandle.
    ciMethod* target_method = method_handle->get_method_handle_adapter();
    if (target_method != NULL) {
      CallGenerator* cg = Compile::current()->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
      if (cg != NULL && cg->is_inline())
        return cg;
    }
  } else if (method_handle->Opcode() == Op_Phi && method_handle->req() == 3 &&
             method_handle->in(1)->Opcode() == Op_ConP && method_handle->in(2)->Opcode() == Op_ConP) {
    // selectAlternative idiom merging two constant MethodHandles.
    // Generate a guard so that each can be inlined.  We might want
    // to handle more inputs at a later point, but this covers the
    // most common case.
    const TypeOopPtr* oop_ptr = method_handle->in(1)->bottom_type()->is_oopptr();
    ciObject* const_oop = oop_ptr->const_oop();
    ciMethodHandle* mh = const_oop->as_method_handle();

    CallGenerator* cg1 = for_method_handle_inline(method_handle->in(1), jvms, caller, callee, profile);
    CallGenerator* cg2 = for_method_handle_inline(method_handle->in(2), jvms, caller, callee, profile);
    if (cg1 != NULL && cg2 != NULL) {
      return new PredictedDynamicCallGenerator(mh, cg2, cg1, PROB_FAIR);
    }
  }
  return NULL;
}
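
// The selectAlternative idiom comes from guardWithTest; at the Java
// level it corresponds roughly to this illustrative sketch:
//   MethodHandle mh = test(args...) ? target : fallback;
//   mh.invoke(args...);
// C2 sees the merge as a Phi of two constant MethodHandles, so guarding
// on one of the constants lets both alternatives be inlined.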

CallGenerator* CallGenerator::for_invokedynamic_inline(ciCallSite* call_site, JVMState* jvms,
                                                       ciMethod* caller, ciMethod* callee, ciCallProfile profile) {
  assert(call_site->is_constant_call_site() || call_site->is_mutable_call_site(), "must be");
  ciMethodHandle* method_handle = call_site->get_target();

  // Set the callee to have access to the class and signature in the
  // MethodHandleCompiler.
  method_handle->set_callee(callee);
  method_handle->set_caller(caller);
  method_handle->set_call_profile(profile);

  // Get an adapter for the MethodHandle.
  ciMethod* target_method = method_handle->get_invokedynamic_adapter();
  if (target_method != NULL) {
    Compile *C = Compile::current();
    CallGenerator* cg = C->call_generator(target_method, -1, false, jvms, true, PROB_ALWAYS);
    if (cg != NULL && cg->is_inline()) {
      // Add a dependence for invalidation of the optimization.
      if (call_site->is_mutable_call_site()) {
        C->dependencies()->assert_call_site_target_value(call_site, method_handle);
      }
      return cg;
    }
  }
  return NULL;
}
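
// Sketch of why the dependency matters (illustrative): if the bytecode
// later executes
//   mutableCallSite.setTarget(newTarget);
// the recorded call-site-target dependency invalidates this nmethod, so
// the inlined old target cannot be reentered and the call falls back to
// the out-of-line resolution path.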

JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_dynamic_call bci='%d'", jvms->bci());
  }

  const TypeOopPtr* predicted_mh_ptr = TypeOopPtr::make_from_constant(_predicted_method_handle, true);
  Node* predicted_mh = kit.makecon(predicted_mh_ptr);

  Node* bol = NULL;
  int bc = jvms->method()->java_code_at_bci(jvms->bci());
  if (bc == Bytecodes::_invokespecial) {
    // This is the selectAlternative idiom for guardWithTest
    Node* receiver = kit.argument(0);

    // Check if the MethodHandle is the expected one
    Node* cmp = gvn.transform(new (kit.C, 3) CmpPNode(receiver, predicted_mh));
    bol = gvn.transform(new (kit.C, 2) BoolNode(cmp, BoolTest::eq));
  } else {
    assert(bc == Bytecodes::_invokedynamic, "must be");
    // Get the constant pool cache from the caller class.
    ciMethod* caller_method = jvms->method();
    ciBytecodeStream str(caller_method);
    str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
    ciCPCache* cpcache = str.get_cpcache();

    // Get the offset of the CallSite from the constant pool cache
    // pointer.
    int index = str.get_method_index();
    size_t call_site_offset = cpcache->get_f1_offset(index);

    // Load the CallSite object from the constant pool cache.
    const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
    Node* cpcache_adr = kit.makecon(cpcache_ptr);
    Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
    Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);

    // Load the target MethodHandle from the CallSite object.
    Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_lang_invoke_CallSite::target_offset_in_bytes());
    Node* target_mh = kit.make_load(kit.control(), target_adr, TypeInstPtr::BOTTOM, T_OBJECT);

    // Check if the MethodHandle is still the same.
    Node* cmp = gvn.transform(new (kit.C, 3) CmpPNode(target_mh, predicted_mh));
    bol = gvn.transform(new (kit.C, 2) BoolNode(cmp, BoolTest::eq));
  }
  IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN);
  kit.set_control(gvn.transform(new (kit.C, 1) IfTrueNode(iff)));
  Node* slow_ctl = gvn.transform(new (kit.C, 1) IfFalseNode(iff));

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      assert(slow_jvms != NULL, "miss path must not fail to generate");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new (kit.C, 3) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit) break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which handles calls that are replaced by an
// unconditional uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver.)
  int nargs = method()->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the
    // event of a class cast failure for a monomorphic call, as it will never
    // let us convert the call to either bi-morphic or megamorphic and can
    // lead to uncommon-trap loops.
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE        (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //   Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //   Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}
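
// Worked example (illustrative): a callee with code_size == 100 gives
//   _work = 1.0 + 100 / 3.0  ~= 34.3 call-overheads
//   _size = 30.0 + 9.5 * 100 == 980.0 expected nodes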

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  < WarmCallMinCount)  return true;
  if (profit() < WarmCallMinProfit) return true;
  if (work()   > WarmCallMaxWork)   return true;
  if (size()   > WarmCallMaxSize)   return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)  return true;
  if (profit() >= HotCallProfitThreshold) return true;
  if (work()   <= HotCallTrivialWork)     return true;
  if (size()   <= HotCallTrivialSize)     return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05) size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15) size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5)  size_factor = 1;    // better than avg.
  else                         size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}
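
// Worked example (illustrative, assuming min_size resolves to 0 and
// max_size to 500): a warm call with size() == 300 gives
//   method_size = 300 / 500 = 0.6  =>  size_factor = 0.5
// so heat == count() * profit() * 0.5.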

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat()) return true;
  if (this->heat() < that->heat()) return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call()) return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}
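
// The list is kept sorted warmest-first.  Typical use, echoing
// WarmCallGenerator::generate above:
//   wci->set_heat(wci->compute_heat());
//   C->set_warm_calls(wci->insert_into(C->warm_calls()));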

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL) call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT