/*
 * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_callGenerator.cpp.incl"

CallGenerator::CallGenerator(ciMethod* method) {
  _method = method;
}

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}
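
// A CallGenerator captures one strategy for compiling a call site: parse
// the callee inline, emit an out-of-line direct, virtual, or dynamic call,
// split on a predicted receiver, defer the decision, or replace the call
// with an uncommon trap.  The factory methods on CallGenerator (for_inline,
// for_direct_call, etc., below) choose the concrete subclass; generate()
// then edits the caller's graph state, usually through a GraphKit.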

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(can_parse(method, is_osr), "parse must be possible");
  }

  // Can we build either an OSR or a regular parser for this method?
  static bool can_parse(ciMethod* method, bool is_osr = false);

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(NULL),  // set by generate(); checked by do_late_inline()
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), target, method(), kit.bci());
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_invoke())
      call->set_method_handle_invoke(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  _call_node = call;  // Save the call node in case we need it later
  return kit.transfer_exceptions_into_jvms();
}
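
// Note: the call is emitted against a SharedRuntime resolve stub; on the
// first execution the runtime resolves the actual callee and patches the
// call site to its entry point, so subsequent invocations go direct.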

//---------------------------DynamicCallGenerator-----------------------------
// Internal class which handles all out-of-line invokedynamic calls.
class DynamicCallGenerator : public CallGenerator {
public:
  DynamicCallGenerator(ciMethod* method)
    : CallGenerator(method)
  {
  }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* DynamicCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("dynamic_call bci='%d'", jvms->bci());
  }

  // Get the constant pool cache from the caller class.
  ciMethod* caller_method = jvms->method();
  ciBytecodeStream str(caller_method);
  str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
  assert(str.cur_bc() == Bytecodes::_invokedynamic, "wrong place to issue a dynamic call!");
  ciCPCache* cpcache = str.get_cpcache();

  // Get the offset of the CallSite from the constant pool cache
  // pointer.
  int index = str.get_method_index();
  size_t call_site_offset = cpcache->get_f1_offset(index);

  // Load the CallSite object from the constant pool cache.
  const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
  Node* cpcache_adr = kit.makecon(cpcache_ptr);
  Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
  Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);

  // Load the target MethodHandle from the CallSite object.
  Node* target_mh_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes());
  Node* target_mh = kit.make_load(kit.control(), target_mh_adr, TypeInstPtr::BOTTOM, T_OBJECT);

  address resolve_stub = SharedRuntime::get_resolve_opt_virtual_call_stub();

  CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), resolve_stub, method(), kit.bci());
  // invokedynamic is treated as an optimized invokevirtual.
  call->set_optimized_virtual(true);
  // Take extra care (in the presence of argument motion) not to trash the SP:
  call->set_method_handle_invoke(true);

  // Pass the target MethodHandle as first argument and shift the
  // other arguments.
  call->init_req(0 + TypeFunc::Parms, target_mh);
  uint nargs = call->method()->arg_size();
  for (uint i = 1; i < nargs; i++) {
    Node* arg = kit.argument(i - 1);
    call->init_req(i + TypeFunc::Parms, arg);
  }

  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}
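
// In effect the emitted code is (sketch):
//
//   cpcache   = <constant>              // caller's constant pool cache
//   call_site = cpcache[f1 offset]      // the java.dyn.CallSite
//   target_mh = call_site->target       // its current MethodHandle
//   invoke(target_mh, arg0, arg1, ...)  // MH passed as leading argument
//
// Because the target is reloaded on every execution, retargeting the
// CallSite takes effect on the very next call through this bytecode.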

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == methodOopDesc::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    kit.inc_sp(method()->arg_size());  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == methodOopDesc::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}
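
// Note: under +UseInlineCaches (the default) _vtable_index remains
// invalid_vtable_index and the resolve stub installs an inline cache at
// the call site; a usable vtable index instead makes the backend emit a
// vtable-stub dispatch.  Either way the receiver is known to be non-null
// past this point, which is what the cast_not_null above records.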

bool ParseGenerator::can_parse(ciMethod* m, bool is_osr) {
  // Certain methods cannot be parsed at all:
  if (!m->can_be_compiled())              return false;
  if (!m->has_balanced_monitors())        return false;
  if (m->get_flow_analysis()->failing())  return false;

  // (Methods may bail out for other reasons, after the parser is run.
  // We try to avoid this, but if forced, we must return NULL.
  // The user of the CallGenerator must check for this condition.)
  return true;
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (!ParseGenerator::can_parse(m))  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (!ParseGenerator::can_parse(m, true))  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
  assert(m->is_method_handle_invoke(), "for_dynamic_call mismatch");
  return new DynamicCallGenerator(m);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_invoke(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
  CallGenerator* _inline_cg;

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _inline_cg(inline_cg) {}

  virtual bool      is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  JVMState* generate(JVMState* jvms) {
    // Record that this call site should be revisited once the main
    // parse is finished.
    Compile::current()->add_late_inline(this);

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

};
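
// The late-inline protocol, in outline: generate() above emits a normal
// out-of-line CallStaticJava during parsing and registers this generator
// with the Compile.  Once the main parse has finished, Compile invokes
// do_late_inline() (below), which reconstitutes a JVMState from the call
// node's inputs, re-runs the inline CallGenerator on it, and splices the
// inlined subgraph in place of the call via replace_call().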

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  if (call_node() == NULL || call_node()->outcnt() == 0 ||
      call_node()->in(0) == NULL || call_node()->in(0)->is_top())
    return;

  CallStaticJavaNode* call = call_node();

  // Make a clone of the JVMState that is appropriate for driving a parse
  Compile* C = Compile::current();
  JVMState* jvms = call->jvms()->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new (C, size) SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
  }

  // Make enough space for the expression stack and transfer the incoming arguments
  int nargs = method()->arg_size();
  jvms->set_map(map);
  map->ensure_stack(jvms, jvms->method()->max_stack());
  if (nargs > 0) {
    for (int i1 = 0; i1 < nargs; i1++) {
      map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
    }
  }

  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("late_inline method='%d'", log->identify(method()));
    JVMState* p = jvms;
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("late_inline");
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->default_node_notes();
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  kit.replace_call(call, result);
}

CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*  _call_info;
  CallGenerator* _if_cold;
  CallGenerator* _if_hot;
  bool           _is_virtual;  // caches virtuality of if_cold
  bool           _is_inline;   // caches inline-ness of if_hot

 public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool is_inline()   const { return _is_inline; }
  virtual bool is_virtual()  const { return _is_virtual; }
  virtual bool is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};

CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}
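
// In outline: generate() first emits the conservative _if_cold code
// (typically a virtual call), then walks back from the map's control to
// find the CallJava node just created and queues a WarmCallInfo for it.
// A call that later proves hot enough would be upgraded to the _if_hot
// (inline) version by make_hot(); as the Unimplemented() below shows,
// that final step is not wired up yet.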

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}

//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX) hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN) hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool is_virtual()  const { return true; }
  virtual bool is_inline()   const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};
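
// The generated shape is a type-check diamond, roughly:
//
//   receiver = null_check(argument(0))
//   if (receiver->klass == _predicted_receiver)  // taken with _hit_prob
//     <_if_hit code, usually an inline>
//   else
//     <_if_missed code, usually a virtual call>
//   <merge control, i/o, memory, and stack state with Region/Phi nodes>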

CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      assert(slow_jvms != NULL, "miss path must not fail to generate");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new (kit.C, 3) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit )  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}

//------------------------PredictedDynamicCallGenerator-----------------------
// Internal class which handles all out-of-line dynamic calls checking the
// CallSite's target MethodHandle.
class PredictedDynamicCallGenerator : public CallGenerator {
  ciMethodHandle* _predicted_method_handle;
  CallGenerator*  _if_missed;
  CallGenerator*  _if_hit;
  float           _hit_prob;

 public:
  PredictedDynamicCallGenerator(ciMethodHandle* predicted_method_handle,
                                CallGenerator* if_missed,
                                CallGenerator* if_hit,
                                float hit_prob)
    : CallGenerator(if_missed->method()),
      _predicted_method_handle(predicted_method_handle),
      _if_missed(if_missed),
      _if_hit(if_hit),
      _hit_prob(hit_prob)
  {}

  virtual bool is_inline()   const { return _if_hit->is_inline(); }
  virtual bool is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};
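
// PredictedDynamicCallGenerator builds the same hit/miss diamond as
// PredictedCallGenerator, but the guard compares the CallSite's current
// target MethodHandle against a profiled constant instead of checking
// the receiver's klass.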

CallGenerator* CallGenerator::for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
                                                         CallGenerator* if_missed,
                                                         CallGenerator* if_hit,
                                                         float hit_prob) {
  return new PredictedDynamicCallGenerator(predicted_method_handle, if_missed, if_hit, hit_prob);
}

JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_dynamic_call bci='%d'", jvms->bci());
  }

  // Get the constant pool cache from the caller class.
  ciMethod* caller_method = jvms->method();
  ciBytecodeStream str(caller_method);
  str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
  ciCPCache* cpcache = str.get_cpcache();

  // Get the offset of the CallSite from the constant pool cache
  // pointer.
  int index = str.get_method_index();
  size_t call_site_offset = cpcache->get_f1_offset(index);

  // Load the CallSite object from the constant pool cache.
  const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
  Node* cpcache_adr = kit.makecon(cpcache_ptr);
  Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
  Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);

  // Load the target MethodHandle from the CallSite object.
  Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes());
  Node* target_mh = kit.make_load(kit.control(), target_adr, TypeInstPtr::BOTTOM, T_OBJECT);

  // Check if the MethodHandle is still the same.
  const TypeOopPtr* predicted_mh_ptr = TypeOopPtr::make_from_constant(_predicted_method_handle, true);
  Node* predicted_mh = kit.makecon(predicted_mh_ptr);

  Node* cmp = gvn.transform(new(kit.C, 3) CmpPNode(target_mh, predicted_mh));
  Node* bol = gvn.transform(new(kit.C, 2) BoolNode(cmp, BoolTest::eq) );
  IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN);
  kit.set_control( gvn.transform(new(kit.C, 1) IfTrueNode (iff)));
  Node* slow_ctl = gvn.transform(new(kit.C, 1) IfFalseNode(iff));

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      assert(slow_jvms != NULL, "miss path must not fail to generate");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The target MethodHandle did not match the prediction.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new (kit.C, 3) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if( i >= limit )  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces the call site with an unconditional
// uncommon trap, forcing deoptimization if the site is ever reached.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool is_trap()    const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};

CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}

JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  int nargs = method()->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //  Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}
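
// For instance, with the constants above, a callee of code_size == 30
// bytecodes and a scaled profile count of 1000 gets _count = 1000,
// _profit = 1.0, _work = 1.0 + 30/3 = 11.0, and
// _size = 30.0 + 9.5 * 30 = 315.0 projected nodes.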

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)   return true;
  if (profit() <  WarmCallMinProfit)  return true;
  if (work()   >  WarmCallMaxWork)    return true;
  if (size()   >  WarmCallMaxSize)    return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;    // better than avg.
  else                          size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}
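
// Continuing the example above (and assuming the clamped window works out
// to min_size = 0, max_size = 500): method_size = 315/500 = 0.63, so
// size_factor = 0.5 and heat = 1000 * 1.0 * 0.5 = 500.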

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())   return true;
  if (this->heat() < that->heat())   return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}
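
// insert_into() keeps the warm-call list sorted by decreasing heat, so the
// hottest candidates can be taken first when the compiler spends its node
// budget; ties are broken deterministically by warmer_than() above.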

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo* WarmCallInfo::_always_hot  = NULL;
WarmCallInfo* WarmCallInfo::_always_cold = NULL;

WarmCallInfo* WarmCallInfo::always_hot() {
  if (_always_hot == NULL) {
    static double bits[sizeof(WarmCallInfo) / sizeof(double) + 1] = {0};
    WarmCallInfo* ci = (WarmCallInfo*) bits;
    ci->_profit = ci->_count = MAX_VALUE();
    ci->_work   = ci->_size  = MIN_VALUE();
    _always_hot = ci;
  }
  assert(_always_hot->is_hot(), "must always be hot");
  return _always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  if (_always_cold == NULL) {
    static double bits[sizeof(WarmCallInfo) / sizeof(double) + 1] = {0};
    WarmCallInfo* ci = (WarmCallInfo*) bits;
    ci->_profit = ci->_count = MIN_VALUE();
    ci->_work   = ci->_size  = MAX_VALUE();
    _always_cold = ci;
  }
  assert(_always_cold->is_cold(), "must always be cold");
  return _always_cold;
}
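
// Note the allocation trick above: each singleton lives in zero-initialized
// static storage (the double array), reinterpreted as a WarmCallInfo, so no
// construction or C-heap allocation is needed; only the four metric fields
// are then forced to their extreme values.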

#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT