Sat, 16 Mar 2013 07:39:14 -0700
8009166: [parfait] Null pointer dereference in hotspot/src/share/vm/opto/type.cpp
Summary: add guarantee() to as_instance_type()
Reviewed-by: kvn, twisti

/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const           { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};
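
// Note: generate() returns NULL when the compile is bailing out; otherwise it
// returns the parser's exit JVMState, with any exception states attached so
// the caller can wire them into its own exception handling.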
JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};
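
// Emit an ordinary out-of-line call.  The call targets the appropriate
// SharedRuntime resolution stub; for non-static methods the receiver is
// null-checked explicitly first and the call node is marked optimized_virtual
// (and method_handle_invoke for MH intrinsics and compiled lambda forms).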
JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new (kit.C) CallStaticJavaNode(tf(), target, method(), kit.bci());
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;

public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};
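
// Emit an out-of-line virtual call.  The receiver is null-checked (explicitly
// or, when profiling allows, implicitly), then a CallDynamicJavaNode is
// created whose target is the runtime stub that resolves the call through an
// inline cache (or through the vtable when inline caches are disabled).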
JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    kit.inc_sp(method()->arg_size());  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod* caller = kit.method();
  ciMethodData* caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches ");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new (kit.C) CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call. Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 protected:
  CallGenerator* _inline_cg;

  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();
    C->print_inlining_skip(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_insert(this);
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
  }

};
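
// Replay the deferred inlining decision.  The call site was emitted as an
// ordinary CallStaticJava by generate(); here we rebuild a JVMState and map
// from the call node's inputs, re-run the inline CallGenerator on it, and,
// if that succeeds, splice the inlined subgraph in place of the call.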
void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  if (call_node() == NULL || call_node()->outcnt() == 0 ||
      call_node()->in(0) == NULL || call_node()->in(0)->is_top()) {
    return;
  }

  const TypeTuple* r = call_node()->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call_node()->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call_node()->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  CallStaticJavaNode* call = call_node();

  // Make a clone of the JVMState that is appropriate to use for driving a parse
  Compile* C = Compile::current();
  JVMState* jvms = call->jvms()->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new (C) SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
    C->initial_gvn()->set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  // Make enough space for the expression stack and transfer the incoming arguments
  int nargs = method()->arg_size();
  jvms->set_map(map);
  map->ensure_stack(jvms, jvms->method()->max_stack());
  if (nargs > 0) {
    for (int i1 = 0; i1 < nargs; i1++) {
      map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
    }
  }

  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  C->print_inlining_insert(this);

  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("late_inline method='%d'", log->identify(method()));
    JVMState* p = jvms;
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("late_inline");
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->default_node_notes();
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

  kit.replace_call(call, result);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}
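
// Late inlining for method handle call sites.  The concrete target typically
// becomes known only once the MethodHandle receiver or MemberName argument
// folds to a constant, so each incremental-inlining round re-checks the site
// through do_late_inline_check() before committing to an inline.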
class LateInlineMHCallGenerator : public LateInlineCallGenerator {

  ciMethod* _caller;
  int       _attempt;
  bool      _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      Compile::current()->add_late_inline(this);
    }
    return new_jvms;
  }

  virtual void print_inlining_late(const char* msg) {
    if (!_input_not_const) return;
    LateInlineCallGenerator::print_inlining_late(msg);
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL) {
    assert(!cg->is_late_inline() && cg->is_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}
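
// Like LateInlineCallGenerator, but its generate() queues the call site on
// the compile's dedicated string late-inline list (add_string_late_inline)
// rather than the general late-inline list.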
class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();
    C->print_inlining_skip(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}


//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const           { return _is_inline; }
  virtual bool      is_virtual() const          { return _is_virtual; }
  virtual bool      is_deferred() const         { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}
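
// Emit the "cold" (out-of-line) form of the call immediately, then record a
// WarmCallInfo on the compile's warm-call list so the site can later be
// upgraded to the "hot" (inline) form if its computed heat justifies it.
// (Note that WarmCallInfo::make_hot() below is still Unimplemented.)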
JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles calls guarded by a check of the predicted
// receiver type; a hit is handled by _if_hit (typically an inline), a miss
// by _if_missed.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX) hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN) hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
  virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}
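
// Build the type-speculation diamond: null-check the receiver, test it
// against the predicted class, generate _if_missed on the slow control
// projection and _if_hit on the fast one, then merge control, i/o, memory
// and any differing JVMState slots with a Region and Phis.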
JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new (kit.C) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic() ||
         callee->is_compiled_lambda_form(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}
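
// Try to resolve a MethodHandle.invokeBasic or MethodHandle.linkTo* intrinsic
// to its concrete target.  This only succeeds when the MethodHandle receiver
// (invokeBasic) or the trailing MemberName argument (linkTo*) is a constant;
// otherwise input_not_const stays true and NULL is returned so the caller
// falls back to a direct or late-inlined call.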
CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
        const int vtable_index = Method::invalid_vtable_index;
        CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, true, true);
        assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + i);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
              kit.set_argument(receiver_skip + i, cast_obj);
            }
          }
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, target, receiver_type,
                                            is_virtual,
                                            call_does_dispatch, vtable_index);  // out-parameters
        }

        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, true, true);
        assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
      }
    }
    break;

  default:
    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
    break;
  }
  return NULL;
}


//------------------------PredictedIntrinsicGenerator------------------------------
// Internal class which handles all predicted Intrinsic calls.
class PredictedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

public:
  PredictedIntrinsicGenerator(CallGenerator* intrinsic,
                              CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inlined()   const    { return true; }
  virtual bool      is_intrinsic() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_intrinsic(CallGenerator* intrinsic,
                                                      CallGenerator* cg) {
  return new PredictedIntrinsicGenerator(intrinsic, cg);
}
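
// Same diamond shape as PredictedCallGenerator::generate, but the guard is
// the intrinsic's own predicate (generate_predicate) rather than a receiver
// type check: the intrinsic is emitted on the fast path, the fallback
// CallGenerator on the slow path, and the two are merged with a Region/Phis.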
JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  Node* slow_ctl = _intrinsic->generate_predicate(kit.sync_jvms());
  if (kit.failing())
    return NULL;  // might happen because of NodeCountInliningCutoff

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  if (slow_ctl != NULL) {
    PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _cg->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Predicate is always false.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Generate intrinsic code:
  JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Intrinsic failed, so use slow code or make a direct call.
    if (slow_map == NULL) {
      CallGenerator* cg = CallGenerator::for_direct_call(method());
      new_jvms = cg->generate(kit.sync_jvms());
    } else {
      kit.set_jvms(slow_jvms);
      return kit.transfer_exceptions_into_jvms();
    }
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Intrinsic method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new (kit.C) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces a call site with an uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const             { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}
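
// Instead of emitting a call, re-push the outgoing arguments (so the
// interpreter sees a consistent expression stack on deoptimization) and emit
// an uncommon trap with the configured reason and action.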
JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  int nargs = method()->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)
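
// Fill in the four metrics of the warm-call cost model: _count (expected
// executions, scaled from profile data), _profit (expected benefit of
// inlining, in units of call overhead), _work (expected time spent in the
// callee, also in call-overhead units) and _size (estimated number of
// compiler nodes the inline would add to the graph).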
void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //   Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //   Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)        return true;
  if (profit() <  WarmCallMinProfit)       return true;
  if (work()   >  WarmCallMaxWork)         return true;
  if (size()   >  WarmCallMaxSize)         return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}
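
// Heat model: a genuinely "warm" call (neither cold nor hot) gets a score
// heat = count * profit * size_factor, where size_factor rewards callees that
// are small relative to the warm-call size window.  Warmer calls are placed
// earlier on the warm-call list (see insert_into below).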
// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;   // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;   // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;   // better than avg.
  else                          size_factor = 0.5; // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())   return true;
  if (this->heat() < that->heat())   return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)
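
// The warm-call work list is a singly linked list kept sorted from warmest to
// coldest; insert_into() finds the first entry not warmer than this one and
// links this in front of it.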
WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT