Mon, 30 Jul 2012 09:49:25 -0700
7187454: stack overflow in C2 compiler thread on Solaris x86
Summary: Added new FormatBufferResource class to use thread's resource area for error message buffer.
Reviewed-by: twisti
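
The file below is the callGenerator.cpp side of the change: the fatal() call in
CallGenerator::for_method_handle_inline formats its message with err_msg_res(),
which builds the buffer in the compiler thread's resource area instead of in a
large on-stack array. The FormatBufferResource class itself lives elsewhere in
the changeset; as a rough sketch of the idea (names, sizes, and details assumed,
not taken from this excerpt), it is approximately:

    // Hypothetical sketch only -- the real FormatBufferResource is defined in
    // the shared utilities code, not in callGenerator.cpp. The point is that
    // the message buffer comes from the current thread's resource area
    // (NEW_RESOURCE_ARRAY) rather than the stack, so deeply nested error
    // formatting no longer grows the C2 compiler thread's stack.
    class FormatBufferResource {
      char* _buf;  // backing storage from the thread's resource area
     public:
      FormatBufferResource(const char* format, ...) {
        const size_t buflen = 256;                  // assumed buffer size
        _buf = NEW_RESOURCE_ARRAY(char, buflen);    // resource area, not stack
        va_list argp;
        va_start(argp, format);
        jio_vsnprintf(_buf, buflen, format, argp);  // bounded formatting
        va_end(argp);
      }
      operator const char*() const { return _buf; }
    };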
/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciCPCache.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(NULL),  // set when generate() emits the call
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), target, method(), kit.bci());
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//---------------------------DynamicCallGenerator-----------------------------
// Internal class which handles all out-of-line invokedynamic calls.
class DynamicCallGenerator : public CallGenerator {
public:
  DynamicCallGenerator(ciMethod* method)
    : CallGenerator(method)
  {
  }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* DynamicCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Compile* C = kit.C;
  PhaseGVN& gvn = kit.gvn();

  if (C->log() != NULL) {
    C->log()->elem("dynamic_call bci='%d'", jvms->bci());
  }

  // Get the constant pool cache from the caller class.
  ciMethod* caller_method = jvms->method();
  ciBytecodeStream str(caller_method);
  str.force_bci(jvms->bci());  // Set the stream to the invokedynamic bci.
  assert(str.cur_bc() == Bytecodes::_invokedynamic, "wrong place to issue a dynamic call!");
  ciCPCache* cpcache = str.get_cpcache();

  // Get the offset of the CallSite from the constant pool cache
  // pointer.
  int index = str.get_method_index();
  size_t call_site_offset = cpcache->get_f1_offset(index);

  // Load the CallSite object from the constant pool cache.
  const TypeOopPtr* cpcache_type   = TypeOopPtr::make_from_constant(cpcache);  // returns TypeAryPtr of type T_OBJECT
  const TypeOopPtr* call_site_type = TypeOopPtr::make_from_klass(C->env()->CallSite_klass());
  Node* cpcache_adr   = kit.makecon(cpcache_type);
  Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, call_site_offset);
  // The oops in the constant pool cache are not compressed; load them as raw pointers.
  Node* call_site = kit.make_load(kit.control(), call_site_adr, call_site_type, T_ADDRESS, Compile::AliasIdxRaw);

  // Load the target MethodHandle from the CallSite object.
  const TypeOopPtr* target_type = TypeOopPtr::make_from_klass(C->env()->MethodHandle_klass());
  Node* target_mh_adr = kit.basic_plus_adr(call_site, java_lang_invoke_CallSite::target_offset_in_bytes());
  Node* target_mh = kit.make_load(kit.control(), target_mh_adr, target_type, T_OBJECT);

  address resolve_stub = SharedRuntime::get_resolve_opt_virtual_call_stub();

  CallStaticJavaNode* call = new (C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), resolve_stub, method(), kit.bci());
  // invokedynamic is treated as an optimized invokevirtual.
  call->set_optimized_virtual(true);
  // Take extra care (in the presence of argument motion) not to trash the SP:
  call->set_method_handle_invoke(true);

  // Pass the target MethodHandle as first argument and shift the
  // other arguments.
  call->init_req(0 + TypeFunc::Parms, target_mh);
  uint nargs = call->method()->arg_size();
  for (uint i = 1; i < nargs; i++) {
    Node* arg = kit.argument(i - 1);
    call->init_req(i + TypeFunc::Parms, arg);
  }

  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == methodOopDesc::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    kit.inc_sp(method()->arg_size());  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod* caller = kit.method();
  ciMethodData* caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == methodOopDesc::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new (kit.C, tf()->domain()->cnt()) CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
  assert(m->is_compiled_lambda_form(), "for_dynamic_call mismatch");
  //@@ FIXME: this should be done via a direct call
  return new DynamicCallGenerator(m);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
  CallGenerator* _inline_cg;

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  JVMState* generate(JVMState* jvms) {
    // Record that this call site should be revisited once the main
    // parse is finished.
    Compile::current()->add_late_inline(this);

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

};

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  if (call_node() == NULL || call_node()->outcnt() == 0 ||
      call_node()->in(0) == NULL || call_node()->in(0)->is_top())
    return;

  CallStaticJavaNode* call = call_node();

  // Make a clone of the JVMState that is appropriate to use for driving a parse
  Compile* C = Compile::current();
  JVMState* jvms = call->jvms()->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new (C, size) SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
  }

  // Make enough space for the expression stack and transfer the incoming arguments
  int nargs = method()->arg_size();
  jvms->set_map(map);
  map->ensure_stack(jvms, jvms->method()->max_stack());
  if (nargs > 0) {
    for (int i1 = 0; i1 < nargs; i1++) {
      map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
    }
  }

  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("late_inline method='%d'", log->identify(method()));
    JVMState* p = jvms;
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("late_inline");
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->default_node_notes();
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  kit.replace_call(call, result);
}

CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*  _call_info;
  CallGenerator* _if_cold;
  CallGenerator* _if_hot;
  bool           _is_virtual;  // caches virtuality of if_cold
  bool           _is_inline;   // caches inline-ness of if_hot

public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const   { return _is_inline; }
  virtual bool      is_virtual() const  { return _is_virtual; }
  virtual bool      is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);

};

CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
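    // Walk the control chain up from the exit map (CatchProj -> Catch ->
    // Proj) to recover the CallJava node the cold generator just emitted;
    // any other shape falls through to top() and the call is not queued.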
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}

//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX) hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN) hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool      is_virtual() const  { return true; }
  virtual bool      is_inline() const   { return _if_hit->is_inline(); }
  virtual bool      is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);

};

CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Finish the diamond.
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new (kit.C, 3) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee) {
  assert(callee->is_method_handle_intrinsic() ||
         callee->is_compiled_lambda_form(), "for_method_handle_call mismatch");
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee);
  if (cg != NULL)
    return cg;
  return CallGenerator::for_direct_call(callee);
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // get MethodHandle receiver
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
        const int vtable_index = methodOopDesc::invalid_vtable_index;
        CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS);
        if (cg != NULL && cg->is_inline())
          return cg;
      } else {
        if (PrintInlining)  CompileTask::print_inlining(callee, jvms->depth() - 1, jvms->bci(), "receiver not constant");
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // pop MemberName argument
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            Node* cast_obj = gvn.transform(new (C, 2) CheckCastPPNode(kit.control(), arg, sig_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + i);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* cast_obj = gvn.transform(new (C, 2) CheckCastPPNode(kit.control(), arg, sig_type));
              kit.set_argument(receiver_skip + i, cast_obj);
            }
          }
        }
        const int vtable_index = methodOopDesc::invalid_vtable_index;
        const bool call_is_virtual = target->is_abstract();  // FIXME workaround
        CallGenerator* cg = C->call_generator(target, vtable_index, call_is_virtual, jvms, true, PROB_ALWAYS);
        if (cg != NULL && cg->is_inline())
          return cg;
      }
    }
    break;

  default:
    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
    break;
  }
  return NULL;
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces the call site with an uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};

CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}

JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  int nargs = method()->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call, as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to uncommon-trap loops.
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE        (9.5)

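// For example, a call_method with 100 bytecodes is estimated below at
// 30.0 + 9.5 * 100 = 980.0 graph nodes.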
void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //   Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //   Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)   return true;
  if (profit() <  WarmCallMinProfit)  return true;
  if (work()   >  WarmCallMaxWork)    return true;
  if (size()   >  WarmCallMaxSize)    return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
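// Rank a warm call for the work-list: scale the expected execution count
// by the expected profit and by a size factor that favors smaller methods
// (see the percentile buckets below).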
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;    // better than avg.
  else                          size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())   return true;
  if (this->heat() < that->heat())   return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

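// The warm-call work-list is a singly linked list kept sorted by decreasing
// heat: insert_into() places a new entry after all strictly warmer entries,
// so the hottest candidate is always at the head.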
WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}

#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT