Mon, 25 May 2020 14:24:27 +0800
8244407: JVM crashes after transformation in C2 IdealLoopTree::split_fall_in
Reviewed-by: thartmann, kvn, andrew
Contributed-by: zhouyong44@huawei.com
/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/connode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
 private:
  bool  _is_osr;
  float _expected_uses;

 public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }
};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new (kit.C) CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  return kit.transfer_exceptions_into_jvms();
}
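
// Usage sketch (an illustration, not code from this file): callers obtain
// this generator through the factory defined further below and drive it
// against the current JVM state, e.g.
//   CallGenerator* cg = CallGenerator::for_direct_call(callee);
//   JVMState* new_jvms = cg->generate(jvms);
// The actual selection among generators happens elsewhere (see
// Compile::call_generator(), used later in this file).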

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
 private:
  int _vtable_index;
 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), err_msg("%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc())));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches ");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new (kit.C) CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}
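
// Selection sketch (assumed, for orientation): Compile::call_generator()
// chooses among the factories above.  A devirtualized monomorphic site
// typically gets for_inline() or for_direct_call(), while a genuinely
// polymorphic site gets for_virtual_call(), usually with
// Method::invalid_vtable_index because inline caches (+UseInlineCaches)
// handle the dispatch rather than a vtable stub (see the assert in
// VirtualCallGenerator::generate above).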

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
 protected:
  CallGenerator* _inline_cg;

  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();
    C->print_inlining_skip(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_insert(this);
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
  }
};

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple *r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // Make a clone of the JVMState that is appropriate for driving a parse
  JVMState* old_jvms = call->jvms();
  JVMState* jvms = old_jvms->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new (C) SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
    C->initial_gvn()->set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  uint nargs = method()->arg_size();
  // blow away old call arguments
  Node* top = C->top();
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_req(TypeFunc::Parms + i1, top);
  }
  jvms->set_map(map);

  // Make enough space in the expression stack to transfer
  // the incoming arguments and return value.
  map->ensure_stack(jvms, jvms->method()->max_stack());
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
  }

  // This check is done here because for_method_handle_inline() method
  // needs jvms for inlined state.
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  C->print_inlining_insert(this);

  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("late_inline method='%d'", log->identify(method()));
    JVMState* p = jvms;
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("late_inline");
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->node_notes_at(call->_idx);
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int   result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

  kit.replace_call(call, result, true);
}
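
// Lifecycle sketch (inferred from the code above, not a spec):
// generate() emits the stub CallStaticJavaNode and enqueues this
// generator on the compile; after the main parse, the incremental
// inlining driver calls do_late_inline(), which rebuilds a parser-ready
// map from the call's JVMState, replays _inline_cg->generate() over it,
// and splices the inlined subgraph in via replace_call().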

CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int       _attempt;
  bool      _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

 public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
    if (_input_not_const) {
      // inlining won't be possible so no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      Compile::current()->add_late_inline(this);
    }
    return new_jvms;
  }

  virtual void print_inlining_late(const char* msg) {
    if (!_input_not_const) return;
    LateInlineCallGenerator::print_inlining_late(msg);
  }
};

bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL) {
    assert(!cg->is_late_inline() && cg->is_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();
    C->print_inlining_skip(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

 public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile *C = Compile::current();
    C->print_inlining_skip(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}
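
// Summary (from the classes above): late inlining comes in four flavors.
// The plain and method-handle variants enqueue on Compile's main
// late-inline list (add_late_inline), while the string and boxing
// variants use dedicated lists (add_string_late_inline,
// add_boxing_late_inline); all four emit the out-of-line stub call via
// DirectCallGenerator::generate() until the real inline happens.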

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

 public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const   { return _is_inline; }
  virtual bool      is_virtual() const  { return _is_virtual; }
  virtual bool      is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}

//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool      is_virtual()  const { return true; }
  virtual bool      is_inline()   const { return _if_hit->is_inline(); }
  virtual bool      is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}

JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new (kit.C) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}
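
// Shape of the subgraph built above (sketch):
//
//              type_check_receiver
//               /                \
//        hit (predicted)     miss (slow_ctl)
//        _if_hit generator   _if_missed generator
//               \                /
//           RegionNode(3), merging control, i/o,
//           memory and JVM state through Phis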

CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic() ||
         callee->is_compiled_lambda_form(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        guarantee(!target->is_method_handle_intrinsic(), "should not happen");  // XXX remove
        const int vtable_index = Method::invalid_vtable_index;
        CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            /*check_access=*/false);
          // We lack profiling at this call but type speculation may
          // provide us with a type.
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }

        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true);
        assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here");
        if (cg != NULL && cg->is_inline())
          return cg;
      }
    }
    break;

  default:
    fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
    break;
  }
  return NULL;
}
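
// Argument layout assumed by the _linkTo* case above (illustrative):
//   linkToVirtual(recv, a1, ..., an, MemberName mn)
// so kit.argument(callee->arg_size() - 1) is the trailing MemberName that
// holds the resolved target, kit.argument(0) is the receiver, and the
// loop walks a1..an, inserting CheckCastPP nodes wherever the erased
// lambda-form types are weaker than the target's declared signature types.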

//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

 public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual()   const { return true; }
  virtual bool      is_inlined()   const { return true; }
  virtual bool      is_intrinsic() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}

JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //      uncommon_trap
  //    if (predicate(0))
  //      do_intrinsic(0)
  //    else
  //      if (predicate(1))
  //        do_intrinsic(1)
  //      ...
  //      else
  //        do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new (kit.C) RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new (kit.C) RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces the call site with an unconditional uncommon trap.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}

JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  // Callsite signature can be different from the actual method being called (i.e. _linkTo* sites).
  // Use the callsite signature always.
  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
  int nargs = declared_method->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD  (30.0)
#define NODES_PER_BYTECODE         (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //  Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}
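
// Worked example (illustrative numbers): a callee with code_size == 100
// bytecodes gets _work = 1.0 + 100/3 ~= 34.3 call-overheads and
// _size = 30.0 + 9.5 * 100 = 980.0 expected graph nodes.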

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)   return true;
  if (profit() <  WarmCallMinProfit)  return true;
  if (work()   >  WarmCallMaxWork)    return true;
  if (size()   >  WarmCallMaxSize)    return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;   // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;   // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;   // better than avg.
  else                          size_factor = 0.5; // worse than avg.
  return (count() * profit() * size_factor);
}
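
// Worked example (illustrative flag values, not defaults from this file):
// with HotCallTrivialSize == 10 and WarmCallMaxSize >= 500, min_size = 10
// and max_size = 500; a warm call with size() == 59, count() == 100 and
// profit() == 1.0 gives method_size = 49/490 = 0.1, hence
// size_factor = 2 and heat = 100 * 1.0 * 2 = 200.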

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())   return true;
  if (this->heat() < that->heat())   return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)  head   = this;
  else                 prev_p->set_next(this);
  return head;
}
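
// Example (illustrative): inserting a WCI with heat 7 into the list
// [9, 5, 2] walks past 9 (still warmer), stops before 5, and yields
// [9, 7, 5, 2]; the list handed to Compile::set_warm_calls() thus stays
// sorted hottest-first.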

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)  head   = next_p;
  else                 prev_p->set_next(next_p);
  return head;
}

WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT