Wed, 09 Dec 2009 16:40:45 -0800
6895383: JCK test throws NPE for method compiled with Escape Analysis
Summary: Add missing checks for MemBar nodes in EA.
Reviewed-by: never
/*
 * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_callGenerator.cpp.incl"

CallGenerator::CallGenerator(ciMethod* method) {
  _method = method;
}

// Utility function: the C2 signature (TypeFunc) of this generator's method.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
 private:
  bool  _is_osr;
  float _expected_uses;

 public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(can_parse(method, is_osr), "parse must be possible");
  }

  // Can we build either an OSR or a regular parser for this method?
  static bool can_parse(ciMethod* method, bool is_osr = false);

  virtual bool      is_parse() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
  int is_osr() { return _is_osr; }

};
JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}
//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
 private:
  // The generated call node; saved so late inlining can find it again.
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

 public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(NULL),  // set for real by generate()
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), target, method(), kit.bci());
  if (!is_static) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    kit.null_check_receiver(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  kit.push_node(method()->return_type()->basic_type(), ret);
  _call_node = call;  // Save the call node in case we need it later
  return kit.transfer_exceptions_into_jvms();
}
class VirtualCallGenerator : public CallGenerator {
 private:
  int _vtable_index;
 public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == methodOopDesc::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it. The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    kit.inc_sp(method()->arg_size());  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However, currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod *caller = kit.method();
  ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == methodOopDesc::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  kit.push_node(method()->return_type()->basic_type(), ret);

  // Represent the effect of an implicit receiver null_check
  // as part of this call. Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}
bool ParseGenerator::can_parse(ciMethod* m, bool is_osr) {
  // Certain methods cannot be parsed at all:
  if (!m->can_be_compiled())              return false;
  if (!m->has_balanced_monitors())        return false;
  if (m->get_flow_analysis()->failing())  return false;

  // (Methods may bail out for other reasons, after the parser is run.
  // We try to avoid this, but if forced, we must return (Node*)NULL.
  // The user of the CallGenerator must check for this condition.)
  return true;
}
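// Factory methods. Note that for_inline() and for_osr() return NULL when
// the method cannot be parsed; callers must check for NULL.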
CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (!ParseGenerator::can_parse(m))  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller. Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (!ParseGenerator::can_parse(m, true))  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}
CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
class LateInlineCallGenerator : public DirectCallGenerator {
  CallGenerator* _inline_cg;

 public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _inline_cg(inline_cg) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  JVMState* generate(JVMState* jvms) {
    // Record that this call site should be revisited once the main
    // parse is finished.
    Compile::current()->add_late_inline(this);

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

};
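// Late inlining works in two phases: during the main parse, generate()
// emits an ordinary out-of-line CallStaticJava and queues this generator
// with the Compile object; after parsing, do_late_inline() rebuilds a
// JVMState and map from that call node, runs the deferred inline
// generator, and splices its result in place of the call.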
void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it: the call site was never generated, is dead, or has
  // lost its control input.
  if (call_node() == NULL || call_node()->outcnt() == 0 ||
      call_node()->in(0) == NULL || call_node()->in(0)->is_top())
    return;

  CallStaticJavaNode* call = call_node();

  // Make a clone of the JVMState that is appropriate for driving a parse
  Compile* C = Compile::current();
  JVMState* jvms = call->jvms()->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new (C, size) SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
  }

  // Make enough space for the expression stack and transfer the incoming arguments
  int nargs = method()->arg_size();
  jvms->set_map(map);
  map->ensure_stack(jvms, jvms->method()->max_stack());
  if (nargs > 0) {
    for (int i1 = 0; i1 < nargs; i1++) {
      map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
    }
  }

  CompileLog* log = C->log();
  if (log != NULL) {
    log->head("late_inline method='%d'", log->identify(method()));
    JVMState* p = jvms;
    while (p != NULL) {
      log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method()));
      p = p->caller();
    }
    log->tail("late_inline");
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->default_node_notes();
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }
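  // Splice the inlined subgraph in place of the original CallStaticJava,
  // rerouting the call's control, i/o, memory and result users to the
  // corresponding outputs of the inlined body.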
  kit.replace_call(call, result);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}
//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*  _call_info;
  CallGenerator* _if_cold;
  CallGenerator* _if_hot;
  bool           _is_virtual;  // caches virtuality of if_cold
  bool           _is_inline;   // caches inline-ness of if_hot

 public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline()   const { return _is_inline; }
  virtual bool      is_virtual()  const { return _is_virtual; }
  virtual bool      is_deferred() const { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
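    // Walk back up the control chain from the cold call's exit to find
    // the CallJava node itself: CatchProj -> Catch -> Proj -> CallJava.
    // If any link fails to match, m degenerates to top and the
    // is_CallJava() test below fails, so nothing is queued.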
    Node* m = jvms->map()->control();
    if (m->is_CatchProj())  m = m->in(0);  else m = C->top();
    if (m->is_Catch())      m = m->in(0);  else m = C->top();
    if (m->is_Proj())       m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}
//------------------------PredictedCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

 public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX)  hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN)  hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool      is_virtual()  const { return true; }
  virtual bool      is_inline()   const { return _if_hit->is_inline(); }
  virtual bool      is_deferred() const { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}
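// generate() lays down, in order: an explicit receiver null check, a type
// check against _predicted_receiver, the _if_missed generator on the slow
// arm, the _if_hit generator (falling back to a direct call if the inline
// fails) on the fast arm, and finally a Region/Phi merge of the two arms
// when both remain live.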
JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      assert(slow_jvms != NULL, "miss path must not fail to generate");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // Instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // fall through if the instance exactly matches the desired type
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }
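  // Both arms are live: merge the fast (predicted) and slow (virtual)
  // continuations.  Control meets in a two-input Region; i/o and memory
  // meet in Phis; and any JVMS map slots that differ between the two
  // arms get a Phi of their own.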
  // Finish the diamond.
  kit.C->set_has_split_ifs(true);  // Has chance for split-if optimization
  RegionNode* region = new (kit.C, 3) RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}
//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which handles call sites that are compiled as uncommon
// traps instead of real calls.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

 public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  int nargs = method()->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call, as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops.
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}
// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //   Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //   Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}
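// For example (illustrative numbers only): a call site whose callee has
// code_size == 100 bytecodes gets _work = 1.0 + 100/3, about 34.3
// call-overheads, and _size = 30.0 + 9.5 * 100 = 980.0 expected nodes.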
// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)   return true;
  if (profit() <  WarmCallMinProfit)  return true;
  if (work()   >  WarmCallMaxWork)    return true;
  if (size()   >  WarmCallMaxSize)    return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(),  "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;    // better than avg.
  else                          size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}
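// Worked example (illustrative, assuming min_size = 0 and max_size = 500):
// a warm call with size() == 100 has method_size = 100/500 = 0.2, so
// size_factor = 1 and heat = count() * profit().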
bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())  return true;
  if (this->heat() < that->heat())  return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)
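// The warm-call work list is a singly linked list kept sorted by
// decreasing heat: insert_into() walks past every entry warmer than the
// new one, so the head of the list is always the hottest candidate.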
WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)  head = this;
  else                 prev_p->set_next(this);
  return head;
}

WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)  head = next_p;
  else                 prev_p->set_next(next_p);
  return head;
}
WarmCallInfo* WarmCallInfo::_always_hot  = NULL;
WarmCallInfo* WarmCallInfo::_always_cold = NULL;

WarmCallInfo* WarmCallInfo::always_hot() {
  if (_always_hot == NULL) {
    static double bits[sizeof(WarmCallInfo) / sizeof(double) + 1] = {0};
    WarmCallInfo* ci = (WarmCallInfo*) bits;
    ci->_profit = ci->_count = MAX_VALUE();
    ci->_work   = ci->_size  = MIN_VALUE();
    _always_hot = ci;
  }
  assert(_always_hot->is_hot(), "must always be hot");
  return _always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  if (_always_cold == NULL) {
    static double bits[sizeof(WarmCallInfo) / sizeof(double) + 1] = {0};
    WarmCallInfo* ci = (WarmCallInfo*) bits;
    ci->_profit = ci->_count = MIN_VALUE();
    ci->_work   = ci->_size  = MAX_VALUE();
    _always_cold = ci;
  }
  assert(_always_cold->is_cold(), "must always be cold");
  return _always_cold;
}
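// Note: the sentinels above are built in zero-initialized static storage
// (an array of doubles, for alignment) rather than via constructors,
// presumably to avoid static-initialization-order issues; the extreme
// metric values force is_hot() and is_cold() respectively.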
#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT