aoqi@0: /* aoqi@0: * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. aoqi@0: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. aoqi@0: * aoqi@0: * This code is free software; you can redistribute it and/or modify it aoqi@0: * under the terms of the GNU General Public License version 2 only, as aoqi@0: * published by the Free Software Foundation. aoqi@0: * aoqi@0: * This code is distributed in the hope that it will be useful, but WITHOUT aoqi@0: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or aoqi@0: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License aoqi@0: * version 2 for more details (a copy is included in the LICENSE file that aoqi@0: * accompanied this code). aoqi@0: * aoqi@0: * You should have received a copy of the GNU General Public License version aoqi@0: * 2 along with this work; if not, write to the Free Software Foundation, aoqi@0: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. aoqi@0: * aoqi@0: * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA aoqi@0: * or visit www.oracle.com if you need additional information or have any aoqi@0: * questions. aoqi@0: * aoqi@0: */ aoqi@0: aoqi@0: #include "precompiled.hpp" aoqi@0: #include "ci/ciReplay.hpp" aoqi@0: #include "classfile/systemDictionary.hpp" aoqi@0: #include "classfile/vmSymbols.hpp" aoqi@0: #include "compiler/compileBroker.hpp" aoqi@0: #include "compiler/compileLog.hpp" aoqi@0: #include "interpreter/linkResolver.hpp" aoqi@0: #include "oops/objArrayKlass.hpp" aoqi@0: #include "opto/callGenerator.hpp" aoqi@0: #include "opto/parse.hpp" aoqi@0: #include "runtime/handles.inline.hpp" aoqi@0: aoqi@0: //============================================================================= aoqi@0: //------------------------------InlineTree------------------------------------- aoqi@0: InlineTree::InlineTree(Compile* c, aoqi@0: const InlineTree *caller_tree, ciMethod* callee, aoqi@0: JVMState* caller_jvms, int caller_bci, aoqi@0: float site_invoke_ratio, int max_inline_level) : aoqi@0: C(c), aoqi@0: _caller_jvms(caller_jvms), aoqi@0: _caller_tree((InlineTree*) caller_tree), aoqi@0: _method(callee), aoqi@0: _site_invoke_ratio(site_invoke_ratio), aoqi@0: _max_inline_level(max_inline_level), aoqi@0: _count_inline_bcs(method()->code_size_for_inlining()), aoqi@0: _subtrees(c->comp_arena(), 2, 0, NULL), aoqi@0: _msg(NULL) aoqi@0: { aoqi@0: #ifndef PRODUCT aoqi@0: _count_inlines = 0; aoqi@0: _forced_inline = false; aoqi@0: #endif aoqi@0: if (_caller_jvms != NULL) { aoqi@0: // Keep a private copy of the caller_jvms: aoqi@0: _caller_jvms = new (C) JVMState(caller_jvms->method(), caller_tree->caller_jvms()); aoqi@0: _caller_jvms->set_bci(caller_jvms->bci()); aoqi@0: assert(!caller_jvms->should_reexecute(), "there should be no reexecute bytecode with inlining"); aoqi@0: } aoqi@0: assert(_caller_jvms->same_calls_as(caller_jvms), "consistent JVMS"); aoqi@0: assert((caller_tree == NULL ? 0 : caller_tree->stack_depth() + 1) == stack_depth(), "correct (redundant) depth parameter"); aoqi@0: assert(caller_bci == this->caller_bci(), "correct (redundant) bci parameter"); aoqi@0: // Update hierarchical counts, count_inline_bcs() and count_inlines() aoqi@0: InlineTree *caller = (InlineTree *)caller_tree; aoqi@0: for( ; caller != NULL; caller = ((InlineTree *)(caller->caller_tree())) ) { aoqi@0: caller->_count_inline_bcs += count_inline_bcs(); aoqi@0: NOT_PRODUCT(caller->_count_inlines++;) aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: /** aoqi@0: * Return true when EA is ON and a java constructor is called or aoqi@0: * a super constructor is called from an inlined java constructor. aoqi@0: * Also return true for boxing methods. aoqi@0: */ aoqi@0: static bool is_init_with_ea(ciMethod* callee_method, aoqi@0: ciMethod* caller_method, Compile* C) { aoqi@0: if (!C->do_escape_analysis() || !EliminateAllocations) { aoqi@0: return false; // EA is off aoqi@0: } aoqi@0: if (callee_method->is_initializer()) { aoqi@0: return true; // constuctor aoqi@0: } aoqi@0: if (caller_method->is_initializer() && aoqi@0: caller_method != C->method() && aoqi@0: caller_method->holder()->is_subclass_of(callee_method->holder())) { aoqi@0: return true; // super constructor is called from inlined constructor aoqi@0: } aoqi@0: if (C->eliminate_boxing() && callee_method->is_boxing_method()) { aoqi@0: return true; aoqi@0: } aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: /** aoqi@0: * Force inlining unboxing accessor. aoqi@0: */ aoqi@0: static bool is_unboxing_method(ciMethod* callee_method, Compile* C) { aoqi@0: return C->eliminate_boxing() && callee_method->is_unboxing_method(); aoqi@0: } aoqi@0: aoqi@0: // positive filter: should callee be inlined? aoqi@0: bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method, aoqi@0: int caller_bci, ciCallProfile& profile, aoqi@0: WarmCallInfo* wci_result) { aoqi@0: // Allows targeted inlining aoqi@0: if(callee_method->should_inline()) { aoqi@0: *wci_result = *(WarmCallInfo::always_hot()); aoqi@0: if (C->print_inlining() && Verbose) { aoqi@0: CompileTask::print_inline_indent(inline_level()); aoqi@0: tty->print_cr("Inlined method is hot: "); aoqi@0: } aoqi@0: set_msg("force inline by CompilerOracle"); aoqi@0: _forced_inline = true; aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: #ifndef PRODUCT aoqi@0: int inline_depth = inline_level()+1; aoqi@0: if (ciReplay::should_inline(C->replay_inline_data(), callee_method, caller_bci, inline_depth)) { aoqi@0: set_msg("force inline by ciReplay"); aoqi@0: _forced_inline = true; aoqi@0: return true; aoqi@0: } aoqi@0: #endif aoqi@0: aoqi@0: int size = callee_method->code_size_for_inlining(); aoqi@0: aoqi@0: // Check for too many throws (and not too huge) aoqi@0: if(callee_method->interpreter_throwout_count() > InlineThrowCount && aoqi@0: size < InlineThrowMaxSize ) { aoqi@0: wci_result->set_profit(wci_result->profit() * 100); aoqi@0: if (C->print_inlining() && Verbose) { aoqi@0: CompileTask::print_inline_indent(inline_level()); aoqi@0: tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count()); aoqi@0: } aoqi@0: set_msg("many throws"); aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: int default_max_inline_size = C->max_inline_size(); aoqi@0: int inline_small_code_size = InlineSmallCode / 4; aoqi@0: int max_inline_size = default_max_inline_size; aoqi@0: aoqi@0: int call_site_count = method()->scale_count(profile.count()); aoqi@0: int invoke_count = method()->interpreter_invocation_count(); aoqi@0: aoqi@0: assert(invoke_count != 0, "require invocation count greater than zero"); aoqi@0: int freq = call_site_count / invoke_count; aoqi@0: aoqi@0: // bump the max size if the call is frequent aoqi@0: if ((freq >= InlineFrequencyRatio) || aoqi@0: (call_site_count >= InlineFrequencyCount) || aoqi@0: is_unboxing_method(callee_method, C) || aoqi@0: is_init_with_ea(callee_method, caller_method, C)) { aoqi@0: aoqi@0: max_inline_size = C->freq_inline_size(); aoqi@0: if (size <= max_inline_size && TraceFrequencyInlining) { aoqi@0: CompileTask::print_inline_indent(inline_level()); aoqi@0: tty->print_cr("Inlined frequent method (freq=%d count=%d):", freq, call_site_count); aoqi@0: CompileTask::print_inline_indent(inline_level()); aoqi@0: callee_method->print(); aoqi@0: tty->cr(); aoqi@0: } aoqi@0: } else { aoqi@0: // Not hot. Check for medium-sized pre-existing nmethod at cold sites. aoqi@0: if (callee_method->has_compiled_code() && aoqi@0: callee_method->instructions_size() > inline_small_code_size) { aoqi@0: set_msg("already compiled into a medium method"); aoqi@0: return false; aoqi@0: } aoqi@0: } aoqi@0: if (size > max_inline_size) { aoqi@0: if (max_inline_size > default_max_inline_size) { aoqi@0: set_msg("hot method too big"); aoqi@0: } else { aoqi@0: set_msg("too big"); aoqi@0: } aoqi@0: return false; aoqi@0: } aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: aoqi@0: // negative filter: should callee NOT be inlined? aoqi@0: bool InlineTree::should_not_inline(ciMethod *callee_method, aoqi@0: ciMethod* caller_method, aoqi@0: JVMState* jvms, aoqi@0: WarmCallInfo* wci_result) { aoqi@0: aoqi@0: const char* fail_msg = NULL; aoqi@0: aoqi@0: // First check all inlining restrictions which are required for correctness aoqi@0: if ( callee_method->is_abstract()) { aoqi@0: fail_msg = "abstract method"; // // note: we allow ik->is_abstract() aoqi@0: } else if (!callee_method->holder()->is_initialized()) { aoqi@0: fail_msg = "method holder not initialized"; aoqi@0: } else if ( callee_method->is_native()) { aoqi@0: fail_msg = "native method"; aoqi@0: } else if ( callee_method->dont_inline()) { aoqi@0: fail_msg = "don't inline by annotation"; aoqi@0: } aoqi@0: aoqi@0: // one more inlining restriction aoqi@0: if (fail_msg == NULL && callee_method->has_unloaded_classes_in_signature()) { aoqi@0: fail_msg = "unloaded signature classes"; aoqi@0: } aoqi@0: aoqi@0: if (fail_msg != NULL) { aoqi@0: set_msg(fail_msg); aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: // ignore heuristic controls on inlining aoqi@0: if (callee_method->should_inline()) { aoqi@0: set_msg("force inline by CompilerOracle"); aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: if (callee_method->should_not_inline()) { aoqi@0: set_msg("disallowed by CompilerOracle"); aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: #ifndef PRODUCT aoqi@0: int caller_bci = jvms->bci(); aoqi@0: int inline_depth = inline_level()+1; aoqi@0: if (ciReplay::should_inline(C->replay_inline_data(), callee_method, caller_bci, inline_depth)) { aoqi@0: set_msg("force inline by ciReplay"); aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: if (ciReplay::should_not_inline(C->replay_inline_data(), callee_method, caller_bci, inline_depth)) { aoqi@0: set_msg("disallowed by ciReplay"); aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: if (ciReplay::should_not_inline(callee_method)) { aoqi@0: set_msg("disallowed by ciReplay"); aoqi@0: return true; aoqi@0: } aoqi@0: #endif aoqi@0: aoqi@0: // Now perform checks which are heuristic aoqi@0: aoqi@0: if (is_unboxing_method(callee_method, C)) { aoqi@0: // Inline unboxing methods. aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: if (!callee_method->force_inline()) { aoqi@0: if (callee_method->has_compiled_code() && aoqi@0: callee_method->instructions_size() > InlineSmallCode) { aoqi@0: set_msg("already compiled into a big method"); aoqi@0: return true; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // don't inline exception code unless the top method belongs to an aoqi@0: // exception class aoqi@0: if (caller_tree() != NULL && aoqi@0: callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) { aoqi@0: const InlineTree *top = this; aoqi@0: while (top->caller_tree() != NULL) top = top->caller_tree(); aoqi@0: ciInstanceKlass* k = top->method()->holder(); aoqi@0: if (!k->is_subclass_of(C->env()->Throwable_klass())) { aoqi@0: set_msg("exception method"); aoqi@0: return true; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // use frequency-based objections only for non-trivial methods aoqi@0: if (callee_method->code_size() <= MaxTrivialSize) { aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: // don't use counts with -Xcomp or CTW aoqi@0: if (UseInterpreter && !CompileTheWorld) { aoqi@0: aoqi@0: if (!callee_method->has_compiled_code() && aoqi@0: !callee_method->was_executed_more_than(0)) { aoqi@0: set_msg("never executed"); aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: if (is_init_with_ea(callee_method, caller_method, C)) { aoqi@0: // Escape Analysis: inline all executed constructors aoqi@0: return false; aoqi@0: } else if (!callee_method->was_executed_more_than(MIN2(MinInliningThreshold, aoqi@0: CompileThreshold >> 1))) { aoqi@0: set_msg("executed < MinInliningThreshold times"); aoqi@0: return true; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: //-----------------------------try_to_inline----------------------------------- aoqi@0: // return true if ok aoqi@0: // Relocated from "InliningClosure::try_to_inline" aoqi@0: bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method, aoqi@0: int caller_bci, JVMState* jvms, ciCallProfile& profile, aoqi@0: WarmCallInfo* wci_result, bool& should_delay) { aoqi@0: aoqi@0: if (ClipInlining && (int)count_inline_bcs() >= DesiredMethodLimit) { aoqi@0: if (!callee_method->force_inline() || !IncrementalInline) { aoqi@0: set_msg("size > DesiredMethodLimit"); aoqi@0: return false; aoqi@0: } else if (!C->inlining_incrementally()) { aoqi@0: should_delay = true; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: _forced_inline = false; // Reset aoqi@0: if (!should_inline(callee_method, caller_method, caller_bci, profile, aoqi@0: wci_result)) { aoqi@0: return false; aoqi@0: } aoqi@0: if (should_not_inline(callee_method, caller_method, jvms, wci_result)) { aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: if (InlineAccessors && callee_method->is_accessor()) { aoqi@0: // accessor methods are not subject to any of the following limits. aoqi@0: set_msg("accessor"); aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: // suppress a few checks for accessors and trivial methods aoqi@0: if (callee_method->code_size() > MaxTrivialSize) { aoqi@0: aoqi@0: // don't inline into giant methods aoqi@0: if (C->over_inlining_cutoff()) { aoqi@0: if ((!callee_method->force_inline() && !caller_method->is_compiled_lambda_form()) aoqi@0: || !IncrementalInline) { aoqi@0: set_msg("NodeCountInliningCutoff"); aoqi@0: return false; aoqi@0: } else { aoqi@0: should_delay = true; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: if ((!UseInterpreter || CompileTheWorld) && aoqi@0: is_init_with_ea(callee_method, caller_method, C)) { aoqi@0: // Escape Analysis stress testing when running Xcomp or CTW: aoqi@0: // inline constructors even if they are not reached. aoqi@0: } else if (forced_inline()) { aoqi@0: // Inlining was forced by CompilerOracle or ciReplay aoqi@0: } else if (profile.count() == 0) { aoqi@0: // don't inline unreached call sites aoqi@0: set_msg("call site not reached"); aoqi@0: return false; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: if (!C->do_inlining() && InlineAccessors) { aoqi@0: set_msg("not an accessor"); aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: // Limit inlining depth in case inlining is forced or aoqi@0: // _max_inline_level was increased to compensate for lambda forms. aoqi@0: if (inline_level() > MaxForceInlineLevel) { aoqi@0: set_msg("MaxForceInlineLevel"); aoqi@0: return false; aoqi@0: } aoqi@0: if (inline_level() > _max_inline_level) { aoqi@0: if (!callee_method->force_inline() || !IncrementalInline) { aoqi@0: set_msg("inlining too deep"); aoqi@0: return false; aoqi@0: } else if (!C->inlining_incrementally()) { aoqi@0: should_delay = true; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // detect direct and indirect recursive inlining aoqi@0: { aoqi@0: // count the current method and the callee aoqi@0: const bool is_compiled_lambda_form = callee_method->is_compiled_lambda_form(); aoqi@0: int inline_level = 0; aoqi@0: if (!is_compiled_lambda_form) { aoqi@0: if (method() == callee_method) { aoqi@0: inline_level++; aoqi@0: } aoqi@0: } aoqi@0: // count callers of current method and callee aoqi@0: Node* callee_argument0 = is_compiled_lambda_form ? jvms->map()->argument(jvms, 0)->uncast() : NULL; aoqi@0: for (JVMState* j = jvms->caller(); j != NULL && j->has_method(); j = j->caller()) { aoqi@0: if (j->method() == callee_method) { aoqi@0: if (is_compiled_lambda_form) { aoqi@0: // Since compiled lambda forms are heavily reused we allow recursive inlining. If it is truly aoqi@0: // a recursion (using the same "receiver") we limit inlining otherwise we can easily blow the aoqi@0: // compiler stack. aoqi@0: Node* caller_argument0 = j->map()->argument(j, 0)->uncast(); aoqi@0: if (caller_argument0 == callee_argument0) { aoqi@0: inline_level++; aoqi@0: } aoqi@0: } else { aoqi@0: inline_level++; aoqi@0: } aoqi@0: } aoqi@0: } aoqi@0: if (inline_level > MaxRecursiveInlineLevel) { aoqi@0: set_msg("recursive inlining is too deep"); aoqi@0: return false; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: int size = callee_method->code_size_for_inlining(); aoqi@0: aoqi@0: if (ClipInlining && (int)count_inline_bcs() + size >= DesiredMethodLimit) { aoqi@0: if (!callee_method->force_inline() || !IncrementalInline) { aoqi@0: set_msg("size > DesiredMethodLimit"); aoqi@0: return false; aoqi@0: } else if (!C->inlining_incrementally()) { aoqi@0: should_delay = true; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // ok, inline this method aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: //------------------------------pass_initial_checks---------------------------- aoqi@0: bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* callee_method) { aoqi@0: ciInstanceKlass *callee_holder = callee_method ? callee_method->holder() : NULL; aoqi@0: // Check if a callee_method was suggested aoqi@0: if( callee_method == NULL ) return false; aoqi@0: // Check if klass of callee_method is loaded aoqi@0: if( !callee_holder->is_loaded() ) return false; aoqi@0: if( !callee_holder->is_initialized() ) return false; aoqi@0: if( !UseInterpreter || CompileTheWorld /* running Xcomp or CTW */ ) { aoqi@0: // Checks that constant pool's call site has been visited aoqi@0: // stricter than callee_holder->is_initialized() aoqi@0: ciBytecodeStream iter(caller_method); aoqi@0: iter.force_bci(caller_bci); aoqi@0: Bytecodes::Code call_bc = iter.cur_bc(); aoqi@0: // An invokedynamic instruction does not have a klass. aoqi@0: if (call_bc != Bytecodes::_invokedynamic) { aoqi@0: int index = iter.get_index_u2_cpcache(); aoqi@0: if (!caller_method->is_klass_loaded(index, true)) { aoqi@0: return false; aoqi@0: } aoqi@0: // Try to do constant pool resolution if running Xcomp aoqi@0: if( !caller_method->check_call(index, call_bc == Bytecodes::_invokestatic) ) { aoqi@0: return false; aoqi@0: } aoqi@0: } aoqi@0: } aoqi@0: // We will attempt to see if a class/field/etc got properly loaded. If it aoqi@0: // did not, it may attempt to throw an exception during our probing. Catch aoqi@0: // and ignore such exceptions and do not attempt to compile the method. aoqi@0: if( callee_method->should_exclude() ) return false; aoqi@0: aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: //------------------------------check_can_parse-------------------------------- aoqi@0: const char* InlineTree::check_can_parse(ciMethod* callee) { aoqi@0: // Certain methods cannot be parsed at all: aoqi@0: if ( callee->is_native()) return "native method"; aoqi@0: if ( callee->is_abstract()) return "abstract method"; aoqi@0: if (!callee->can_be_compiled()) return "not compilable (disabled)"; aoqi@0: if (!callee->has_balanced_monitors()) return "not compilable (unbalanced monitors)"; aoqi@0: if ( callee->get_flow_analysis()->failing()) return "not compilable (flow analysis failed)"; aoqi@0: return NULL; aoqi@0: } aoqi@0: aoqi@0: //------------------------------print_inlining--------------------------------- aoqi@0: void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, aoqi@0: bool success) const { aoqi@0: const char* inline_msg = msg(); aoqi@0: assert(inline_msg != NULL, "just checking"); aoqi@0: if (C->log() != NULL) { aoqi@0: if (success) { aoqi@0: C->log()->inline_success(inline_msg); aoqi@0: } else { aoqi@0: C->log()->inline_fail(inline_msg); aoqi@0: } aoqi@0: } aoqi@0: if (C->print_inlining()) { aoqi@0: C->print_inlining(callee_method, inline_level(), caller_bci, inline_msg); aoqi@0: if (callee_method == NULL) tty->print(" callee not monotonic or profiled"); aoqi@0: if (Verbose && callee_method) { aoqi@0: const InlineTree *top = this; aoqi@0: while( top->caller_tree() != NULL ) { top = top->caller_tree(); } aoqi@0: //tty->print(" bcs: %d+%d invoked: %d", top->count_inline_bcs(), callee_method->code_size(), callee_method->interpreter_invocation_count()); aoqi@0: } aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: //------------------------------ok_to_inline----------------------------------- aoqi@0: WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallProfile& profile, WarmCallInfo* initial_wci, bool& should_delay) { aoqi@0: assert(callee_method != NULL, "caller checks for optimized virtual!"); aoqi@0: assert(!should_delay, "should be initialized to false"); aoqi@0: #ifdef ASSERT aoqi@0: // Make sure the incoming jvms has the same information content as me. aoqi@0: // This means that we can eventually make this whole class AllStatic. aoqi@0: if (jvms->caller() == NULL) { aoqi@0: assert(_caller_jvms == NULL, "redundant instance state"); aoqi@0: } else { aoqi@0: assert(_caller_jvms->same_calls_as(jvms->caller()), "redundant instance state"); aoqi@0: } aoqi@0: assert(_method == jvms->method(), "redundant instance state"); aoqi@0: #endif aoqi@0: int caller_bci = jvms->bci(); aoqi@0: ciMethod* caller_method = jvms->method(); aoqi@0: aoqi@0: // Do some initial checks. aoqi@0: if (!pass_initial_checks(caller_method, caller_bci, callee_method)) { aoqi@0: set_msg("failed initial checks"); aoqi@0: print_inlining(callee_method, caller_bci, false /* !success */); aoqi@0: return NULL; aoqi@0: } aoqi@0: aoqi@0: // Do some parse checks. aoqi@0: set_msg(check_can_parse(callee_method)); aoqi@0: if (msg() != NULL) { aoqi@0: print_inlining(callee_method, caller_bci, false /* !success */); aoqi@0: return NULL; aoqi@0: } aoqi@0: aoqi@0: // Check if inlining policy says no. aoqi@0: WarmCallInfo wci = *(initial_wci); aoqi@0: bool success = try_to_inline(callee_method, caller_method, caller_bci, aoqi@0: jvms, profile, &wci, should_delay); aoqi@0: aoqi@0: #ifndef PRODUCT aoqi@0: if (InlineWarmCalls && (PrintOpto || C->print_inlining())) { aoqi@0: bool cold = wci.is_cold(); aoqi@0: bool hot = !cold && wci.is_hot(); aoqi@0: bool old_cold = !success; aoqi@0: if (old_cold != cold || (Verbose || WizardMode)) { aoqi@0: if (msg() == NULL) { aoqi@0: set_msg("OK"); aoqi@0: } aoqi@0: tty->print(" OldInlining= %4s : %s\n WCI=", aoqi@0: old_cold ? "cold" : "hot", msg()); aoqi@0: wci.print(); aoqi@0: } aoqi@0: } aoqi@0: #endif aoqi@0: if (success) { aoqi@0: wci = *(WarmCallInfo::always_hot()); aoqi@0: } else { aoqi@0: wci = *(WarmCallInfo::always_cold()); aoqi@0: } aoqi@0: aoqi@0: if (!InlineWarmCalls) { aoqi@0: if (!wci.is_cold() && !wci.is_hot()) { aoqi@0: // Do not inline the warm calls. aoqi@0: wci = *(WarmCallInfo::always_cold()); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: if (!wci.is_cold()) { aoqi@0: // Inline! aoqi@0: if (msg() == NULL) { aoqi@0: set_msg("inline (hot)"); aoqi@0: } aoqi@0: print_inlining(callee_method, caller_bci, true /* success */); aoqi@0: build_inline_tree_for_callee(callee_method, jvms, caller_bci); aoqi@0: if (InlineWarmCalls && !wci.is_hot()) aoqi@0: return new (C) WarmCallInfo(wci); // copy to heap aoqi@0: return WarmCallInfo::always_hot(); aoqi@0: } aoqi@0: aoqi@0: // Do not inline aoqi@0: if (msg() == NULL) { aoqi@0: set_msg("too cold to inline"); aoqi@0: } aoqi@0: print_inlining(callee_method, caller_bci, false /* !success */ ); aoqi@0: return NULL; aoqi@0: } aoqi@0: aoqi@0: //------------------------------compute_callee_frequency----------------------- aoqi@0: float InlineTree::compute_callee_frequency( int caller_bci ) const { aoqi@0: int count = method()->interpreter_call_site_count(caller_bci); aoqi@0: int invcnt = method()->interpreter_invocation_count(); aoqi@0: float freq = (float)count/(float)invcnt; aoqi@0: // Call-site count / interpreter invocation count, scaled recursively. aoqi@0: // Always between 0.0 and 1.0. Represents the percentage of the method's aoqi@0: // total execution time used at this call site. aoqi@0: aoqi@0: return freq; aoqi@0: } aoqi@0: aoqi@0: //------------------------------build_inline_tree_for_callee------------------- aoqi@0: InlineTree *InlineTree::build_inline_tree_for_callee( ciMethod* callee_method, JVMState* caller_jvms, int caller_bci) { aoqi@0: float recur_frequency = _site_invoke_ratio * compute_callee_frequency(caller_bci); aoqi@0: // Attempt inlining. aoqi@0: InlineTree* old_ilt = callee_at(caller_bci, callee_method); aoqi@0: if (old_ilt != NULL) { aoqi@0: return old_ilt; aoqi@0: } aoqi@0: int max_inline_level_adjust = 0; aoqi@0: if (caller_jvms->method() != NULL) { aoqi@0: if (caller_jvms->method()->is_compiled_lambda_form()) aoqi@0: max_inline_level_adjust += 1; // don't count actions in MH or indy adapter frames aoqi@0: else if (callee_method->is_method_handle_intrinsic() || aoqi@0: callee_method->is_compiled_lambda_form()) { aoqi@0: max_inline_level_adjust += 1; // don't count method handle calls from java.lang.invoke implem aoqi@0: } aoqi@0: if (max_inline_level_adjust != 0 && C->print_inlining() && (Verbose || WizardMode)) { aoqi@0: CompileTask::print_inline_indent(inline_level()); aoqi@0: tty->print_cr(" \\-> discounting inline depth"); aoqi@0: } aoqi@0: if (max_inline_level_adjust != 0 && C->log()) { aoqi@0: int id1 = C->log()->identify(caller_jvms->method()); aoqi@0: int id2 = C->log()->identify(callee_method); aoqi@0: C->log()->elem("inline_level_discount caller='%d' callee='%d'", id1, id2); aoqi@0: } aoqi@0: } aoqi@0: InlineTree* ilt = new InlineTree(C, this, callee_method, caller_jvms, caller_bci, recur_frequency, _max_inline_level + max_inline_level_adjust); aoqi@0: _subtrees.append(ilt); aoqi@0: aoqi@0: NOT_PRODUCT( _count_inlines += 1; ) aoqi@0: aoqi@0: return ilt; aoqi@0: } aoqi@0: aoqi@0: aoqi@0: //---------------------------------------callee_at----------------------------- aoqi@0: InlineTree *InlineTree::callee_at(int bci, ciMethod* callee) const { aoqi@0: for (int i = 0; i < _subtrees.length(); i++) { aoqi@0: InlineTree* sub = _subtrees.at(i); aoqi@0: if (sub->caller_bci() == bci && callee == sub->method()) { aoqi@0: return sub; aoqi@0: } aoqi@0: } aoqi@0: return NULL; aoqi@0: } aoqi@0: aoqi@0: aoqi@0: //------------------------------build_inline_tree_root------------------------- aoqi@0: InlineTree *InlineTree::build_inline_tree_root() { aoqi@0: Compile* C = Compile::current(); aoqi@0: aoqi@0: // Root of inline tree aoqi@0: InlineTree* ilt = new InlineTree(C, NULL, C->method(), NULL, -1, 1.0F, MaxInlineLevel); aoqi@0: aoqi@0: return ilt; aoqi@0: } aoqi@0: aoqi@0: aoqi@0: //-------------------------find_subtree_from_root----------------------------- aoqi@0: // Given a jvms, which determines a call chain from the root method, aoqi@0: // find the corresponding inline tree. aoqi@0: // Note: This method will be removed or replaced as InlineTree goes away. aoqi@0: InlineTree* InlineTree::find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee) { aoqi@0: InlineTree* iltp = root; aoqi@0: uint depth = jvms && jvms->has_method() ? jvms->depth() : 0; aoqi@0: for (uint d = 1; d <= depth; d++) { aoqi@0: JVMState* jvmsp = jvms->of_depth(d); aoqi@0: // Select the corresponding subtree for this bci. aoqi@0: assert(jvmsp->method() == iltp->method(), "tree still in sync"); aoqi@0: ciMethod* d_callee = (d == depth) ? callee : jvms->of_depth(d+1)->method(); aoqi@0: InlineTree* sub = iltp->callee_at(jvmsp->bci(), d_callee); aoqi@0: if (sub == NULL) { aoqi@0: if (d == depth) { aoqi@0: sub = iltp->build_inline_tree_for_callee(d_callee, jvmsp, jvmsp->bci()); aoqi@0: } aoqi@0: guarantee(sub != NULL, "should be a sub-ilt here"); aoqi@0: return sub; aoqi@0: } aoqi@0: iltp = sub; aoqi@0: } aoqi@0: return iltp; aoqi@0: } aoqi@0: aoqi@0: // Count number of nodes in this subtree aoqi@0: int InlineTree::count() const { aoqi@0: int result = 1; aoqi@0: for (int i = 0 ; i < _subtrees.length(); i++) { aoqi@0: result += _subtrees.at(i)->count(); aoqi@0: } aoqi@0: return result; aoqi@0: } aoqi@0: aoqi@0: void InlineTree::dump_replay_data(outputStream* out) { aoqi@0: out->print(" %d %d ", inline_level(), caller_bci()); aoqi@0: method()->dump_name_as_ascii(out); aoqi@0: for (int i = 0 ; i < _subtrees.length(); i++) { aoqi@0: _subtrees.at(i)->dump_replay_data(out); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: aoqi@0: #ifndef PRODUCT aoqi@0: void InlineTree::print_impl(outputStream* st, int indent) const { aoqi@0: for (int i = 0; i < indent; i++) st->print(" "); aoqi@0: st->print(" @ %d", caller_bci()); aoqi@0: method()->print_short_name(st); aoqi@0: st->cr(); aoqi@0: aoqi@0: for (int i = 0 ; i < _subtrees.length(); i++) { aoqi@0: _subtrees.at(i)->print_impl(st, indent + 2); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: void InlineTree::print_value_on(outputStream* st) const { aoqi@0: print_impl(st, 2); aoqi@0: } aoqi@0: #endif