src/share/vm/runtime/compilationPolicy.cpp

changeset 0: f90c822e73f8
child 1:     2d8a650513c2
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/runtime/compilationPolicy.cpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,717 @@
     1.4 +/*
     1.5 + * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "precompiled.hpp"
    1.29 +#include "code/compiledIC.hpp"
    1.30 +#include "code/nmethod.hpp"
    1.31 +#include "code/scopeDesc.hpp"
    1.32 +#include "compiler/compilerOracle.hpp"
    1.33 +#include "interpreter/interpreter.hpp"
    1.34 +#include "oops/methodData.hpp"
    1.35 +#include "oops/method.hpp"
    1.36 +#include "oops/oop.inline.hpp"
    1.37 +#include "prims/nativeLookup.hpp"
    1.38 +#include "runtime/advancedThresholdPolicy.hpp"
    1.39 +#include "runtime/compilationPolicy.hpp"
    1.40 +#include "runtime/frame.hpp"
    1.41 +#include "runtime/handles.inline.hpp"
    1.42 +#include "runtime/rframe.hpp"
    1.43 +#include "runtime/simpleThresholdPolicy.hpp"
    1.44 +#include "runtime/stubRoutines.hpp"
    1.45 +#include "runtime/thread.hpp"
    1.46 +#include "runtime/timer.hpp"
    1.47 +#include "runtime/vframe.hpp"
    1.48 +#include "runtime/vm_operations.hpp"
    1.49 +#include "utilities/events.hpp"
    1.50 +#include "utilities/globalDefinitions.hpp"
    1.51 +
    1.52 +CompilationPolicy* CompilationPolicy::_policy;
    1.53 +elapsedTimer       CompilationPolicy::_accumulated_time;
    1.54 +bool               CompilationPolicy::_in_vm_startup;
    1.55 +
    1.56 +// Determine compilation policy based on command line argument
    1.57 +void compilationPolicy_init() {
    1.58 +  CompilationPolicy::set_in_vm_startup(DelayCompilationDuringStartup);
    1.59 +
    1.60 +  switch(CompilationPolicyChoice) {
    1.61 +  case 0:
    1.62 +    CompilationPolicy::set_policy(new SimpleCompPolicy());
    1.63 +    break;
    1.64 +
    1.65 +  case 1:
    1.66 +#ifdef COMPILER2
    1.67 +    CompilationPolicy::set_policy(new StackWalkCompPolicy());
    1.68 +#else
    1.69 +    Unimplemented();
    1.70 +#endif
    1.71 +    break;
    1.72 +  case 2:
    1.73 +#ifdef TIERED
    1.74 +    CompilationPolicy::set_policy(new SimpleThresholdPolicy());
    1.75 +#else
    1.76 +    Unimplemented();
    1.77 +#endif
    1.78 +    break;
    1.79 +  case 3:
    1.80 +#ifdef TIERED
    1.81 +    CompilationPolicy::set_policy(new AdvancedThresholdPolicy());
    1.82 +#else
    1.83 +    Unimplemented();
    1.84 +#endif
    1.85 +    break;
    1.86 +  default:
    1.87 +    fatal("CompilationPolicyChoice must be in the range: [0-3]");
    1.88 +  }
    1.89 +  CompilationPolicy::policy()->initialize();
    1.90 +}
    1.91 +
    1.92 +void CompilationPolicy::completed_vm_startup() {
    1.93 +  if (TraceCompilationPolicy) {
    1.94 +    tty->print("CompilationPolicy: completed vm startup.\n");
    1.95 +  }
    1.96 +  _in_vm_startup = false;
    1.97 +}
    1.98 +
    1.99 +// Returns true if m must be compiled before executing it
   1.100 +// This is intended to force compiles for methods (usually for
   1.101 +// debugging) that would otherwise be interpreted for some reason.
   1.102 +bool CompilationPolicy::must_be_compiled(methodHandle m, int comp_level) {
   1.103 +  // Don't allow Xcomp to cause compiles in replay mode
   1.104 +  if (ReplayCompiles) return false;
   1.105 +
   1.106 +  if (m->has_compiled_code()) return false;       // already compiled
   1.107 +  if (!can_be_compiled(m, comp_level)) return false;
   1.108 +
   1.109 +  return !UseInterpreter ||                                              // must compile all methods
   1.110 +         (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
   1.111 +}
   1.112 +
   1.113 +// Returns true if m is allowed to be compiled
   1.114 +bool CompilationPolicy::can_be_compiled(methodHandle m, int comp_level) {
   1.115 +  // allow any levels for WhiteBox
   1.116 +  assert(WhiteBoxAPI || comp_level == CompLevel_all || is_compile(comp_level), "illegal compilation level");
   1.117 +
   1.118 +  if (m->is_abstract()) return false;
   1.119 +  if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
   1.120 +
   1.121 +  // Math intrinsics should never be compiled as this can lead to
   1.122 +  // monotonicity problems because the interpreter will prefer the
   1.123 +  // compiled code to the intrinsic version.  This can't happen in
   1.124 +  // production because the invocation counter can't be incremented
   1.125 +  // but we shouldn't expose the system to this problem in testing
   1.126 +  // modes.
   1.127 +  if (!AbstractInterpreter::can_be_compiled(m)) {
   1.128 +    return false;
   1.129 +  }
   1.130 +  if (comp_level == CompLevel_all) {
   1.131 +    if (TieredCompilation) {
   1.132 +      // enough to be compilable at any level for tiered
   1.133 +      return !m->is_not_compilable(CompLevel_simple) || !m->is_not_compilable(CompLevel_full_optimization);
   1.134 +    } else {
   1.135 +      // must be compilable at available level for non-tiered
   1.136 +      return !m->is_not_compilable(CompLevel_highest_tier);
   1.137 +    }
   1.138 +  } else if (is_compile(comp_level)) {
   1.139 +    return !m->is_not_compilable(comp_level);
   1.140 +  }
   1.141 +  return false;
   1.142 +}
   1.143 +
   1.144 +// Returns true if m is allowed to be osr compiled
   1.145 +bool CompilationPolicy::can_be_osr_compiled(methodHandle m, int comp_level) {
   1.146 +  bool result = false;
   1.147 +  if (comp_level == CompLevel_all) {
   1.148 +    if (TieredCompilation) {
   1.149 +      // enough to be osr compilable at any level for tiered
   1.150 +      result = !m->is_not_osr_compilable(CompLevel_simple) || !m->is_not_osr_compilable(CompLevel_full_optimization);
   1.151 +    } else {
   1.152 +      // must be osr compilable at available level for non-tiered
   1.153 +      result = !m->is_not_osr_compilable(CompLevel_highest_tier);
   1.154 +    }
   1.155 +  } else if (is_compile(comp_level)) {
   1.156 +    result = !m->is_not_osr_compilable(comp_level);
   1.157 +  }
   1.158 +  return (result && can_be_compiled(m, comp_level));
   1.159 +}
   1.160 +
   1.161 +bool CompilationPolicy::is_compilation_enabled() {
   1.162 +  // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
   1.163 +  return !delay_compilation_during_startup() && CompileBroker::should_compile_new_jobs();
   1.164 +}
   1.165 +
   1.166 +#ifndef PRODUCT
   1.167 +void CompilationPolicy::print_time() {
   1.168 +  tty->print_cr ("Accumulated compilationPolicy times:");
   1.169 +  tty->print_cr ("---------------------------");
   1.170 +  tty->print_cr ("  Total: %3.3f sec.", _accumulated_time.seconds());
   1.171 +}
   1.172 +
   1.173 +void NonTieredCompPolicy::trace_osr_completion(nmethod* osr_nm) {
   1.174 +  if (TraceOnStackReplacement) {
   1.175 +    if (osr_nm == NULL) tty->print_cr("compilation failed");
   1.176 +    else tty->print_cr("nmethod " INTPTR_FORMAT, p2i(osr_nm));
   1.177 +  }
   1.178 +}
   1.179 +#endif // !PRODUCT
   1.180 +
   1.181 +void NonTieredCompPolicy::initialize() {
   1.182 +  // Setup the compiler thread numbers
   1.183 +  if (CICompilerCountPerCPU) {
   1.184 +    // Example: if CICompilerCountPerCPU is true, then we get
   1.185 +    // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
   1.186 +    // May help big-app startup time.
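          +    // (Illustrative arithmetic only: on a 32-way machine the same formula
          +    // would give max(log2(32)-1, 1) = 4 compiler threads.)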
   1.187 +    _compiler_count = MAX2(log2_intptr(os::active_processor_count())-1,1);
   1.188 +    FLAG_SET_ERGO(intx, CICompilerCount, _compiler_count);
   1.189 +  } else {
   1.190 +    _compiler_count = CICompilerCount;
   1.191 +  }
   1.192 +}
   1.193 +
   1.194 +// Note: this policy is used ONLY if TieredCompilation is off.
   1.195 +// compiler_count() behaves the following way:
   1.196 +// - with TIERED build (with both COMPILER1 and COMPILER2 defined) it should return
   1.197 +//   zero for the c1 compilation levels, hence the particular ordering of the
   1.198 +//   statements.
   1.199 +// - the same should happen when COMPILER2 is defined and COMPILER1 is not
   1.200 +//   (server build without TIERED defined).
   1.201 +// - if only COMPILER1 is defined (client build), zero should be returned for
   1.202 +//   the c2 level.
   1.203 +// - if neither is defined - always return zero.
   1.204 +int NonTieredCompPolicy::compiler_count(CompLevel comp_level) {
   1.205 +  assert(!TieredCompilation, "This policy should not be used with TieredCompilation");
   1.206 +#ifdef COMPILER2
   1.207 +  if (is_c2_compile(comp_level)) {
   1.208 +    return _compiler_count;
   1.209 +  } else {
   1.210 +    return 0;
   1.211 +  }
   1.212 +#endif
   1.213 +
   1.214 +#ifdef COMPILER1
   1.215 +  if (is_c1_compile(comp_level)) {
   1.216 +    return _compiler_count;
   1.217 +  } else {
   1.218 +    return 0;
   1.219 +  }
   1.220 +#endif
   1.221 +
   1.222 +  return 0;
   1.223 +}
   1.224 +
   1.225 +void NonTieredCompPolicy::reset_counter_for_invocation_event(methodHandle m) {
   1.226 +  // Make sure invocation and backedge counter doesn't overflow again right away
   1.227 +  // as would be the case for native methods.
   1.228 +
   1.229 +  // BUT also make sure the method doesn't look like it was never executed.
   1.230 +  // Set carry bit and reduce counter's value to min(count, CompileThreshold/2).
   1.231 +  MethodCounters* mcs = m->method_counters();
   1.232 +  assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
   1.233 +  mcs->invocation_counter()->set_carry();
   1.234 +  mcs->backedge_counter()->set_carry();
   1.235 +
   1.236 +  assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed");
   1.237 +}
   1.238 +
   1.239 +void NonTieredCompPolicy::reset_counter_for_back_branch_event(methodHandle m) {
    1.240 +  // Delay next back-branch event but pump up invocation counter to trigger
   1.241 +  // whole method compilation.
   1.242 +  MethodCounters* mcs = m->method_counters();
   1.243 +  assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
   1.244 +  InvocationCounter* i = mcs->invocation_counter();
   1.245 +  InvocationCounter* b = mcs->backedge_counter();
   1.246 +
    1.247 +  // Don't set invocation_counter's value too low, otherwise the method will
    1.248 +  // look immature (ic < ~5300), which prevents inlining based on
    1.249 +  // type profiling.
   1.250 +  i->set(i->state(), CompileThreshold);
   1.251 +  // Don't reset counter too low - it is used to check if OSR method is ready.
   1.252 +  b->set(b->state(), CompileThreshold / 2);
   1.253 +}
   1.254 +
   1.255 +//
   1.256 +// CounterDecay
   1.257 +//
    1.258 +// Iterates through invocation counters and decrements them. This
   1.259 +// is done at each safepoint.
   1.260 +//
   1.261 +class CounterDecay : public AllStatic {
   1.262 +  static jlong _last_timestamp;
   1.263 +  static void do_method(Method* m) {
   1.264 +    MethodCounters* mcs = m->method_counters();
   1.265 +    if (mcs != NULL) {
   1.266 +      mcs->invocation_counter()->decay();
   1.267 +    }
   1.268 +  }
   1.269 +public:
   1.270 +  static void decay();
   1.271 +  static bool is_decay_needed() {
   1.272 +    return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength;
   1.273 +  }
   1.274 +};
   1.275 +
   1.276 +jlong CounterDecay::_last_timestamp = 0;
   1.277 +
   1.278 +void CounterDecay::decay() {
   1.279 +  _last_timestamp = os::javaTimeMillis();
   1.280 +
   1.281 +  // This operation is going to be performed only at the end of a safepoint
    1.282 +  // and hence GCs will not be going on; all Java mutators are suspended
    1.283 +  // at this point, and hence the SystemDictionary_lock is also not needed.
   1.284 +  assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
   1.285 +  int nclasses = SystemDictionary::number_of_classes();
   1.286 +  double classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
   1.287 +                                        CounterHalfLifeTime);
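          +  // Rough illustration, assuming default-style settings of
          +  // CounterDecayMinIntervalLength = 500 (ms) and CounterHalfLifeTime = 30 (s):
          +  // classes_per_tick = nclasses * (0.5 / 30), i.e. roughly 1/60 of the loaded
          +  // classes have their methods' counters decayed on each decay tick.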
   1.288 +  for (int i = 0; i < classes_per_tick; i++) {
   1.289 +    Klass* k = SystemDictionary::try_get_next_class();
   1.290 +    if (k != NULL && k->oop_is_instance()) {
   1.291 +      InstanceKlass::cast(k)->methods_do(do_method);
   1.292 +    }
   1.293 +  }
   1.294 +}
   1.295 +
   1.296 +// Called at the end of the safepoint
   1.297 +void NonTieredCompPolicy::do_safepoint_work() {
   1.298 +  if(UseCounterDecay && CounterDecay::is_decay_needed()) {
   1.299 +    CounterDecay::decay();
   1.300 +  }
   1.301 +}
   1.302 +
   1.303 +void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
   1.304 +  ScopeDesc* sd = trap_scope;
   1.305 +  MethodCounters* mcs;
   1.306 +  InvocationCounter* c;
   1.307 +  for (; !sd->is_top(); sd = sd->sender()) {
   1.308 +    mcs = sd->method()->method_counters();
   1.309 +    if (mcs != NULL) {
   1.310 +      // Reset ICs of inlined methods, since they can trigger compilations also.
   1.311 +      mcs->invocation_counter()->reset();
   1.312 +    }
   1.313 +  }
   1.314 +  mcs = sd->method()->method_counters();
   1.315 +  if (mcs != NULL) {
   1.316 +    c = mcs->invocation_counter();
   1.317 +    if (is_osr) {
   1.318 +      // It was an OSR method, so bump the count higher.
   1.319 +      c->set(c->state(), CompileThreshold);
   1.320 +    } else {
   1.321 +      c->reset();
   1.322 +    }
   1.323 +    mcs->backedge_counter()->reset();
   1.324 +  }
   1.325 +}
   1.326 +
   1.327 +// This method can be called by any component of the runtime to notify the policy
    1.328 +// that it's recommended to delay the compilation of this method.
   1.329 +void NonTieredCompPolicy::delay_compilation(Method* method) {
   1.330 +  MethodCounters* mcs = method->method_counters();
   1.331 +  if (mcs != NULL) {
   1.332 +    mcs->invocation_counter()->decay();
   1.333 +    mcs->backedge_counter()->decay();
   1.334 +  }
   1.335 +}
   1.336 +
   1.337 +void NonTieredCompPolicy::disable_compilation(Method* method) {
   1.338 +  MethodCounters* mcs = method->method_counters();
   1.339 +  if (mcs != NULL) {
   1.340 +    mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
   1.341 +    mcs->backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
   1.342 +  }
   1.343 +}
   1.344 +
   1.345 +CompileTask* NonTieredCompPolicy::select_task(CompileQueue* compile_queue) {
   1.346 +  return compile_queue->first();
   1.347 +}
   1.348 +
   1.349 +bool NonTieredCompPolicy::is_mature(Method* method) {
   1.350 +  MethodData* mdo = method->method_data();
   1.351 +  assert(mdo != NULL, "Should be");
   1.352 +  uint current = mdo->mileage_of(method);
   1.353 +  uint initial = mdo->creation_mileage();
   1.354 +  if (current < initial)
   1.355 +    return true;  // some sort of overflow
   1.356 +  uint target;
   1.357 +  if (ProfileMaturityPercentage <= 0)
   1.358 +    target = (uint) -ProfileMaturityPercentage;  // absolute value
   1.359 +  else
   1.360 +    target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
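          +  // For example (illustrative values only): with ProfileMaturityPercentage = 20
          +  // and CompileThreshold = 10000, target = (20 * 10000) / 100 = 2000, so the
          +  // profile counts as mature once roughly 2000 invocations/backedges of mileage
          +  // have accumulated since the MDO was created.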
   1.361 +  return (current >= initial + target);
   1.362 +}
   1.363 +
   1.364 +nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci,
   1.365 +                                    int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) {
   1.366 +  assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
   1.367 +  NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
   1.368 +  if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) {
   1.369 +    // If certain JVMTI events (e.g. frame pop event) are requested then the
   1.370 +    // thread is forced to remain in interpreted code. This is
   1.371 +    // implemented partly by a check in the run_compiled_code
   1.372 +    // section of the interpreter whether we should skip running
   1.373 +    // compiled code, and partly by skipping OSR compiles for
   1.374 +    // interpreted-only threads.
   1.375 +    if (bci != InvocationEntryBci) {
   1.376 +      reset_counter_for_back_branch_event(method);
   1.377 +      return NULL;
   1.378 +    }
   1.379 +  }
   1.380 +  if (CompileTheWorld || ReplayCompiles) {
   1.381 +    // Don't trigger other compiles in testing mode
   1.382 +    if (bci == InvocationEntryBci) {
   1.383 +      reset_counter_for_invocation_event(method);
   1.384 +    } else {
   1.385 +      reset_counter_for_back_branch_event(method);
   1.386 +    }
   1.387 +    return NULL;
   1.388 +  }
   1.389 +
   1.390 +  if (bci == InvocationEntryBci) {
   1.391 +    // when code cache is full, compilation gets switched off, UseCompiler
   1.392 +    // is set to false
   1.393 +    if (!method->has_compiled_code() && UseCompiler) {
   1.394 +      method_invocation_event(method, thread);
   1.395 +    } else {
   1.396 +      // Force counter overflow on method entry, even if no compilation
   1.397 +      // happened.  (The method_invocation_event call does this also.)
   1.398 +      reset_counter_for_invocation_event(method);
   1.399 +    }
    1.400 +    // Compilation at an invocation overflow no longer goes back and retries the
    1.401 +    // test for a compiled method. We always run the loser of the race as interpreted,
    1.402 +    // so return NULL.
   1.403 +    return NULL;
   1.404 +  } else {
   1.405 +    // counter overflow in a loop => try to do on-stack-replacement
   1.406 +    nmethod* osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
   1.407 +    NOT_PRODUCT(trace_osr_request(method, osr_nm, bci));
    1.408 +    // when the code cache is full, we should not compile any more...
   1.409 +    if (osr_nm == NULL && UseCompiler) {
   1.410 +      method_back_branch_event(method, bci, thread);
   1.411 +      osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
   1.412 +    }
   1.413 +    if (osr_nm == NULL) {
   1.414 +      reset_counter_for_back_branch_event(method);
   1.415 +      return NULL;
   1.416 +    }
   1.417 +    return osr_nm;
   1.418 +  }
   1.419 +  return NULL;
   1.420 +}
   1.421 +
   1.422 +#ifndef PRODUCT
   1.423 +PRAGMA_FORMAT_NONLITERAL_IGNORED_EXTERNAL
   1.424 +void NonTieredCompPolicy::trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci) {
   1.425 +  if (TraceInvocationCounterOverflow) {
   1.426 +    MethodCounters* mcs = m->method_counters();
   1.427 +    assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
   1.428 +    InvocationCounter* ic = mcs->invocation_counter();
   1.429 +    InvocationCounter* bc = mcs->backedge_counter();
   1.430 +    ResourceMark rm;
   1.431 +    const char* msg =
   1.432 +      bci == InvocationEntryBci
   1.433 +      ? "comp-policy cntr ovfl @ %d in entry of "
   1.434 +      : "comp-policy cntr ovfl @ %d in loop of ";
   1.435 +PRAGMA_DIAG_PUSH
   1.436 +PRAGMA_FORMAT_NONLITERAL_IGNORED_INTERNAL
   1.437 +    tty->print(msg, bci);
   1.438 +PRAGMA_DIAG_POP
   1.439 +    m->print_value();
   1.440 +    tty->cr();
   1.441 +    ic->print();
   1.442 +    bc->print();
   1.443 +    if (ProfileInterpreter) {
   1.444 +      if (bci != InvocationEntryBci) {
   1.445 +        MethodData* mdo = m->method_data();
   1.446 +        if (mdo != NULL) {
   1.447 +          int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
   1.448 +          tty->print_cr("back branch count = %d", count);
   1.449 +        }
   1.450 +      }
   1.451 +    }
   1.452 +  }
   1.453 +}
   1.454 +
   1.455 +void NonTieredCompPolicy::trace_osr_request(methodHandle method, nmethod* osr, int bci) {
   1.456 +  if (TraceOnStackReplacement) {
   1.457 +    ResourceMark rm;
   1.458 +    tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
   1.459 +    method->print_short_name(tty);
   1.460 +    tty->print_cr(" at bci %d", bci);
   1.461 +  }
   1.462 +}
   1.463 +#endif // !PRODUCT
   1.464 +
   1.465 +// SimpleCompPolicy - compile current method
   1.466 +
   1.467 +void SimpleCompPolicy::method_invocation_event(methodHandle m, JavaThread* thread) {
   1.468 +  const int comp_level = CompLevel_highest_tier;
   1.469 +  const int hot_count = m->invocation_count();
   1.470 +  reset_counter_for_invocation_event(m);
   1.471 +  const char* comment = "count";
   1.472 +
   1.473 +  if (is_compilation_enabled() && can_be_compiled(m, comp_level)) {
   1.474 +    nmethod* nm = m->code();
   1.475 +    if (nm == NULL ) {
   1.476 +      CompileBroker::compile_method(m, InvocationEntryBci, comp_level, m, hot_count, comment, thread);
   1.477 +    }
   1.478 +  }
   1.479 +}
   1.480 +
   1.481 +void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, JavaThread* thread) {
   1.482 +  const int comp_level = CompLevel_highest_tier;
   1.483 +  const int hot_count = m->backedge_count();
   1.484 +  const char* comment = "backedge_count";
   1.485 +
   1.486 +  if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
   1.487 +    CompileBroker::compile_method(m, bci, comp_level, m, hot_count, comment, thread);
   1.488 +    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
   1.489 +  }
   1.490 +}
   1.491 +// StackWalkCompPolicy - walk up stack to find a suitable method to compile
   1.492 +
   1.493 +#ifdef COMPILER2
   1.494 +const char* StackWalkCompPolicy::_msg = NULL;
   1.495 +
   1.496 +
   1.497 +// Consider m for compilation
   1.498 +void StackWalkCompPolicy::method_invocation_event(methodHandle m, JavaThread* thread) {
   1.499 +  const int comp_level = CompLevel_highest_tier;
   1.500 +  const int hot_count = m->invocation_count();
   1.501 +  reset_counter_for_invocation_event(m);
   1.502 +  const char* comment = "count";
   1.503 +
   1.504 +  if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m, comp_level)) {
   1.505 +    ResourceMark rm(thread);
   1.506 +    frame       fr     = thread->last_frame();
   1.507 +    assert(fr.is_interpreted_frame(), "must be interpreted");
   1.508 +    assert(fr.interpreter_frame_method() == m(), "bad method");
   1.509 +
   1.510 +    if (TraceCompilationPolicy) {
   1.511 +      tty->print("method invocation trigger: ");
   1.512 +      m->print_short_name(tty);
   1.513 +      tty->print(" ( interpreted " INTPTR_FORMAT ", size=%d ) ", p2i((address)m()), m->code_size());
   1.514 +    }
   1.515 +    RegisterMap reg_map(thread, false);
   1.516 +    javaVFrame* triggerVF = thread->last_java_vframe(&reg_map);
   1.517 +    // triggerVF is the frame that triggered its counter
   1.518 +    RFrame* first = new InterpretedRFrame(triggerVF->fr(), thread, m);
   1.519 +
   1.520 +    if (first->top_method()->code() != NULL) {
   1.521 +      // called obsolete method/nmethod -- no need to recompile
   1.522 +      if (TraceCompilationPolicy) tty->print_cr(" --> " INTPTR_FORMAT, p2i(first->top_method()->code()));
   1.523 +    } else {
   1.524 +      if (TimeCompilationPolicy) accumulated_time()->start();
   1.525 +      GrowableArray<RFrame*>* stack = new GrowableArray<RFrame*>(50);
   1.526 +      stack->push(first);
   1.527 +      RFrame* top = findTopInlinableFrame(stack);
   1.528 +      if (TimeCompilationPolicy) accumulated_time()->stop();
   1.529 +      assert(top != NULL, "findTopInlinableFrame returned null");
   1.530 +      if (TraceCompilationPolicy) top->print();
   1.531 +      CompileBroker::compile_method(top->top_method(), InvocationEntryBci, comp_level,
   1.532 +                                    m, hot_count, comment, thread);
   1.533 +    }
   1.534 +  }
   1.535 +}
   1.536 +
   1.537 +void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, JavaThread* thread) {
   1.538 +  const int comp_level = CompLevel_highest_tier;
   1.539 +  const int hot_count = m->backedge_count();
   1.540 +  const char* comment = "backedge_count";
   1.541 +
   1.542 +  if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) {
   1.543 +    CompileBroker::compile_method(m, bci, comp_level, m, hot_count, comment, thread);
   1.544 +    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));)
   1.545 +  }
   1.546 +}
   1.547 +
   1.548 +RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray<RFrame*>* stack) {
   1.549 +  // go up the stack until finding a frame that (probably) won't be inlined
   1.550 +  // into its caller
   1.551 +  RFrame* current = stack->at(0); // current choice for stopping
   1.552 +  assert( current && !current->is_compiled(), "" );
   1.553 +  const char* msg = NULL;
   1.554 +
   1.555 +  while (1) {
   1.556 +
   1.557 +    // before going up the stack further, check if doing so would get us into
   1.558 +    // compiled code
   1.559 +    RFrame* next = senderOf(current, stack);
   1.560 +    if( !next )               // No next frame up the stack?
   1.561 +      break;                  // Then compile with current frame
   1.562 +
   1.563 +    methodHandle m = current->top_method();
   1.564 +    methodHandle next_m = next->top_method();
   1.565 +
   1.566 +    if (TraceCompilationPolicy && Verbose) {
   1.567 +      tty->print("[caller: ");
   1.568 +      next_m->print_short_name(tty);
   1.569 +      tty->print("] ");
   1.570 +    }
   1.571 +
   1.572 +    if( !Inline ) {           // Inlining turned off
   1.573 +      msg = "Inlining turned off";
   1.574 +      break;
   1.575 +    }
    1.576 +    if (next_m->is_not_compilable()) { // Did we fail to compile this before?
   1.577 +      msg = "caller not compilable";
   1.578 +      break;
   1.579 +    }
   1.580 +    if (next->num() > MaxRecompilationSearchLength) {
   1.581 +      // don't go up too high when searching for recompilees
   1.582 +      msg = "don't go up any further: > MaxRecompilationSearchLength";
   1.583 +      break;
   1.584 +    }
   1.585 +    if (next->distance() > MaxInterpretedSearchLength) {
   1.586 +      // don't go up too high when searching for recompilees
   1.587 +      msg = "don't go up any further: next > MaxInterpretedSearchLength";
   1.588 +      break;
   1.589 +    }
    1.590 +    // Compiled frame above already decided not to inline;
    1.591 +    // do not recompile it.
   1.592 +    if (next->is_compiled()) {
   1.593 +      msg = "not going up into optimized code";
   1.594 +      break;
   1.595 +    }
   1.596 +
   1.597 +    // Interpreted frame above us was already compiled.  Do not force
   1.598 +    // a recompile, although if the frame above us runs long enough an
   1.599 +    // OSR might still happen.
   1.600 +    if( current->is_interpreted() && next_m->has_compiled_code() ) {
   1.601 +      msg = "not going up -- already compiled caller";
   1.602 +      break;
   1.603 +    }
   1.604 +
   1.605 +    // Compute how frequent this call site is.  We have current method 'm'.
   1.606 +    // We know next method 'next_m' is interpreted.  Find the call site and
   1.607 +    // check the various invocation counts.
   1.608 +    int invcnt = 0;             // Caller counts
   1.609 +    if (ProfileInterpreter) {
   1.610 +      invcnt = next_m->interpreter_invocation_count();
   1.611 +    }
   1.612 +    int cnt = 0;                // Call site counts
   1.613 +    if (ProfileInterpreter && next_m->method_data() != NULL) {
   1.614 +      ResourceMark rm;
   1.615 +      int bci = next->top_vframe()->bci();
   1.616 +      ProfileData* data = next_m->method_data()->bci_to_data(bci);
   1.617 +      if (data != NULL && data->is_CounterData())
   1.618 +        cnt = data->as_CounterData()->count();
   1.619 +    }
   1.620 +
    1.621 +    // Call-site count / caller invocation count; i.e. is this call site
    1.622 +    // a hot call site for method next_m?
   1.623 +    int freq = (invcnt) ? cnt/invcnt : cnt;
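          +    // Illustrative numbers: a call site executed cnt = 600 times from a caller
          +    // invoked invcnt = 30 times gives freq = 600 / 30 = 20 calls per caller invocation.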
   1.624 +
   1.625 +    // Check size and frequency limits
   1.626 +    if ((msg = shouldInline(m, freq, cnt)) != NULL) {
   1.627 +      break;
   1.628 +    }
   1.629 +    // Check inlining negative tests
   1.630 +    if ((msg = shouldNotInline(m)) != NULL) {
   1.631 +      break;
   1.632 +    }
   1.633 +
   1.634 +
   1.635 +    // If the caller method is too big or something then we do not want to
   1.636 +    // compile it just to inline a method
   1.637 +    if (!can_be_compiled(next_m, CompLevel_any)) {
   1.638 +      msg = "caller cannot be compiled";
   1.639 +      break;
   1.640 +    }
   1.641 +
   1.642 +    if( next_m->name() == vmSymbols::class_initializer_name() ) {
   1.643 +      msg = "do not compile class initializer (OSR ok)";
   1.644 +      break;
   1.645 +    }
   1.646 +
   1.647 +    if (TraceCompilationPolicy && Verbose) {
   1.648 +      tty->print("\n\t     check caller: ");
   1.649 +      next_m->print_short_name(tty);
   1.650 +      tty->print(" ( interpreted " INTPTR_FORMAT ", size=%d ) ", p2i((address)next_m()), next_m->code_size());
   1.651 +    }
   1.652 +
   1.653 +    current = next;
   1.654 +  }
   1.655 +
   1.656 +  assert( !current || !current->is_compiled(), "" );
   1.657 +
   1.658 +  if (TraceCompilationPolicy && msg) tty->print("(%s)\n", msg);
   1.659 +
   1.660 +  return current;
   1.661 +}
   1.662 +
   1.663 +RFrame* StackWalkCompPolicy::senderOf(RFrame* rf, GrowableArray<RFrame*>* stack) {
   1.664 +  RFrame* sender = rf->caller();
   1.665 +  if (sender && sender->num() == stack->length()) stack->push(sender);
   1.666 +  return sender;
   1.667 +}
   1.668 +
   1.669 +
   1.670 +const char* StackWalkCompPolicy::shouldInline(methodHandle m, float freq, int cnt) {
   1.671 +  // Allows targeted inlining
   1.672 +  // positive filter: should send be inlined?  returns NULL (--> yes)
   1.673 +  // or rejection msg
   1.674 +  int max_size = MaxInlineSize;
   1.675 +  int cost = m->code_size();
   1.676 +
   1.677 +  // Check for too many throws (and not too huge)
   1.678 +  if (m->interpreter_throwout_count() > InlineThrowCount && cost < InlineThrowMaxSize ) {
   1.679 +    return NULL;
   1.680 +  }
   1.681 +
   1.682 +  // bump the max size if the call is frequent
   1.683 +  if ((freq >= InlineFrequencyRatio) || (cnt >= InlineFrequencyCount)) {
   1.684 +    if (TraceFrequencyInlining) {
   1.685 +      tty->print("(Inlined frequent method)\n");
   1.686 +      m->print();
   1.687 +    }
   1.688 +    max_size = FreqInlineSize;
   1.689 +  }
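          +  // Illustration with assumed typical values (MaxInlineSize = 35, FreqInlineSize = 325):
          +  // a callee with code_size() == 100 is rejected as "too big" unless this call site
          +  // qualified as frequent above.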
   1.690 +  if (cost > max_size) {
   1.691 +    return (_msg = "too big");
   1.692 +  }
   1.693 +  return NULL;
   1.694 +}
   1.695 +
   1.696 +
   1.697 +const char* StackWalkCompPolicy::shouldNotInline(methodHandle m) {
   1.698 +  // negative filter: should send NOT be inlined?  returns NULL (--> inline) or rejection msg
   1.699 +  if (m->is_abstract()) return (_msg = "abstract method");
   1.700 +  // note: we allow ik->is_abstract()
   1.701 +  if (!m->method_holder()->is_initialized()) return (_msg = "method holder not initialized");
   1.702 +  if (m->is_native()) return (_msg = "native method");
   1.703 +  nmethod* m_code = m->code();
   1.704 +  if (m_code != NULL && m_code->code_size() > InlineSmallCode)
   1.705 +    return (_msg = "already compiled into a big method");
   1.706 +
   1.707 +  // use frequency-based objections only for non-trivial methods
   1.708 +  if (m->code_size() <= MaxTrivialSize) return NULL;
   1.709 +  if (UseInterpreter) {     // don't use counts with -Xcomp
   1.710 +    if ((m->code() == NULL) && m->was_never_executed()) return (_msg = "never executed");
   1.711 +    if (!m->was_executed_more_than(MIN2(MinInliningThreshold, CompileThreshold >> 1))) return (_msg = "executed < MinInliningThreshold times");
   1.712 +  }
   1.713 +  if (Method::has_unloaded_classes_in_signature(m, JavaThread::current())) return (_msg = "unloaded signature classes");
   1.714 +
   1.715 +  return NULL;
   1.716 +}
   1.717 +
   1.718 +
   1.719 +
   1.720 +#endif // COMPILER2
