src/share/vm/runtime/advancedThresholdPolicy.cpp

changeset 0
f90c822e73f8
child 6876
710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,529 @@
     1.4 +/*
     1.5 + * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "precompiled.hpp"
    1.29 +#include "runtime/advancedThresholdPolicy.hpp"
    1.30 +#include "runtime/simpleThresholdPolicy.inline.hpp"
    1.31 +
    1.32 +#ifdef TIERED
    1.33 +// Print an event.
    1.34 +void AdvancedThresholdPolicy::print_specific(EventType type, methodHandle mh, methodHandle imh,
    1.35 +                                             int bci, CompLevel level) {
    1.36 +  tty->print(" rate=");
    1.37 +  if (mh->prev_time() == 0) tty->print("n/a");
    1.38 +  else tty->print("%f", mh->rate());
    1.39 +
    1.40 +  tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
    1.41 +                               threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
    1.42 +
    1.43 +}
    1.44 +
// One-time policy setup: ergonomically size the C1/C2 compiler thread pools,
// apply per-platform inlining tuning, and record the policy start time used
// as the baseline for rate measurements (see update_rate()).
void AdvancedThresholdPolicy::initialize() {
  // Turn on ergonomic compiler count selection
  if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
  }
  int count = CICompilerCount;
  if (CICompilerCountPerCPU) {
    // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
    int log_cpu = log2_intptr(os::active_processor_count());
    int loglog_cpu = log2_intptr(MAX2(log_cpu, 1));
    count = MAX2(log_cpu * loglog_cpu, 1) * 3 / 2;
  }

  // Split threads roughly 1:2 between C1 and C2, guaranteeing at least one of each,
  // then publish the resulting total back into CICompilerCount.
  set_c1_count(MAX2(count / 3, 1));
  set_c2_count(MAX2(count - c1_count(), 1));
  FLAG_SET_ERGO(intx, CICompilerCount, c1_count() + c2_count());

  // Some inlining tuning
#ifdef X86
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2000);
  }
#endif

#ifdef SPARC
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2500);
  }
#endif

  set_increase_threshold_at_ratio();
  set_start_time(os::javaTimeMillis());
}
    1.78 +
// update_rate() is called from select_task() while holding a compile queue lock.
// Recomputes m's event rate — (invocations + backedges) per millisecond —
// from the change in counters since the previous sample. Samples taken too
// close to a safepoint or to the previous sample are skipped.
void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) {
  JavaThread* THREAD = JavaThread::current();
  if (is_old(m)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    m->set_rate(0, THREAD);
    return;
  }

  // We don't update the rate if we've just came out of a safepoint.
  // delta_s is the time since last safepoint in milliseconds.
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
  // How many events were there since the last time?
  int event_count = m->invocation_count() + m->backedge_count();
  int delta_e = event_count - m->prev_event_count();

  // We should be running for at least 1ms.
  if (delta_s >= TieredRateUpdateMinTime) {
    // And we must've taken the previous point at least 1ms before.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
      m->set_prev_time(t, THREAD);
      m->set_prev_event_count(event_count, THREAD);
      m->set_rate((float)delta_e / (float)delta_t, THREAD); // Rate is events per millisecond
    } else
      if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
        // If nothing happened for 25ms, zero the rate. Don't modify prev values.
        m->set_rate(0, THREAD);
      }
  }
}
   1.111 +
   1.112 +// Check if this method has been stale from a given number of milliseconds.
   1.113 +// See select_task().
   1.114 +bool AdvancedThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) {
   1.115 +  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
   1.116 +  jlong delta_t = t - m->prev_time();
   1.117 +  if (delta_t > timeout && delta_s > timeout) {
   1.118 +    int event_count = m->invocation_count() + m->backedge_count();
   1.119 +    int delta_e = event_count - m->prev_event_count();
   1.120 +    // Return true if there were no events.
   1.121 +    return delta_e == 0;
   1.122 +  }
   1.123 +  return false;
   1.124 +}
   1.125 +
   1.126 +// We don't remove old methods from the compile queue even if they have
   1.127 +// very low activity. See select_task().
   1.128 +bool AdvancedThresholdPolicy::is_old(Method* method) {
   1.129 +  return method->invocation_count() > 50000 || method->backedge_count() > 500000;
   1.130 +}
   1.131 +
   1.132 +double AdvancedThresholdPolicy::weight(Method* method) {
   1.133 +  return (method->rate() + 1) * ((method->invocation_count() + 1) *  (method->backedge_count() + 1));
   1.134 +}
   1.135 +
   1.136 +// Apply heuristics and return true if x should be compiled before y
   1.137 +bool AdvancedThresholdPolicy::compare_methods(Method* x, Method* y) {
   1.138 +  if (x->highest_comp_level() > y->highest_comp_level()) {
   1.139 +    // recompilation after deopt
   1.140 +    return true;
   1.141 +  } else
   1.142 +    if (x->highest_comp_level() == y->highest_comp_level()) {
   1.143 +      if (weight(x) > weight(y)) {
   1.144 +        return true;
   1.145 +      }
   1.146 +    }
   1.147 +  return false;
   1.148 +}
   1.149 +
   1.150 +// Is method profiled enough?
   1.151 +bool AdvancedThresholdPolicy::is_method_profiled(Method* method) {
   1.152 +  MethodData* mdo = method->method_data();
   1.153 +  if (mdo != NULL) {
   1.154 +    int i = mdo->invocation_count_delta();
   1.155 +    int b = mdo->backedge_count_delta();
   1.156 +    return call_predicate_helper<CompLevel_full_profile>(i, b, 1);
   1.157 +  }
   1.158 +  return false;
   1.159 +}
   1.160 +
// Called with the queue locked and with at least one element
// Selects the next task to compile: refreshes every queued method's event
// rate, removes tasks whose methods have gone stale (unless "old"), and
// returns the maximum per compare_methods(). A level-3 winner whose method
// is already profiled enough is downgraded to level 2 in place.
CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
  CompileTask *max_task = NULL;
  Method* max_method = NULL;
  jlong t = os::javaTimeMillis();
  // Iterate through the queue and find a method with a maximum rate.
  for (CompileTask* task = compile_queue->first(); task != NULL;) {
    CompileTask* next_task = task->next();
    Method* method = task->method();
    MethodData* mdo = method->method_data();  // NOTE(review): unused local — candidate for removal
    update_rate(t, method);
    if (max_task == NULL) {
      // The first task becomes the initial candidate; it is never subjected
      // to the stale check, so max_task is non-NULL after the loop.
      max_task = task;
      max_method = method;
    } else {
      // If a method has been stale for some time, remove it from the queue.
      if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
        if (PrintTieredEvents) {
          print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
        }
        CompileTaskWrapper ctw(task); // Frees the task
        compile_queue->remove(task);
        method->clear_queued_for_compilation();
        task = next_task;
        continue;
      }

      // Select a method with a higher rate
      if (compare_methods(method, max_method)) {
        max_task = task;
        max_method = method;
      }
    }
    task = next_task;
  }

  // If the chosen method was queued for full profiling (level 3) but is
  // already sufficiently profiled in the interpreter, compile the cheaper
  // limited-profile (level 2) version instead.
  if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile
      && is_method_profiled(max_method)) {
    max_task->set_comp_level(CompLevel_limited_profile);
    if (PrintTieredEvents) {
      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
    }
  }

  return max_task;
}
   1.207 +
   1.208 +double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
   1.209 +  double queue_size = CompileBroker::queue_size(level);
   1.210 +  int comp_count = compiler_count(level);
   1.211 +  double k = queue_size / (feedback_k * comp_count) + 1;
   1.212 +
   1.213 +  // Increase C1 compile threshold when the code cache is filled more
   1.214 +  // than specified by IncreaseFirstTierCompileThresholdAt percentage.
   1.215 +  // The main intention is to keep enough free space for C2 compiled code
   1.216 +  // to achieve peak performance if the code cache is under stress.
   1.217 +  if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization))  {
   1.218 +    double current_reverse_free_ratio = CodeCache::reverse_free_ratio();
   1.219 +    if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
   1.220 +      k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
   1.221 +    }
   1.222 +  }
   1.223 +  return k;
   1.224 +}
   1.225 +
   1.226 +// Call and loop predicates determine whether a transition to a higher
   1.227 +// compilation level should be performed (pointers to predicate functions
   1.228 +// are passed to common()).
   1.229 +// Tier?LoadFeedback is basically a coefficient that determines of
   1.230 +// how many methods per compiler thread can be in the queue before
   1.231 +// the threshold values double.
   1.232 +bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
   1.233 +  switch(cur_level) {
   1.234 +  case CompLevel_none:
   1.235 +  case CompLevel_limited_profile: {
   1.236 +    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
   1.237 +    return loop_predicate_helper<CompLevel_none>(i, b, k);
   1.238 +  }
   1.239 +  case CompLevel_full_profile: {
   1.240 +    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
   1.241 +    return loop_predicate_helper<CompLevel_full_profile>(i, b, k);
   1.242 +  }
   1.243 +  default:
   1.244 +    return true;
   1.245 +  }
   1.246 +}
   1.247 +
   1.248 +bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
   1.249 +  switch(cur_level) {
   1.250 +  case CompLevel_none:
   1.251 +  case CompLevel_limited_profile: {
   1.252 +    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
   1.253 +    return call_predicate_helper<CompLevel_none>(i, b, k);
   1.254 +  }
   1.255 +  case CompLevel_full_profile: {
   1.256 +    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
   1.257 +    return call_predicate_helper<CompLevel_full_profile>(i, b, k);
   1.258 +  }
   1.259 +  default:
   1.260 +    return true;
   1.261 +  }
   1.262 +}
   1.263 +
   1.264 +// If a method is old enough and is still in the interpreter we would want to
   1.265 +// start profiling without waiting for the compiled method to arrive.
   1.266 +// We also take the load on compilers into the account.
   1.267 +bool AdvancedThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
   1.268 +  if (cur_level == CompLevel_none &&
   1.269 +      CompileBroker::queue_size(CompLevel_full_optimization) <=
   1.270 +      Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
   1.271 +    int i = method->invocation_count();
   1.272 +    int b = method->backedge_count();
   1.273 +    double k = Tier0ProfilingStartPercentage / 100.0;
   1.274 +    return call_predicate_helper<CompLevel_none>(i, b, k) || loop_predicate_helper<CompLevel_none>(i, b, k);
   1.275 +  }
   1.276 +  return false;
   1.277 +}
   1.278 +
   1.279 +// Inlining control: if we're compiling a profiled method with C1 and the callee
   1.280 +// is known to have OSRed in a C2 version, don't inline it.
   1.281 +bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
   1.282 +  CompLevel comp_level = (CompLevel)env->comp_level();
   1.283 +  if (comp_level == CompLevel_full_profile ||
   1.284 +      comp_level == CompLevel_limited_profile) {
   1.285 +    return callee->highest_osr_comp_level() == CompLevel_full_optimization;
   1.286 +  }
   1.287 +  return false;
   1.288 +}
   1.289 +
// Create MDO if necessary: lazily allocates the MethodData for mh unless the
// method is never profiled (native/abstract/accessor) or already has one.
void AdvancedThresholdPolicy::create_mdo(methodHandle mh, JavaThread* THREAD) {
  if (mh->is_native() || mh->is_abstract() || mh->is_accessor()) return;
  if (mh->method_data() == NULL) {
    // CHECK_AND_CLEAR discards any exception raised by the MDO allocation,
    // so failure here is silently tolerated (profiling just starts later).
    Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
  }
}
   1.297 +
   1.298 +
   1.299 +/*
   1.300 + * Method states:
   1.301 + *   0 - interpreter (CompLevel_none)
   1.302 + *   1 - pure C1 (CompLevel_simple)
   1.303 + *   2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
   1.304 + *   3 - C1 with full profiling (CompLevel_full_profile)
   1.305 + *   4 - C2 (CompLevel_full_optimization)
   1.306 + *
   1.307 + * Common state transition patterns:
   1.308 + * a. 0 -> 3 -> 4.
   1.309 + *    The most common path. But note that even in this straightforward case
   1.310 + *    profiling can start at level 0 and finish at level 3.
   1.311 + *
   1.312 + * b. 0 -> 2 -> 3 -> 4.
    1.313 + *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
   1.314 + *    into state 3 directly and over-profiling while a method is in the C2 queue we transition to
   1.315 + *    level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
   1.316 + *
   1.317 + * c. 0 -> (3->2) -> 4.
   1.318 + *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
   1.319 + *    to enable the profiling to fully occur at level 0. In this case we change the compilation level
   1.320 + *    of the method to 2, because it'll allow it to run much faster without full profiling while c2
   1.321 + *    is compiling.
   1.322 + *
   1.323 + * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
   1.324 + *    After a method was once compiled with C1 it can be identified as trivial and be compiled to
    1.325 + *    level 1. These transitions can also occur if a method can't be compiled with C2 but can with C1.
   1.326 + *
   1.327 + * e. 0 -> 4.
   1.328 + *    This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
   1.329 + *    or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
   1.330 + *    the compiled version already exists).
   1.331 + *
   1.332 + * Note that since state 0 can be reached from any other state via deoptimization different loops
   1.333 + * are possible.
   1.334 + *
   1.335 + */
   1.336 +
// Common transition function. Given a predicate determines if a method should transition to another level.
// Recurses once (from CompLevel_none with CompLevel_full_profile) to see whether the method
// could skip profiling entirely. 'disable_feedback' suppresses the C2-queue-length feedback.
// The result is always capped at TieredStopAtLevel.
CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

  if (is_trivial(method)) {
    // Trivial methods gain nothing from profiling or C2 — plain C1 (level 1).
    next_level = CompLevel_simple;
  } else {
    switch(cur_level) {
    case CompLevel_none:
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level)) {
        // C1-generated fully profiled code is about 30% slower than the limited profile
        // code that has only invocation and backedge counters. The observation is that
        // if C2 queue is large enough we can spend too much time in the fully profiled code
        // while waiting for C2 to pick the method from the queue. To alleviate this problem
        // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
        // we choose to compile a limited profiled version and then recompile with full profiling
        // when the load on C2 goes down.
        if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
                                 Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
          next_level = CompLevel_limited_profile;
        } else {
          next_level = CompLevel_full_profile;
        }
      }
      break;
    case CompLevel_limited_profile:
      if (is_method_profiled(method)) {
        // Special case: we got here because this method was fully profiled in the interpreter.
        next_level = CompLevel_full_optimization;
      } else {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            // Move up to full profiling once C2 load drops (or unconditionally
            // if feedback is disabled) and the predicate fires.
            if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                     Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                     (this->*p)(i, b, cur_level))) {
              next_level = CompLevel_full_profile;
            }
          } else {
            // Nothing to profile — go straight to C2.
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    case CompLevel_full_profile:
      {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            // Use the MDO's count deltas (events since profiling started)
            // rather than the raw method counters.
            int mdo_i = mdo->invocation_count_delta();
            int mdo_b = mdo->backedge_count_delta();
            if ((this->*p)(mdo_i, mdo_b, cur_level)) {
              next_level = CompLevel_full_optimization;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    }
  }
  // Never exceed the configured maximum tier.
  return MIN2(next_level, (CompLevel)TieredStopAtLevel);
}
   1.406 +
// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel AdvancedThresholdPolicy::call_event(Method* method, CompLevel cur_level) {
  // Level suggested by the method's OSR history, capped by what the loop
  // predicate would allow from cur_level (feedback disabled).
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
                             common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true));
  CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level);

  // If OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each
  // invocation of the method.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    MethodData* mdo = method->method_data();
    guarantee(mdo != NULL, "MDO should not be NULL");
    // Any recorded invocation at full-profile level is enough to promote.
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    next_level = MAX2(osr_level, next_level);
  }
  return next_level;
}
   1.427 +
   1.428 +// Determine if we should do an OSR compilation of a given method.
   1.429 +CompLevel AdvancedThresholdPolicy::loop_event(Method* method, CompLevel cur_level) {
   1.430 +  CompLevel next_level = common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true);
   1.431 +  if (cur_level == CompLevel_none) {
   1.432 +    // If there is a live OSR method that means that we deopted to the interpreter
   1.433 +    // for the transition.
   1.434 +    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
   1.435 +    if (osr_level > CompLevel_none) {
   1.436 +      return osr_level;
   1.437 +    }
   1.438 +  }
   1.439 +  return next_level;
   1.440 +}
   1.441 +
   1.442 +// Update the rate and submit compile
   1.443 +void AdvancedThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
   1.444 +  int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
   1.445 +  update_rate(os::javaTimeMillis(), mh());
   1.446 +  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
   1.447 +}
   1.448 +
   1.449 +// Handle the invocation event.
   1.450 +void AdvancedThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
   1.451 +                                                      CompLevel level, nmethod* nm, JavaThread* thread) {
   1.452 +  if (should_create_mdo(mh(), level)) {
   1.453 +    create_mdo(mh, thread);
   1.454 +  }
   1.455 +  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
   1.456 +    CompLevel next_level = call_event(mh(), level);
   1.457 +    if (next_level != level) {
   1.458 +      compile(mh, InvocationEntryBci, next_level, thread);
   1.459 +    }
   1.460 +  }
   1.461 +}
   1.462 +
// Handle the back branch event. Notice that we can compile the method
// with a regular entry from here.
// 'mh' is the enclosing (compiled) method, 'imh' the method containing the
// loop (they differ when the loop method was inlined into mh's nmethod 'nm').
void AdvancedThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
                                                       int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  // Check if MDO should be created for the inlined method
  if (should_create_mdo(imh(), level)) {
    create_mdo(imh, thread);
  }

  if (is_compilation_enabled()) {
    CompLevel next_osr_level = loop_event(imh(), level);
    CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
    // At the very least compile the OSR version
    if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_osr_level != level) {
      compile(imh, bci, next_osr_level, thread);
    }

    // Use loop event as an opportunity to also check if there's been
    // enough calls.
    CompLevel cur_level, next_level;
    if (mh() != imh()) { // If there is an enclosing method
      guarantee(nm != NULL, "Should have nmethod here");
      cur_level = comp_level(mh());
      next_level = call_event(mh(), cur_level);

      if (max_osr_level == CompLevel_full_optimization) {
        // The inlinee OSRed to full opt, we need to modify the enclosing method to avoid deopts
        bool make_not_entrant = false;
        if (nm->is_osr_method()) {
          // This is an osr method, just make it not entrant and recompile later if needed
          make_not_entrant = true;
        } else {
          if (next_level != CompLevel_full_optimization) {
            // next_level is not full opt, so we need to recompile the
            // enclosing method without the inlinee
            cur_level = CompLevel_none;
            make_not_entrant = true;
          }
        }
        if (make_not_entrant) {
          if (PrintTieredEvents) {
            int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
            print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
          }
          nm->make_not_entrant();
        }
      }
      if (!CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
        // Fix up next_level if necessary to avoid deopts
        if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
          next_level = CompLevel_full_profile;
        }
        if (cur_level != next_level) {
          compile(mh, InvocationEntryBci, next_level, thread);
        }
      }
    } else {
      // No enclosing method: re-evaluate the loop method's own regular-entry level.
      cur_level = comp_level(imh());
      next_level = call_event(imh(), cur_level);
      if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_level != cur_level) {
        compile(imh, InvocationEntryBci, next_level, thread);
      }
    }
  }
}
   1.531 +
   1.532 +#endif // TIERED

mercurial