src/share/vm/runtime/compilationPolicy.cpp

changeset 2138
d5d065957597
parent 2103
3e8fbc61cee8
child 2176
df015ec64052
     1.1 --- a/src/share/vm/runtime/compilationPolicy.cpp	Thu Sep 02 11:40:02 2010 -0700
     1.2 +++ b/src/share/vm/runtime/compilationPolicy.cpp	Fri Sep 03 17:51:07 2010 -0700
     1.3 @@ -45,10 +45,17 @@
     1.4      Unimplemented();
     1.5  #endif
     1.6      break;
     1.7 -
     1.8 +  case 2:
     1.9 +#ifdef TIERED
    1.10 +    CompilationPolicy::set_policy(new SimpleThresholdPolicy());
    1.11 +#else
    1.12 +    Unimplemented();
    1.13 +#endif
    1.14 +    break;
    1.15    default:
    1.16 -    fatal("CompilationPolicyChoice must be in the range: [0-1]");
    1.17 +    fatal("CompilationPolicyChoice must be in the range: [0-2]");
    1.18    }
    1.19 +  CompilationPolicy::policy()->initialize();
    1.20  }
    1.21  
    1.22  void CompilationPolicy::completed_vm_startup() {
    1.23 @@ -61,16 +68,16 @@
    1.24  // Returns true if m must be compiled before executing it
    1.25  // This is intended to force compiles for methods (usually for
    1.26  // debugging) that would otherwise be interpreted for some reason.
    1.27 -bool CompilationPolicy::mustBeCompiled(methodHandle m) {
    1.28 +bool CompilationPolicy::must_be_compiled(methodHandle m, int comp_level) {
    1.29    if (m->has_compiled_code()) return false;       // already compiled
    1.30 -  if (!canBeCompiled(m))      return false;
    1.31 +  if (!can_be_compiled(m, comp_level)) return false;
    1.32  
    1.33    return !UseInterpreter ||                                              // must compile all methods
    1.34           (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
    1.35  }
    1.36  
    1.37  // Returns true if m is allowed to be compiled
    1.38 -bool CompilationPolicy::canBeCompiled(methodHandle m) {
    1.39 +bool CompilationPolicy::can_be_compiled(methodHandle m, int comp_level) {
    1.40    if (m->is_abstract()) return false;
    1.41    if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
    1.42  
    1.43 @@ -83,8 +90,16 @@
    1.44    if (!AbstractInterpreter::can_be_compiled(m)) {
    1.45      return false;
    1.46    }
    1.47 +  if (comp_level == CompLevel_all) {
    1.48 +    return !m->is_not_compilable(CompLevel_simple) && !m->is_not_compilable(CompLevel_full_optimization);
    1.49 +  } else {
    1.50 +    return !m->is_not_compilable(comp_level);
    1.51 +  }
    1.52 +}
    1.53  
    1.54 -  return !m->is_not_compilable();
    1.55 +bool CompilationPolicy::is_compilation_enabled() {
    1.56 +  // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
    1.57 +  return !delay_compilation_during_startup() && CompileBroker::should_compile_new_jobs();
    1.58  }
    1.59  
    1.60  #ifndef PRODUCT
    1.61 @@ -94,7 +109,7 @@
    1.62    tty->print_cr ("  Total: %3.3f sec.", _accumulated_time.seconds());
    1.63  }
    1.64  
    1.65 -static void trace_osr_completion(nmethod* osr_nm) {
    1.66 +void NonTieredCompPolicy::trace_osr_completion(nmethod* osr_nm) {
    1.67    if (TraceOnStackReplacement) {
    1.68      if (osr_nm == NULL) tty->print_cr("compilation failed");
    1.69      else tty->print_cr("nmethod " INTPTR_FORMAT, osr_nm);
    1.70 @@ -102,7 +117,35 @@
    1.71  }
    1.72  #endif // !PRODUCT
    1.73  
    1.74 -void CompilationPolicy::reset_counter_for_invocation_event(methodHandle m) {
    1.75 +void NonTieredCompPolicy::initialize() {
    1.76 +  // Setup the compiler thread numbers
    1.77 +  if (CICompilerCountPerCPU) {
    1.78 +    // Example: if CICompilerCountPerCPU is true, then we get
    1.79 +    // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
    1.80 +    // May help big-app startup time.
    1.81 +    _compiler_count = MAX2(log2_intptr(os::active_processor_count())-1,1);
    1.82 +  } else {
    1.83 +    _compiler_count = CICompilerCount;
    1.84 +  }
    1.85 +}
    1.86 +
    1.87 +int NonTieredCompPolicy::compiler_count(CompLevel comp_level) {
    1.88 +#ifdef COMPILER1
    1.89 +  if (is_c1_compile(comp_level)) {
    1.90 +    return _compiler_count;
    1.91 +  }
    1.92 +#endif
    1.93 +
    1.94 +#ifdef COMPILER2
    1.95 +  if (is_c2_compile(comp_level)) {
    1.96 +    return _compiler_count;
    1.97 +  }
    1.98 +#endif
    1.99 +
   1.100 +  return 0;
   1.101 +}
   1.102 +
   1.103 +void NonTieredCompPolicy::reset_counter_for_invocation_event(methodHandle m) {
   1.104    // Make sure invocation and backedge counter doesn't overflow again right away
   1.105    // as would be the case for native methods.
   1.106  
   1.107 @@ -114,7 +157,7 @@
   1.108    assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed");
   1.109  }
   1.110  
   1.111 -void CompilationPolicy::reset_counter_for_back_branch_event(methodHandle m) {
   1.112 +void NonTieredCompPolicy::reset_counter_for_back_branch_event(methodHandle m) {
   1.113    // Delay next back-branch event but pump up invocation counter to triger
   1.114    // whole method compilation.
   1.115    InvocationCounter* i = m->invocation_counter();
   1.116 @@ -128,6 +171,185 @@
   1.117    b->set(b->state(), CompileThreshold / 2);
   1.118  }
   1.119  
   1.120 +//
   1.121 +// CounterDecay
   1.122 +//
     1.123 +// Iterates through invocation counters and decrements them. This
   1.124 +// is done at each safepoint.
   1.125 +//
   1.126 +class CounterDecay : public AllStatic {
   1.127 +  static jlong _last_timestamp;
   1.128 +  static void do_method(methodOop m) {
   1.129 +    m->invocation_counter()->decay();
   1.130 +  }
   1.131 +public:
   1.132 +  static void decay();
   1.133 +  static bool is_decay_needed() {
   1.134 +    return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength;
   1.135 +  }
   1.136 +};
   1.137 +
   1.138 +jlong CounterDecay::_last_timestamp = 0;
   1.139 +
   1.140 +void CounterDecay::decay() {
   1.141 +  _last_timestamp = os::javaTimeMillis();
   1.142 +
   1.143 +  // This operation is going to be performed only at the end of a safepoint
   1.144 +  // and hence GC's will not be going on, all Java mutators are suspended
   1.145 +  // at this point and hence SystemDictionary_lock is also not needed.
   1.146 +  assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
   1.147 +  int nclasses = SystemDictionary::number_of_classes();
   1.148 +  double classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
   1.149 +                                        CounterHalfLifeTime);
   1.150 +  for (int i = 0; i < classes_per_tick; i++) {
   1.151 +    klassOop k = SystemDictionary::try_get_next_class();
   1.152 +    if (k != NULL && k->klass_part()->oop_is_instance()) {
   1.153 +      instanceKlass::cast(k)->methods_do(do_method);
   1.154 +    }
   1.155 +  }
   1.156 +}
   1.157 +
   1.158 +// Called at the end of the safepoint
   1.159 +void NonTieredCompPolicy::do_safepoint_work() {
   1.160 +  if(UseCounterDecay && CounterDecay::is_decay_needed()) {
   1.161 +    CounterDecay::decay();
   1.162 +  }
   1.163 +}
   1.164 +
   1.165 +void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
   1.166 +  ScopeDesc* sd = trap_scope;
   1.167 +  for (; !sd->is_top(); sd = sd->sender()) {
   1.168 +    // Reset ICs of inlined methods, since they can trigger compilations also.
   1.169 +    sd->method()->invocation_counter()->reset();
   1.170 +  }
   1.171 +  InvocationCounter* c = sd->method()->invocation_counter();
   1.172 +  if (is_osr) {
   1.173 +    // It was an OSR method, so bump the count higher.
   1.174 +    c->set(c->state(), CompileThreshold);
   1.175 +  } else {
   1.176 +    c->reset();
   1.177 +  }
   1.178 +  sd->method()->backedge_counter()->reset();
   1.179 +}
   1.180 +
   1.181 +// This method can be called by any component of the runtime to notify the policy
     1.182 +// that it's recommended to delay the compilation of this method.
   1.183 +void NonTieredCompPolicy::delay_compilation(methodOop method) {
   1.184 +  method->invocation_counter()->decay();
   1.185 +  method->backedge_counter()->decay();
   1.186 +}
   1.187 +
   1.188 +void NonTieredCompPolicy::disable_compilation(methodOop method) {
   1.189 +  method->invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
   1.190 +  method->backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
   1.191 +}
   1.192 +
   1.193 +CompileTask* NonTieredCompPolicy::select_task(CompileQueue* compile_queue) {
   1.194 +  return compile_queue->first();
   1.195 +}
   1.196 +
   1.197 +bool NonTieredCompPolicy::is_mature(methodOop method) {
   1.198 +  methodDataOop mdo = method->method_data();
   1.199 +  assert(mdo != NULL, "Should be");
   1.200 +  uint current = mdo->mileage_of(method);
   1.201 +  uint initial = mdo->creation_mileage();
   1.202 +  if (current < initial)
   1.203 +    return true;  // some sort of overflow
   1.204 +  uint target;
   1.205 +  if (ProfileMaturityPercentage <= 0)
   1.206 +    target = (uint) -ProfileMaturityPercentage;  // absolute value
   1.207 +  else
   1.208 +    target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
   1.209 +  return (current >= initial + target);
   1.210 +}
   1.211 +
   1.212 +nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS) {
   1.213 +  assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
   1.214 +  NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
   1.215 +  if (JvmtiExport::can_post_interpreter_events()) {
   1.216 +    assert(THREAD->is_Java_thread(), "Wrong type of thread");
   1.217 +    if (((JavaThread*)THREAD)->is_interp_only_mode()) {
   1.218 +      // If certain JVMTI events (e.g. frame pop event) are requested then the
   1.219 +      // thread is forced to remain in interpreted code. This is
   1.220 +      // implemented partly by a check in the run_compiled_code
   1.221 +      // section of the interpreter whether we should skip running
   1.222 +      // compiled code, and partly by skipping OSR compiles for
   1.223 +      // interpreted-only threads.
   1.224 +      if (bci != InvocationEntryBci) {
   1.225 +        reset_counter_for_back_branch_event(method);
   1.226 +        return NULL;
   1.227 +      }
   1.228 +    }
   1.229 +  }
   1.230 +  if (bci == InvocationEntryBci) {
   1.231 +    // when code cache is full, compilation gets switched off, UseCompiler
   1.232 +    // is set to false
   1.233 +    if (!method->has_compiled_code() && UseCompiler) {
   1.234 +      method_invocation_event(method, CHECK_NULL);
   1.235 +    } else {
   1.236 +      // Force counter overflow on method entry, even if no compilation
   1.237 +      // happened.  (The method_invocation_event call does this also.)
   1.238 +      reset_counter_for_invocation_event(method);
   1.239 +    }
   1.240 +    // compilation at an invocation overflow no longer goes and retries test for
   1.241 +    // compiled method. We always run the loser of the race as interpreted.
   1.242 +    // so return NULL
   1.243 +    return NULL;
   1.244 +  } else {
   1.245 +    // counter overflow in a loop => try to do on-stack-replacement
   1.246 +    nmethod* osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
   1.247 +    NOT_PRODUCT(trace_osr_request(method, osr_nm, bci));
   1.248 +    // when code cache is full, we should not compile any more...
   1.249 +    if (osr_nm == NULL && UseCompiler) {
   1.250 +      method_back_branch_event(method, bci, CHECK_NULL);
   1.251 +      osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
   1.252 +    }
   1.253 +    if (osr_nm == NULL) {
   1.254 +      reset_counter_for_back_branch_event(method);
   1.255 +      return NULL;
   1.256 +    }
   1.257 +    return osr_nm;
   1.258 +  }
   1.259 +  return NULL;
   1.260 +}
   1.261 +
   1.262 +#ifndef PRODUCT
   1.263 +void NonTieredCompPolicy::trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci) {
   1.264 +  if (TraceInvocationCounterOverflow) {
   1.265 +    InvocationCounter* ic = m->invocation_counter();
   1.266 +    InvocationCounter* bc = m->backedge_counter();
   1.267 +    ResourceMark rm;
   1.268 +    const char* msg =
   1.269 +      bci == InvocationEntryBci
   1.270 +      ? "comp-policy cntr ovfl @ %d in entry of "
   1.271 +      : "comp-policy cntr ovfl @ %d in loop of ";
   1.272 +    tty->print(msg, bci);
   1.273 +    m->print_value();
   1.274 +    tty->cr();
   1.275 +    ic->print();
   1.276 +    bc->print();
   1.277 +    if (ProfileInterpreter) {
   1.278 +      if (bci != InvocationEntryBci) {
   1.279 +        methodDataOop mdo = m->method_data();
   1.280 +        if (mdo != NULL) {
   1.281 +          int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
   1.282 +          tty->print_cr("back branch count = %d", count);
   1.283 +        }
   1.284 +      }
   1.285 +    }
   1.286 +  }
   1.287 +}
   1.288 +
   1.289 +void NonTieredCompPolicy::trace_osr_request(methodHandle method, nmethod* osr, int bci) {
   1.290 +  if (TraceOnStackReplacement) {
   1.291 +    ResourceMark rm;
   1.292 +    tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
   1.293 +    method->print_short_name(tty);
   1.294 +    tty->print_cr(" at bci %d", bci);
   1.295 +  }
   1.296 +}
   1.297 +#endif // !PRODUCT
   1.298 +
   1.299  // SimpleCompPolicy - compile current method
   1.300  
   1.301  void SimpleCompPolicy::method_invocation_event( methodHandle m, TRAPS) {
   1.302 @@ -137,59 +359,28 @@
   1.303    reset_counter_for_invocation_event(m);
   1.304    const char* comment = "count";
   1.305  
   1.306 -  if (!delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler && CompileBroker::should_compile_new_jobs()) {
   1.307 +  if (is_compilation_enabled() && can_be_compiled(m)) {
   1.308      nmethod* nm = m->code();
   1.309      if (nm == NULL ) {
   1.310        const char* comment = "count";
   1.311 -      CompileBroker::compile_method(m, InvocationEntryBci,
   1.312 +      CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_highest_tier,
   1.313                                      m, hot_count, comment, CHECK);
   1.314 -    } else {
   1.315 -#ifdef TIERED
   1.316 -
   1.317 -      if (nm->is_compiled_by_c1()) {
   1.318 -        const char* comment = "tier1 overflow";
   1.319 -        CompileBroker::compile_method(m, InvocationEntryBci,
   1.320 -                                      m, hot_count, comment, CHECK);
   1.321 -      }
   1.322 -#endif // TIERED
   1.323      }
   1.324    }
   1.325  }
   1.326  
   1.327 -void SimpleCompPolicy::method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS) {
   1.328 +void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) {
   1.329    assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now.");
   1.330  
   1.331    int hot_count = m->backedge_count();
   1.332    const char* comment = "backedge_count";
   1.333  
   1.334 -  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) {
   1.335 -    CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
   1.336 -
   1.337 -    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
   1.338 +  if (is_compilation_enabled() && !m->is_not_osr_compilable() && can_be_compiled(m)) {
   1.339 +    CompileBroker::compile_method(m, bci, CompLevel_highest_tier,
   1.340 +                                  m, hot_count, comment, CHECK);
   1.341 +    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true));)
   1.342    }
   1.343  }
   1.344 -
   1.345 -int SimpleCompPolicy::compilation_level(methodHandle m, int branch_bci)
   1.346 -{
   1.347 -#ifdef TIERED
   1.348 -  if (!TieredCompilation) {
   1.349 -    return CompLevel_highest_tier;
   1.350 -  }
   1.351 -  if (/* m()->tier1_compile_done() && */
   1.352 -     // QQQ HACK FIX ME set tier1_compile_done!!
   1.353 -      !m()->is_native()) {
   1.354 -    // Grab the nmethod so it doesn't go away while it's being queried
   1.355 -    nmethod* code = m()->code();
   1.356 -    if (code != NULL && code->is_compiled_by_c1()) {
   1.357 -      return CompLevel_highest_tier;
   1.358 -    }
   1.359 -  }
   1.360 -  return CompLevel_fast_compile;
   1.361 -#else
   1.362 -  return CompLevel_highest_tier;
   1.363 -#endif // TIERED
   1.364 -}
   1.365 -
   1.366  // StackWalkCompPolicy - walk up stack to find a suitable method to compile
   1.367  
   1.368  #ifdef COMPILER2
   1.369 @@ -204,7 +395,7 @@
   1.370    reset_counter_for_invocation_event(m);
   1.371    const char* comment = "count";
   1.372  
   1.373 -  if (m->code() == NULL && !delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler && CompileBroker::should_compile_new_jobs()) {
   1.374 +  if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m)) {
   1.375      ResourceMark rm(THREAD);
   1.376      JavaThread *thread = (JavaThread*)THREAD;
   1.377      frame       fr     = thread->last_frame();
   1.378 @@ -224,10 +415,6 @@
   1.379      if (first->top_method()->code() != NULL) {
   1.380        // called obsolete method/nmethod -- no need to recompile
   1.381        if (TraceCompilationPolicy) tty->print_cr(" --> " INTPTR_FORMAT, first->top_method()->code());
   1.382 -    } else if (compilation_level(m, InvocationEntryBci) == CompLevel_fast_compile) {
   1.383 -      // Tier1 compilation policy avaoids stack walking.
   1.384 -      CompileBroker::compile_method(m, InvocationEntryBci,
   1.385 -                                    m, hot_count, comment, CHECK);
   1.386      } else {
   1.387        if (TimeCompilationPolicy) accumulated_time()->start();
   1.388        GrowableArray<RFrame*>* stack = new GrowableArray<RFrame*>(50);
   1.389 @@ -236,53 +423,25 @@
   1.390        if (TimeCompilationPolicy) accumulated_time()->stop();
   1.391        assert(top != NULL, "findTopInlinableFrame returned null");
   1.392        if (TraceCompilationPolicy) top->print();
   1.393 -      CompileBroker::compile_method(top->top_method(), InvocationEntryBci,
   1.394 +      CompileBroker::compile_method(top->top_method(), InvocationEntryBci, CompLevel_highest_tier,
   1.395                                      m, hot_count, comment, CHECK);
   1.396      }
   1.397    }
   1.398  }
   1.399  
   1.400 -void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS) {
   1.401 +void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) {
   1.402    assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now.");
   1.403  
   1.404    int hot_count = m->backedge_count();
   1.405    const char* comment = "backedge_count";
   1.406  
   1.407 -  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) {
   1.408 -    CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
   1.409 +  if (is_compilation_enabled() && !m->is_not_osr_compilable() && can_be_compiled(m)) {
   1.410 +    CompileBroker::compile_method(m, bci, CompLevel_highest_tier, m, hot_count, comment, CHECK);
   1.411  
   1.412 -    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
   1.413 +    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true));)
   1.414    }
   1.415  }
   1.416  
   1.417 -int StackWalkCompPolicy::compilation_level(methodHandle m, int osr_bci)
   1.418 -{
   1.419 -  int comp_level = CompLevel_full_optimization;
   1.420 -  if (TieredCompilation && osr_bci == InvocationEntryBci) {
   1.421 -    if (CompileTheWorld) {
   1.422 -      // Under CTW, the first compile is tier1, the second tier2
   1.423 -      if (m->highest_tier_compile() == CompLevel_none) {
   1.424 -        comp_level = CompLevel_fast_compile;
   1.425 -      }
   1.426 -    } else if (!m->has_osr_nmethod()) {
   1.427 -      // Before tier1 is done, use invocation_count + backedge_count to
   1.428 -      // compare against the threshold.  After that, the counters may/will
   1.429 -      // be reset, so rely on the straight interpreter_invocation_count.
   1.430 -      if (m->highest_tier_compile() == CompLevel_initial_compile) {
   1.431 -        if (m->interpreter_invocation_count() < Tier2CompileThreshold) {
   1.432 -          comp_level = CompLevel_fast_compile;
   1.433 -        }
   1.434 -      } else if (m->invocation_count() + m->backedge_count() <
   1.435 -                 Tier2CompileThreshold) {
   1.436 -        comp_level = CompLevel_fast_compile;
   1.437 -      }
   1.438 -    }
   1.439 -
   1.440 -  }
   1.441 -  return comp_level;
   1.442 -}
   1.443 -
   1.444 -
   1.445  RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray<RFrame*>* stack) {
   1.446    // go up the stack until finding a frame that (probably) won't be inlined
   1.447    // into its caller
   1.448 @@ -372,7 +531,7 @@
   1.449  
   1.450      // If the caller method is too big or something then we do not want to
   1.451      // compile it just to inline a method
   1.452 -    if (!canBeCompiled(next_m)) {
   1.453 +    if (!can_be_compiled(next_m)) {
   1.454        msg = "caller cannot be compiled";
   1.455        break;
   1.456      }

mercurial