src/share/vm/runtime/sharedRuntime.cpp

changeset 6498
5da8bb64b370
parent 6492
1174c8abbdb6
parent 6172
df832bd8edb9
child 6503
a9becfeecd1b
     1.1 --- a/src/share/vm/runtime/sharedRuntime.cpp	Thu Dec 12 11:05:39 2013 -0800
     1.2 +++ b/src/share/vm/runtime/sharedRuntime.cpp	Mon Dec 23 10:26:08 2013 -0800
     1.3 @@ -1178,12 +1178,12 @@
     1.4    CodeBlob* caller_cb = caller_frame.cb();
     1.5    guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod");
     1.6    nmethod* caller_nm = caller_cb->as_nmethod_or_null();
     1.7 +
     1.8    // make sure caller is not getting deoptimized
     1.9    // and removed before we are done with it.
    1.10    // CLEANUP - with lazy deopt shouldn't need this lock
    1.11    nmethodLocker caller_lock(caller_nm);
    1.12  
    1.13 -
    1.14    // determine call info & receiver
    1.15    // note: a) receiver is NULL for static calls
    1.16    //       b) an exception is thrown if receiver is NULL for non-static calls
    1.17 @@ -1198,6 +1198,11 @@
    1.18           (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
    1.19           ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
    1.20  
    1.21 +  // We do not patch the call site if the caller nmethod has been made non-entrant.
    1.22 +  if (!caller_nm->is_in_use()) {
    1.23 +    return callee_method;
    1.24 +  }
    1.25 +
    1.26  #ifndef PRODUCT
    1.27    // tracing/debugging/statistics
    1.28    int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
    1.29 @@ -1237,6 +1242,10 @@
    1.30    // Make sure the callee nmethod does not get deoptimized and removed before
    1.31    // we are done patching the code.
    1.32    nmethod* callee_nm = callee_method->code();
    1.33 +  if (callee_nm != NULL && !callee_nm->is_in_use()) {
    1.34 +    // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
    1.35 +    callee_nm = NULL;
    1.36 +  }
    1.37    nmethodLocker nl_callee(callee_nm);
    1.38  #ifdef ASSERT
    1.39    address dest_entry_point = callee_nm == NULL ? 0 : callee_nm->entry_point(); // used below
    1.40 @@ -1258,15 +1267,24 @@
    1.41    {
    1.42      MutexLocker ml_patch(CompiledIC_lock);
    1.43  
    1.44 +    // Lock blocks for safepoint during which both nmethods can change state.
    1.45 +
    1.46      // Now that we are ready to patch if the Method* was redefined then
    1.47      // don't update call site and let the caller retry.
    1.48 -
    1.49 -    if (!callee_method->is_old()) {
    1.50 +    // Don't update call site if caller nmethod has been made non-entrant
    1.51 +    // as it is a waste of time.
    1.52 +    // Don't update call site if callee nmethod was unloaded or deoptimized.
     1.53 +    // Don't update call site if callee nmethod was replaced by another nmethod
     1.54 +    // which may happen when multiple alive nmethods (tiered compilation)
     1.55 +    // are supported.
    1.56 +    if (!callee_method->is_old() && caller_nm->is_in_use() &&
    1.57 +        (callee_nm == NULL || callee_nm->is_in_use() && (callee_method->code() == callee_nm))) {
    1.58  #ifdef ASSERT
    1.59        // We must not try to patch to jump to an already unloaded method.
    1.60        if (dest_entry_point != 0) {
    1.61 -        assert(CodeCache::find_blob(dest_entry_point) != NULL,
    1.62 -               "should not unload nmethod while locked");
    1.63 +        CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
    1.64 +        assert((cb != NULL) && cb->is_nmethod() && (((nmethod*)cb) == callee_nm),
    1.65 +               "should not call unloaded nmethod");
    1.66        }
    1.67  #endif
    1.68        if (is_virtual) {

mercurial