Merge

author:     asaha
date:       Sat, 17 Oct 2015 00:26:45 -0700
changeset:  8167:f473f6facf41
parent:     8166:af06eb8357cb
parent:     8077:39d920531a4d
child:      8168:29a16e0d9bf7

     1.1 --- a/.hgtags	Wed Oct 07 08:34:54 2015 -0700
     1.2 +++ b/.hgtags	Sat Oct 17 00:26:45 2015 -0700
     1.3 @@ -758,3 +758,4 @@
     1.4  a6f2a7ba281291f5dab79fa494f7cfaa6232c88b jdk8u66-b17
     1.5  b8f426369187c32551f0a3d571d933908988c81c jdk8u72-b00
     1.6  c0205eddb31766ece562483595ec28a7506971e9 jdk8u72-b01
     1.7 +15ef554f2f2e0a8d7c330191432fcd2126d19dab jdk8u72-b02
     2.1 --- a/src/share/vm/code/codeCache.cpp	Wed Oct 07 08:34:54 2015 -0700
     2.2 +++ b/src/share/vm/code/codeCache.cpp	Sat Oct 17 00:26:45 2015 -0700
     2.3 @@ -521,15 +521,17 @@
     2.4  
     2.5  void CodeCache::gc_epilogue() {
     2.6    assert_locked_or_safepoint(CodeCache_lock);
     2.7 -  FOR_ALL_ALIVE_BLOBS(cb) {
     2.8 -    if (cb->is_nmethod()) {
     2.9 -      nmethod *nm = (nmethod*)cb;
    2.10 -      assert(!nm->is_unloaded(), "Tautology");
    2.11 -      if (needs_cache_clean()) {
    2.12 -        nm->cleanup_inline_caches();
    2.13 +  NOT_DEBUG(if (needs_cache_clean())) {
    2.14 +    FOR_ALL_ALIVE_BLOBS(cb) {
    2.15 +      if (cb->is_nmethod()) {
    2.16 +        nmethod *nm = (nmethod*)cb;
    2.17 +        assert(!nm->is_unloaded(), "Tautology");
    2.18 +        DEBUG_ONLY(if (needs_cache_clean())) {
    2.19 +          nm->cleanup_inline_caches();
    2.20 +        }
    2.21 +        DEBUG_ONLY(nm->verify());
    2.22 +        DEBUG_ONLY(nm->verify_oop_relocations());
    2.23        }
    2.24 -      DEBUG_ONLY(nm->verify());
    2.25 -      DEBUG_ONLY(nm->verify_oop_relocations());
    2.26      }
    2.27    }
    2.28    set_needs_cache_clean(false);
    2.29 @@ -734,27 +736,6 @@
    2.30    return number_of_marked_CodeBlobs;
    2.31  }
    2.32  
    2.33 -void CodeCache::make_marked_nmethods_zombies() {
    2.34 -  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
    2.35 -  FOR_ALL_ALIVE_NMETHODS(nm) {
    2.36 -    if (nm->is_marked_for_deoptimization()) {
    2.37 -
    2.38 -      // If the nmethod has already been made non-entrant and it can be converted
    2.39 -      // then zombie it now. Otherwise make it non-entrant and it will eventually
    2.40 -      // be zombied when it is no longer seen on the stack. Note that the nmethod
    2.41 -      // might be "entrant" and not on the stack and so could be zombied immediately
    2.42 -      // but we can't tell because we don't track it on stack until it becomes
    2.43 -      // non-entrant.
    2.44 -
    2.45 -      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
    2.46 -        nm->make_zombie();
    2.47 -      } else {
    2.48 -        nm->make_not_entrant();
    2.49 -      }
    2.50 -    }
    2.51 -  }
    2.52 -}
    2.53 -
    2.54  void CodeCache::make_marked_nmethods_not_entrant() {
    2.55    assert_locked_or_safepoint(CodeCache_lock);
    2.56    FOR_ALL_ALIVE_NMETHODS(nm) {
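
Note on the gc_epilogue() hunk above: the restructuring relies on HotSpot's build-configuration macros. In product builds the walk over alive blobs is now skipped entirely unless a cache clean is pending, while debug (ASSERT) builds still visit every alive nmethod so the verification calls run. The following standalone sketch models how DEBUG_ONLY/NOT_DEBUG are assumed to expand (simplified from utilities/macros.hpp); needs_cache_clean() and the loop are stand-ins, not the real CodeCache code.

    // Minimal, self-contained model of the macro-driven control flow in the
    // new gc_epilogue(). ASSERT is assumed to be defined only in debug builds,
    // as in HotSpot; everything else here is an illustrative stand-in.
    #include <cstdio>

    #ifdef ASSERT                      // debug builds
    #define DEBUG_ONLY(code) code      // verification code is compiled in
    #define NOT_DEBUG(code)            // the product-only guard disappears
    #else                              // product builds
    #define DEBUG_ONLY(code)
    #define NOT_DEBUG(code) code
    #endif

    static bool needs_cache_clean() { return false; }   // stand-in for CodeCache state

    void gc_epilogue_shape() {
      // Product: the outer 'if' is real, so the loop is skipped when nothing
      // needs cleaning. Debug: the outer 'if' vanishes, the loop always runs,
      // and the inner DEBUG_ONLY 'if' gates only the cache cleaning.
      NOT_DEBUG(if (needs_cache_clean())) {
        for (int cb = 0; cb < 3; cb++) {               // stand-in for FOR_ALL_ALIVE_BLOBS
          DEBUG_ONLY(if (needs_cache_clean())) {
            printf("cleanup inline caches for blob %d\n", cb);
          }
          DEBUG_ONLY(puts("verify blob");)
        }
      }
    }

    int main() { gc_epilogue_shape(); return 0; }
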
     3.1 --- a/src/share/vm/code/codeCache.hpp	Wed Oct 07 08:34:54 2015 -0700
     3.2 +++ b/src/share/vm/code/codeCache.hpp	Sat Oct 17 00:26:45 2015 -0700
     3.3 @@ -179,7 +179,6 @@
     3.4  
     3.5    static void mark_all_nmethods_for_deoptimization();
     3.6    static int  mark_for_deoptimization(Method* dependee);
     3.7 -  static void make_marked_nmethods_zombies();
     3.8    static void make_marked_nmethods_not_entrant();
     3.9  
    3.10      // tells how many nmethods have dependencies
     4.1 --- a/src/share/vm/code/compiledIC.cpp	Wed Oct 07 08:34:54 2015 -0700
     4.2 +++ b/src/share/vm/code/compiledIC.cpp	Sat Oct 17 00:26:45 2015 -0700
     4.3 @@ -155,6 +155,14 @@
     4.4    return _ic_call->destination();
     4.5  }
     4.6  
     4.7 +// Clears the IC stub if the compiled IC is in transition state
     4.8 +void CompiledIC::clear_ic_stub() {
     4.9 +  if (is_in_transition_state()) {
    4.10 +    ICStub* stub = ICStub_from_destination_address(stub_address());
    4.11 +    stub->clear();
    4.12 +  }
    4.13 +}
    4.14 +
    4.15  
    4.16  //-----------------------------------------------------------------------------
    4.17  // High-level access to an inline cache. Guaranteed to be MT-safe.
    4.18 @@ -279,6 +287,7 @@
    4.19    assert( is_c1_method ||
    4.20           !is_monomorphic ||
    4.21           is_optimized() ||
    4.22 +         !caller->is_alive() ||
    4.23           (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
    4.24  #endif // ASSERT
    4.25    return is_monomorphic;
    4.26 @@ -313,7 +322,7 @@
    4.27  }
    4.28  
    4.29  
    4.30 -void CompiledIC::set_to_clean() {
    4.31 +void CompiledIC::set_to_clean(bool in_use) {
    4.32    assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked() , "MT-unsafe call");
    4.33    if (TraceInlineCacheClearing || TraceICs) {
    4.34      tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
    4.35 @@ -329,17 +338,14 @@
    4.36  
    4.37    // A zombie transition will always be safe, since the metadata has already been set to NULL, so
    4.38    // we only need to patch the destination
    4.39 -  bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();
    4.40 +  bool safe_transition = !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();
    4.41  
    4.42    if (safe_transition) {
    4.43      // Kill any leftover stub we might have too
    4.44 -    if (is_in_transition_state()) {
    4.45 -      ICStub* old_stub = ICStub_from_destination_address(stub_address());
    4.46 -      old_stub->clear();
    4.47 -    }
    4.48 +    clear_ic_stub();
    4.49      if (is_optimized()) {
    4.50 -    set_ic_destination(entry);
    4.51 -  } else {
    4.52 +      set_ic_destination(entry);
    4.53 +    } else {
    4.54        set_ic_destination_and_value(entry, (void*)NULL);
    4.55      }
    4.56    } else {
     5.1 --- a/src/share/vm/code/compiledIC.hpp	Wed Oct 07 08:34:54 2015 -0700
     5.2 +++ b/src/share/vm/code/compiledIC.hpp	Sat Oct 17 00:26:45 2015 -0700
     5.3 @@ -228,8 +228,9 @@
     5.4    //
     5.5    // They all takes a TRAP argument, since they can cause a GC if the inline-cache buffer is full.
     5.6    //
     5.7 -  void set_to_clean();  // Can only be called during a safepoint operation
     5.8 +  void set_to_clean(bool in_use = true);
     5.9    void set_to_monomorphic(CompiledICInfo& info);
    5.10 +  void clear_ic_stub();
    5.11  
    5.12    // Returns true if successful and false otherwise. The call can fail if memory
    5.13    // allocation in the code cache fails.
     6.1 --- a/src/share/vm/code/nmethod.cpp	Wed Oct 07 08:34:54 2015 -0700
     6.2 +++ b/src/share/vm/code/nmethod.cpp	Sat Oct 17 00:26:45 2015 -0700
     6.3 @@ -1148,9 +1148,20 @@
     6.4    }
     6.5  }
     6.6  
     6.7 +// Clear ICStubs of all compiled ICs
     6.8 +void nmethod::clear_ic_stubs() {
     6.9 +  assert_locked_or_safepoint(CompiledIC_lock);
    6.10 +  RelocIterator iter(this);
    6.11 +  while(iter.next()) {
    6.12 +    if (iter.type() == relocInfo::virtual_call_type) {
    6.13 +      CompiledIC* ic = CompiledIC_at(&iter);
    6.14 +      ic->clear_ic_stub();
    6.15 +    }
    6.16 +  }
    6.17 +}
    6.18 +
    6.19  
    6.20  void nmethod::cleanup_inline_caches() {
    6.21 -
    6.22    assert_locked_or_safepoint(CompiledIC_lock);
    6.23  
    6.24    // If the method is not entrant or zombie then a JMP is plastered over the
    6.25 @@ -1166,7 +1177,8 @@
    6.26      // In fact, why are we bothering to look at oops in a non-entrant method??
    6.27    }
    6.28  
    6.29 -  // Find all calls in an nmethod, and clear the ones that points to zombie methods
    6.30 +  // Find all calls in an nmethod and clear the ones that point to non-entrant,
    6.31 +  // zombie and unloaded nmethods.
    6.32    ResourceMark rm;
    6.33    RelocIterator iter(this, low_boundary);
    6.34    while(iter.next()) {
    6.35 @@ -1178,8 +1190,8 @@
    6.36          CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
    6.37          if( cb != NULL && cb->is_nmethod() ) {
    6.38            nmethod* nm = (nmethod*)cb;
    6.39 -          // Clean inline caches pointing to both zombie and not_entrant methods
    6.40 -          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
    6.41 +          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
    6.42 +          if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
    6.43          }
    6.44          break;
    6.45        }
    6.46 @@ -1188,7 +1200,7 @@
    6.47          CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
    6.48          if( cb != NULL && cb->is_nmethod() ) {
    6.49            nmethod* nm = (nmethod*)cb;
    6.50 -          // Clean inline caches pointing to both zombie and not_entrant methods
    6.51 +          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
    6.52            if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
    6.53          }
    6.54          break;
    6.55 @@ -1279,7 +1291,7 @@
    6.56  // Tell if a non-entrant method can be converted to a zombie (i.e.,
    6.57  // there are no activations on the stack, not in use by the VM,
    6.58  // and not in use by the ServiceThread)
    6.59 -bool nmethod::can_not_entrant_be_converted() {
    6.60 +bool nmethod::can_convert_to_zombie() {
    6.61    assert(is_not_entrant(), "must be a non-entrant method");
    6.62  
    6.63    // Since the nmethod sweeper only does partial sweep the sweeper's traversal
    6.64 @@ -2695,7 +2707,7 @@
    6.65    // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant
    6.66    // seems odd.
    6.67  
    6.68 -  if( is_zombie() || is_not_entrant() )
    6.69 +  if (is_zombie() || is_not_entrant() || is_unloaded())
    6.70      return;
    6.71  
    6.72    // Make sure all the entry points are correctly aligned for patching.
     7.1 --- a/src/share/vm/code/nmethod.hpp	Wed Oct 07 08:34:54 2015 -0700
     7.2 +++ b/src/share/vm/code/nmethod.hpp	Sat Oct 17 00:26:45 2015 -0700
     7.3 @@ -577,6 +577,7 @@
     7.4  
     7.5    // Inline cache support
     7.6    void clear_inline_caches();
     7.7 +  void clear_ic_stubs();
     7.8    void cleanup_inline_caches();
     7.9    bool inlinecache_check_contains(address addr) const {
    7.10      return (addr >= code_begin() && addr < verified_entry_point());
    7.11 @@ -604,7 +605,7 @@
    7.12  
    7.13    // See comment at definition of _last_seen_on_stack
    7.14    void mark_as_seen_on_stack();
    7.15 -  bool can_not_entrant_be_converted();
    7.16 +  bool can_convert_to_zombie();
    7.17  
    7.18    // Evolution support. We make old (discarded) compiled methods point to new Method*s.
    7.19    void set_method(Method* method) { _method = method; }
     8.1 --- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Wed Oct 07 08:34:54 2015 -0700
     8.2 +++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Sat Oct 17 00:26:45 2015 -0700
     8.3 @@ -3751,7 +3751,7 @@
     8.4      // Deoptimize all activations depending on marked nmethods
     8.5      Deoptimization::deoptimize_dependents();
     8.6  
     8.7 -    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
     8.8 +    // Make the dependent methods not entrant
     8.9      CodeCache::make_marked_nmethods_not_entrant();
    8.10  
    8.11      // From now on we know that the dependency information is complete
     9.1 --- a/src/share/vm/runtime/sweeper.cpp	Wed Oct 07 08:34:54 2015 -0700
     9.2 +++ b/src/share/vm/runtime/sweeper.cpp	Sat Oct 17 00:26:45 2015 -0700
     9.3 @@ -538,10 +538,14 @@
     9.4    } else if (nm->is_not_entrant()) {
     9.5      // If there are no current activations of this method on the
     9.6      // stack we can safely convert it to a zombie method
     9.7 -    if (nm->can_not_entrant_be_converted()) {
     9.8 +    if (nm->can_convert_to_zombie()) {
     9.9        if (PrintMethodFlushing && Verbose) {
    9.10          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
    9.11        }
    9.12 +      // Clear ICStubs to prevent back patching stubs of zombie or unloaded
    9.13 +      // nmethods during the next safepoint (see ICStub::finalize).
    9.14 +      MutexLocker cl(CompiledIC_lock);
    9.15 +      nm->clear_ic_stubs();
    9.16        // Code cache state change is tracked in make_zombie()
    9.17        nm->make_zombie();
    9.18        _zombified_count++;
    9.19 @@ -567,6 +571,12 @@
    9.20        release_nmethod(nm);
    9.21        _flushed_count++;
    9.22      } else {
    9.23 +      {
    9.24 +        // Clean ICs of unloaded nmethods as well because they may reference other
    9.25 +        // unloaded nmethods that may be flushed earlier in the sweeper cycle.
    9.26 +        MutexLocker cl(CompiledIC_lock);
    9.27 +        nm->cleanup_inline_caches();
    9.28 +      }
    9.29        // Code cache state change is tracked in make_zombie()
    9.30        nm->make_zombie();
    9.31        _zombified_count++;
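
Note on the sweeper.cpp hunks above: both additions take CompiledIC_lock through a scoped MutexLocker, and the second hunk wraps the locker in an extra brace block so the lock is released before make_zombie() runs. The sketch below illustrates that RAII scoping pattern only; std::mutex and std::lock_guard stand in for HotSpot's CompiledIC_lock and MutexLocker, and the function bodies are placeholders.

    // Illustrative only: the names and bodies below are stand-ins, not the
    // real sweeper code.
    #include <cstdio>
    #include <mutex>

    static std::mutex compiled_ic_lock_demo;            // stand-in for CompiledIC_lock

    static void cleanup_inline_caches_demo() { puts("cleanup inline caches"); }
    static void make_zombie_demo()           { puts("make zombie"); }

    void sweep_unloaded_nmethod_demo() {
      {
        // The extra braces bound the locker's lifetime: the lock is dropped
        // as soon as the inline-cache cleanup finishes, so it is not held
        // across the zombie transition below.
        std::lock_guard<std::mutex> cl(compiled_ic_lock_demo);
        cleanup_inline_caches_demo();
      }
      make_zombie_demo();
    }

    int main() { sweep_unloaded_nmethod_demo(); return 0; }
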
    10.1 --- a/src/share/vm/runtime/vm_operations.cpp	Wed Oct 07 08:34:54 2015 -0700
    10.2 +++ b/src/share/vm/runtime/vm_operations.cpp	Sat Oct 17 00:26:45 2015 -0700
    10.3 @@ -106,8 +106,8 @@
    10.4    // Deoptimize all activations depending on marked nmethods
    10.5    Deoptimization::deoptimize_dependents();
    10.6  
    10.7 -  // Make the dependent methods zombies
    10.8 -  CodeCache::make_marked_nmethods_zombies();
    10.9 +  // Make the dependent methods not entrant
   10.10 +  CodeCache::make_marked_nmethods_not_entrant();
   10.11  }
   10.12  
   10.13  
