6950075: nmethod sweeper should operate concurrently

Mon, 17 May 2010 16:50:07 -0700

author
never
date
Mon, 17 May 2010 16:50:07 -0700
changeset 1893
bfe29ec02863
parent 1892
79bf863697eb
child 1894
c52275c698d1

6950075: nmethod sweeper should operate concurrently
Reviewed-by: never, kvn
Contributed-by: eric.caspole@amd.com

src/share/vm/code/codeCache.cpp file | annotate | diff | comparison | revisions
src/share/vm/code/codeCache.hpp file | annotate | diff | comparison | revisions
src/share/vm/code/nmethod.cpp file | annotate | diff | comparison | revisions
src/share/vm/code/nmethod.hpp file | annotate | diff | comparison | revisions
src/share/vm/compiler/compileBroker.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/globals.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/safepoint.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/sweeper.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/sweeper.hpp file | annotate | diff | comparison | revisions
     1.1 --- a/src/share/vm/code/codeCache.cpp	Mon May 17 11:32:56 2010 -0700
     1.2 +++ b/src/share/vm/code/codeCache.cpp	Mon May 17 16:50:07 2010 -0700
     1.3 @@ -124,6 +124,23 @@
     1.4    return (nmethod*)cb;
     1.5  }
     1.6  
     1.7 +nmethod* CodeCache::first_nmethod() {
     1.8 +  assert_locked_or_safepoint(CodeCache_lock);
     1.9 +  CodeBlob* cb = first();
    1.10 +  while (cb != NULL && !cb->is_nmethod()) {
    1.11 +    cb = next(cb);
    1.12 +  }
    1.13 +  return (nmethod*)cb;
    1.14 +}
    1.15 +
    1.16 +nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
    1.17 +  assert_locked_or_safepoint(CodeCache_lock);
    1.18 +  cb = next(cb);
    1.19 +  while (cb != NULL && !cb->is_nmethod()) {
    1.20 +    cb = next(cb);
    1.21 +  }
    1.22 +  return (nmethod*)cb;
    1.23 +}
    1.24  
    1.25  CodeBlob* CodeCache::allocate(int size) {
    1.26    // Do not seize the CodeCache lock here--if the caller has not
    1.27 @@ -414,7 +431,7 @@
    1.28        saved->set_speculatively_disconnected(false);
    1.29        saved->set_saved_nmethod_link(NULL);
    1.30        if (PrintMethodFlushing) {
    1.31 -        saved->print_on(tty, " ### nmethod is reconnected");
    1.32 +        saved->print_on(tty, " ### nmethod is reconnected\n");
    1.33        }
    1.34        if (LogCompilation && (xtty != NULL)) {
    1.35          ttyLocker ttyl;
    1.36 @@ -432,7 +449,8 @@
    1.37  }
    1.38  
    1.39  void CodeCache::remove_saved_code(nmethod* nm) {
    1.40 -  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     1.41 +  // For the concurrent sweeper this will be called with CodeCache_lock taken by caller
    1.42 +  assert_locked_or_safepoint(CodeCache_lock);
    1.43    assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
    1.44    nmethod* saved = _saved_nmethods;
    1.45    nmethod* prev = NULL;
    1.46 @@ -463,7 +481,7 @@
    1.47    nm->set_saved_nmethod_link(_saved_nmethods);
    1.48    _saved_nmethods = nm;
    1.49    if (PrintMethodFlushing) {
    1.50 -    nm->print_on(tty, " ### nmethod is speculatively disconnected");
    1.51 +    nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
    1.52    }
    1.53    if (LogCompilation && (xtty != NULL)) {
    1.54      ttyLocker ttyl;
     2.1 --- a/src/share/vm/code/codeCache.hpp	Mon May 17 11:32:56 2010 -0700
     2.2 +++ b/src/share/vm/code/codeCache.hpp	Mon May 17 16:50:07 2010 -0700
     2.3 @@ -102,6 +102,8 @@
     2.4    static CodeBlob* next (CodeBlob* cb);
     2.5    static CodeBlob* alive(CodeBlob *cb);
     2.6    static nmethod* alive_nmethod(CodeBlob *cb);
     2.7 +  static nmethod* first_nmethod();
     2.8 +  static nmethod* next_nmethod (CodeBlob* cb);
     2.9    static int       nof_blobs()                 { return _number_of_blobs; }
    2.10  
    2.11    // GC support
     3.1 --- a/src/share/vm/code/nmethod.cpp	Mon May 17 11:32:56 2010 -0700
     3.2 +++ b/src/share/vm/code/nmethod.cpp	Mon May 17 16:50:07 2010 -0700
     3.3 @@ -1014,9 +1014,7 @@
     3.4  
     3.5  void nmethod::cleanup_inline_caches() {
     3.6  
     3.7 -  assert(SafepointSynchronize::is_at_safepoint() &&
     3.8 -        !CompiledIC_lock->is_locked() &&
     3.9 -        !Patching_lock->is_locked(), "no threads must be updating the inline caches by them selfs");
    3.10 +  assert_locked_or_safepoint(CompiledIC_lock);
    3.11  
    3.12    // If the method is not entrant or zombie then a JMP is plastered over the
    3.13    // first few bytes.  If an oop in the old code was there, that oop
    3.14 @@ -1071,7 +1069,6 @@
    3.15  // Tell if a non-entrant method can be converted to a zombie (i.e., there is no activations on the stack)
    3.16  bool nmethod::can_not_entrant_be_converted() {
    3.17    assert(is_not_entrant(), "must be a non-entrant method");
    3.18 -  assert(SafepointSynchronize::is_at_safepoint(), "must be called during a safepoint");
    3.19  
    3.20    // Since the nmethod sweeper only does partial sweep the sweeper's traversal
    3.21    // count can be greater than the stack traversal count before it hits the
    3.22 @@ -1127,7 +1124,7 @@
    3.23      _method = NULL;            // Clear the method of this dead nmethod
    3.24    }
    3.25    // Make the class unloaded - i.e., change state and notify sweeper
    3.26 -  check_safepoint();
    3.27 +  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
    3.28    if (is_in_use()) {
    3.29      // Transitioning directly from live to unloaded -- so
    3.30      // we need to force a cache clean-up; remember this
    3.31 @@ -1220,17 +1217,6 @@
    3.32        assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
    3.33      }
    3.34  
    3.35 -    // When the nmethod becomes zombie it is no longer alive so the
    3.36 -    // dependencies must be flushed.  nmethods in the not_entrant
    3.37 -    // state will be flushed later when the transition to zombie
    3.38 -    // happens or they get unloaded.
    3.39 -    if (state == zombie) {
    3.40 -      assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
    3.41 -      flush_dependencies(NULL);
    3.42 -    } else {
    3.43 -      assert(state == not_entrant, "other cases may need to be handled differently");
    3.44 -    }
    3.45 -
    3.46      was_alive = is_in_use(); // Read state under lock
    3.47  
    3.48      // Change state
    3.49 @@ -1241,6 +1227,17 @@
    3.50  
    3.51    } // leave critical region under Patching_lock
    3.52  
    3.53 +  // When the nmethod becomes zombie it is no longer alive so the
    3.54 +  // dependencies must be flushed.  nmethods in the not_entrant
    3.55 +  // state will be flushed later when the transition to zombie
    3.56 +  // happens or they get unloaded.
    3.57 +  if (state == zombie) {
    3.58 +    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    3.59 +    flush_dependencies(NULL);
    3.60 +  } else {
    3.61 +    assert(state == not_entrant, "other cases may need to be handled differently");
    3.62 +  }
    3.63 +
    3.64    if (state == not_entrant) {
    3.65      Events::log("Make nmethod not entrant " INTPTR_FORMAT, this);
    3.66    } else {
    3.67 @@ -1310,21 +1307,13 @@
    3.68    return true;
    3.69  }
    3.70  
    3.71 -
    3.72 -#ifndef PRODUCT
    3.73 -void nmethod::check_safepoint() {
    3.74 -  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
    3.75 -}
    3.76 -#endif
    3.77 -
    3.78 -
    3.79  void nmethod::flush() {
    3.80    // Note that there are no valid oops in the nmethod anymore.
    3.81    assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
    3.82    assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
    3.83  
    3.84    assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
    3.85 -  check_safepoint();
    3.86 +  assert_locked_or_safepoint(CodeCache_lock);
    3.87  
    3.88    // completely deallocate this method
    3.89    EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
    3.90 @@ -1373,7 +1362,7 @@
    3.91  // notifies instanceKlasses that are reachable
    3.92  
    3.93  void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
    3.94 -  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
    3.95 +  assert_locked_or_safepoint(CodeCache_lock);
    3.96    assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
    3.97    "is_alive is non-NULL if and only if we are called during GC");
    3.98    if (!has_flushed_dependencies()) {
    3.99 @@ -2266,7 +2255,6 @@
   3.100      tty->print(" for method " INTPTR_FORMAT , (address)method());
   3.101      tty->print(" { ");
   3.102      if (version())        tty->print("v%d ", version());
   3.103 -    if (level())          tty->print("l%d ", level());
   3.104      if (is_in_use())      tty->print("in_use ");
   3.105      if (is_not_entrant()) tty->print("not_entrant ");
   3.106      if (is_zombie())      tty->print("zombie ");
     4.1 --- a/src/share/vm/code/nmethod.hpp	Mon May 17 11:32:56 2010 -0700
     4.2 +++ b/src/share/vm/code/nmethod.hpp	Mon May 17 16:50:07 2010 -0700
     4.3 @@ -82,7 +82,6 @@
     4.4  struct nmFlags {
     4.5    friend class VMStructs;
     4.6    unsigned int version:8;                    // version number (0 = first version)
     4.7 -  unsigned int level:4;                      // optimization level
     4.8    unsigned int age:4;                        // age (in # of sweep steps)
     4.9  
    4.10    unsigned int state:2;                      // {alive, zombie, unloaded)
    4.11 @@ -410,14 +409,13 @@
    4.12    void flush_dependencies(BoolObjectClosure* is_alive);
    4.13    bool  has_flushed_dependencies()                { return flags.hasFlushedDependencies; }
    4.14    void  set_has_flushed_dependencies()            {
    4.15 -    check_safepoint();
    4.16      assert(!has_flushed_dependencies(), "should only happen once");
    4.17      flags.hasFlushedDependencies = 1;
    4.18    }
    4.19  
    4.20    bool  is_marked_for_reclamation() const         { return flags.markedForReclamation; }
    4.21 -  void  mark_for_reclamation()                    { check_safepoint(); flags.markedForReclamation = 1; }
    4.22 -  void  unmark_for_reclamation()                  { check_safepoint(); flags.markedForReclamation = 0; }
    4.23 +  void  mark_for_reclamation()                    { flags.markedForReclamation = 1; }
    4.24 +  void  unmark_for_reclamation()                  { flags.markedForReclamation = 0; }
    4.25  
    4.26    bool  has_unsafe_access() const                 { return flags.has_unsafe_access; }
    4.27    void  set_has_unsafe_access(bool z)             { flags.has_unsafe_access = z; }
    4.28 @@ -428,9 +426,6 @@
    4.29    bool  is_speculatively_disconnected() const     { return flags.speculatively_disconnected; }
    4.30    void  set_speculatively_disconnected(bool z)     { flags.speculatively_disconnected = z; }
    4.31  
    4.32 -  int   level() const                             { return flags.level; }
    4.33 -  void  set_level(int newLevel)                   { check_safepoint(); flags.level = newLevel; }
    4.34 -
    4.35    int   comp_level() const                        { return _comp_level; }
    4.36  
    4.37    int   version() const                           { return flags.version; }
     5.1 --- a/src/share/vm/compiler/compileBroker.cpp	Mon May 17 11:32:56 2010 -0700
     5.2 +++ b/src/share/vm/compiler/compileBroker.cpp	Mon May 17 16:50:07 2010 -0700
     5.3 @@ -461,12 +461,25 @@
     5.4  //
     5.5  // Get the next CompileTask from a CompileQueue
     5.6  CompileTask* CompileQueue::get() {
     5.7 +  NMethodSweeper::possibly_sweep();
     5.8 +
     5.9    MutexLocker locker(lock());
    5.10  
    5.11    // Wait for an available CompileTask.
    5.12    while (_first == NULL) {
    5.13      // There is no work to be done right now.  Wait.
    5.14 -    lock()->wait();
    5.15 +    if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
    5.16 +      // During the emergency sweeping periods, wake up and sweep occasionally
    5.17 +      bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
    5.18 +      if (timedout) {
    5.19 +        MutexUnlocker ul(lock());
    5.20 +        // When otherwise not busy, run nmethod sweeping
    5.21 +        NMethodSweeper::possibly_sweep();
    5.22 +      }
    5.23 +    } else {
    5.24 +      // During normal operation no need to wake up on timer
    5.25 +      lock()->wait();
    5.26 +    }
    5.27    }
    5.28  
    5.29    CompileTask* task = _first;
     6.1 --- a/src/share/vm/runtime/globals.hpp	Mon May 17 11:32:56 2010 -0700
     6.2 +++ b/src/share/vm/runtime/globals.hpp	Mon May 17 16:50:07 2010 -0700
     6.3 @@ -2756,6 +2756,9 @@
     6.4    product(intx, NmethodSweepFraction, 4,                                    \
     6.5            "Number of invocations of sweeper to cover all nmethods")         \
     6.6                                                                              \
     6.7 +  product(intx, NmethodSweepCheckInterval, 5,                               \
     6.8 +          "Compilers wake up every n seconds to possibly sweep nmethods")   \
     6.9 +                                                                            \
    6.10    notproduct(intx, MemProfilingInterval, 500,                               \
    6.11            "Time between each invocation of the MemProfiler")                \
    6.12                                                                              \
     7.1 --- a/src/share/vm/runtime/safepoint.cpp	Mon May 17 11:32:56 2010 -0700
     7.2 +++ b/src/share/vm/runtime/safepoint.cpp	Mon May 17 16:50:07 2010 -0700
     7.3 @@ -472,7 +472,7 @@
     7.4    }
     7.5  
     7.6    TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
     7.7 -  NMethodSweeper::sweep();
     7.8 +  NMethodSweeper::scan_stacks();
     7.9  }
    7.10  
    7.11  
     8.1 --- a/src/share/vm/runtime/sweeper.cpp	Mon May 17 11:32:56 2010 -0700
     8.2 +++ b/src/share/vm/runtime/sweeper.cpp	Mon May 17 16:50:07 2010 -0700
     8.3 @@ -33,6 +33,8 @@
     8.4  jint      NMethodSweeper::_locked_seen = 0;
     8.5  jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
     8.6  bool      NMethodSweeper::_rescan = false;
     8.7 +bool      NMethodSweeper::_do_sweep = false;
     8.8 +jint      NMethodSweeper::_sweep_started = 0;
     8.9  bool      NMethodSweeper::_was_full = false;
    8.10  jint      NMethodSweeper::_advise_to_sweep = 0;
    8.11  jlong     NMethodSweeper::_last_was_full = 0;
    8.12 @@ -50,14 +52,20 @@
    8.13  };
    8.14  static MarkActivationClosure mark_activation_closure;
    8.15  
    8.16 -void NMethodSweeper::sweep() {
    8.17 +void NMethodSweeper::scan_stacks() {
    8.18    assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
    8.19    if (!MethodFlushing) return;
    8.20 +  _do_sweep = true;
    8.21  
    8.22    // No need to synchronize access, since this is always executed at a
    8.23    // safepoint.  If we aren't in the middle of scan and a rescan
    8.24 -  // hasn't been requested then just return.
    8.25 -  if (_current == NULL && !_rescan) return;
    8.26 +  // hasn't been requested then just return. If UseCodeCacheFlushing is on and
    8.27 +  // code cache flushing is in progress, don't skip sweeping to help make progress
    8.28 +  // clearing space in the code cache.
    8.29 +  if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
    8.30 +    _do_sweep = false;
    8.31 +    return;
    8.32 +  }
    8.33  
    8.34    // Make sure CompiledIC_lock in unlocked, since we might update some
    8.35    // inline caches. If it is, we just bail-out and try later.
    8.36 @@ -68,7 +76,7 @@
    8.37    if (_current == NULL) {
    8.38      _seen        = 0;
    8.39      _invocations = NmethodSweepFraction;
    8.40 -    _current     = CodeCache::first();
    8.41 +    _current     = CodeCache::first_nmethod();
    8.42      _traversals  += 1;
    8.43      if (PrintMethodFlushing) {
    8.44        tty->print_cr("### Sweep: stack traversal %d", _traversals);
    8.45 @@ -81,48 +89,9 @@
    8.46      _not_entrant_seen_on_stack = 0;
    8.47    }
    8.48  
    8.49 -  if (PrintMethodFlushing && Verbose) {
    8.50 -    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
    8.51 -  }
    8.52 -
    8.53 -  // We want to visit all nmethods after NmethodSweepFraction invocations.
    8.54 -  // If invocation is 1 we do the rest
    8.55 -  int todo = CodeCache::nof_blobs();
    8.56 -  if (_invocations != 1) {
    8.57 -    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
    8.58 -    _invocations--;
    8.59 -  }
    8.60 -
    8.61 -  for(int i = 0; i < todo && _current != NULL; i++) {
    8.62 -    CodeBlob* next = CodeCache::next(_current); // Read next before we potentially delete current
    8.63 -    if (_current->is_nmethod()) {
    8.64 -      process_nmethod((nmethod *)_current);
    8.65 -    }
    8.66 -    _seen++;
    8.67 -    _current = next;
    8.68 -  }
    8.69 -  // Because we could stop on a codeBlob other than an nmethod we skip forward
    8.70 -  // to the next nmethod (if any). codeBlobs other than nmethods can be freed
    8.71 -  // async to us and make _current invalid while we sleep.
    8.72 -  while (_current != NULL && !_current->is_nmethod()) {
    8.73 -    _current = CodeCache::next(_current);
    8.74 -  }
    8.75 -
    8.76 -  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
    8.77 -    // we've completed a scan without making progress but there were
    8.78 -    // nmethods we were unable to process either because they were
    8.79 -    // locked or were still on stack.  We don't have to aggresively
    8.80 -    // clean them up so just stop scanning.  We could scan once more
    8.81 -    // but that complicates the control logic and it's unlikely to
    8.82 -    // matter much.
    8.83 -    if (PrintMethodFlushing) {
    8.84 -      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
    8.85 -    }
    8.86 -  }
    8.87 -
    8.88    if (UseCodeCacheFlushing) {
    8.89      if (!CodeCache::needs_flushing()) {
    8.90 -      // In a safepoint, no race with setters
    8.91 +      // scan_stacks() runs during a safepoint, no race with setters
    8.92        _advise_to_sweep = 0;
    8.93      }
    8.94  
    8.95 @@ -155,13 +124,99 @@
    8.96    }
    8.97  }
    8.98  
    8.99 +void NMethodSweeper::possibly_sweep() {
   8.100 +  if ((!MethodFlushing) || (!_do_sweep)) return;
   8.101 +
   8.102 +  if (_invocations > 0) {
   8.103 +    // Only one thread at a time will sweep
   8.104 +    jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
   8.105 +    if (old != 0) {
   8.106 +      return;
   8.107 +    }
   8.108 +    sweep_code_cache();
   8.109 +  }
   8.110 +  _sweep_started = 0;
   8.111 +}
   8.112 +
   8.113 +void NMethodSweeper::sweep_code_cache() {
   8.114 +#ifdef ASSERT
   8.115 +  jlong sweep_start;
   8.116 +  if(PrintMethodFlushing) {
   8.117 +    sweep_start = os::javaTimeMillis();
   8.118 +  }
   8.119 +#endif
   8.120 +  if (PrintMethodFlushing && Verbose) {
   8.121 +    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
   8.122 +  }
   8.123 +
   8.124 +  // We want to visit all nmethods after NmethodSweepFraction invocations.
   8.125 +  // If invocation is 1 we do the rest
   8.126 +  int todo = CodeCache::nof_blobs();
   8.127 +  if (_invocations > 1) {
   8.128 +    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
   8.129 +  }
   8.130 +
   8.131 +  // Compilers may check to sweep more often than stack scans happen,
   8.132 +  // don't keep trying once it is all scanned
   8.133 +  _invocations--;
   8.134 +
   8.135 +  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
   8.136 +  assert(!CodeCache_lock->owned_by_self(), "just checking");
   8.137 +
   8.138 +  {
   8.139 +    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   8.140 +
   8.141 +    for(int i = 0; i < todo && _current != NULL; i++) {
   8.142 +
   8.143 +      // Since we will give up the CodeCache_lock, always skip ahead to an nmethod.
   8.144 +      // Other blobs can be deleted by other threads
   8.145 +      // Read next before we potentially delete current
   8.146 +      CodeBlob* next = CodeCache::next_nmethod(_current);
   8.147 +
   8.148 +      // Now ready to process nmethod and give up CodeCache_lock
   8.149 +      {
   8.150 +        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   8.151 +        process_nmethod((nmethod *)_current);
   8.152 +      }
   8.153 +      _seen++;
   8.154 +      _current = next;
   8.155 +    }
   8.156 +
   8.157 +    // Skip forward to the next nmethod (if any). Code blobs other than nmethods
   8.158 +    // can be freed async to us and make _current invalid while we sleep.
   8.159 +    _current = CodeCache::next_nmethod(_current);
   8.160 +  }
   8.161 +
   8.162 +  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
   8.163 +    // we've completed a scan without making progress but there were
   8.164 +    // nmethods we were unable to process either because they were
    8.165 +    // locked or were still on stack.  We don't have to aggressively
   8.166 +    // clean them up so just stop scanning.  We could scan once more
   8.167 +    // but that complicates the control logic and it's unlikely to
   8.168 +    // matter much.
   8.169 +    if (PrintMethodFlushing) {
   8.170 +      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
   8.171 +    }
   8.172 +  }
   8.173 +
   8.174 +#ifdef ASSERT
   8.175 +  if(PrintMethodFlushing) {
   8.176 +    jlong sweep_end             = os::javaTimeMillis();
   8.177 +    tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
   8.178 +  }
   8.179 +#endif
   8.180 +}
   8.181 +
   8.182  
   8.183  void NMethodSweeper::process_nmethod(nmethod *nm) {
   8.184 +  assert(!CodeCache_lock->owned_by_self(), "just checking");
   8.185 +
   8.186    // Skip methods that are currently referenced by the VM
   8.187    if (nm->is_locked_by_vm()) {
   8.188      // But still remember to clean-up inline caches for alive nmethods
   8.189      if (nm->is_alive()) {
   8.190        // Clean-up all inline caches that points to zombie/non-reentrant methods
   8.191 +      MutexLocker cl(CompiledIC_lock);
   8.192        nm->cleanup_inline_caches();
   8.193      } else {
   8.194        _locked_seen++;
   8.195 @@ -178,6 +233,7 @@
   8.196        if (PrintMethodFlushing && Verbose) {
   8.197          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
   8.198        }
   8.199 +      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   8.200        nm->flush();
   8.201      } else {
   8.202        if (PrintMethodFlushing && Verbose) {
   8.203 @@ -197,10 +253,11 @@
   8.204        _rescan = true;
   8.205      } else {
   8.206        // Still alive, clean up its inline caches
   8.207 +      MutexLocker cl(CompiledIC_lock);
   8.208        nm->cleanup_inline_caches();
   8.209        // we coudn't transition this nmethod so don't immediately
   8.210        // request a rescan.  If this method stays on the stack for a
   8.211 -      // long time we don't want to keep rescanning at every safepoint.
   8.212 +      // long time we don't want to keep rescanning the code cache.
   8.213        _not_entrant_seen_on_stack++;
   8.214      }
   8.215    } else if (nm->is_unloaded()) {
   8.216 @@ -209,6 +266,7 @@
   8.217        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
   8.218      if (nm->is_osr_method()) {
   8.219        // No inline caches will ever point to osr methods, so we can just remove it
   8.220 +      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   8.221        nm->flush();
   8.222      } else {
   8.223        nm->make_zombie();
   8.224 @@ -227,6 +285,7 @@
   8.225      }
   8.226  
   8.227      // Clean-up all inline caches that points to zombie/non-reentrant methods
   8.228 +    MutexLocker cl(CompiledIC_lock);
   8.229      nm->cleanup_inline_caches();
   8.230    }
   8.231  }
   8.232 @@ -235,8 +294,8 @@
   8.233  // they will call a vm op that comes here. This code attempts to speculatively
   8.234  // unload the oldest half of the nmethods (based on the compile job id) by
   8.235  // saving the old code in a list in the CodeCache. Then
   8.236 -// execution resumes. If a method so marked is not called by the second
   8.237 -// safepoint from the current one, the nmethod will be marked non-entrant and
   8.238 +// execution resumes. If a method so marked is not called by the second sweeper
   8.239 +// stack traversal after the current one, the nmethod will be marked non-entrant and
   8.240  // got rid of by normal sweeping. If the method is called, the methodOop's
   8.241  // _code field is restored and the methodOop/nmethod
   8.242  // go back to their normal state.
   8.243 @@ -364,8 +423,8 @@
   8.244      xtty->end_elem();
   8.245    }
   8.246  
   8.247 -  // Shut off compiler. Sweeper will run exiting from this safepoint
   8.248 -  // and turn it back on if it clears enough space
   8.249 +  // Shut off compiler. Sweeper will start over with a new stack scan and
   8.250 +  // traversal cycle and turn it back on if it clears enough space.
   8.251    if (was_full()) {
   8.252      _last_was_full = os::javaTimeMillis();
   8.253      CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
     9.1 --- a/src/share/vm/runtime/sweeper.hpp	Mon May 17 11:32:56 2010 -0700
     9.2 +++ b/src/share/vm/runtime/sweeper.hpp	Mon May 17 16:50:07 2010 -0700
     9.3 @@ -35,6 +35,8 @@
     9.4  
     9.5    static bool      _rescan;          // Indicates that we should do a full rescan of the
     9.6                                       // of the code cache looking for work to do.
     9.7 +  static bool      _do_sweep;        // Flag to skip the conc sweep if no stack scan happened
     9.8 +  static jint      _sweep_started;   // Flag to control conc sweeper
     9.9    static int       _locked_seen;     // Number of locked nmethods encountered during the scan
    9.10    static int       _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack
    9.11  
    9.12 @@ -48,7 +50,9 @@
    9.13   public:
    9.14    static long traversal_count() { return _traversals; }
    9.15  
    9.16 -  static void sweep();  // Invoked at the end of each safepoint
    9.17 +  static void scan_stacks();      // Invoked at the end of each safepoint
    9.18 +  static void sweep_code_cache(); // Concurrent part of sweep job
    9.19 +  static void possibly_sweep();   // Compiler threads call this to sweep
    9.20  
    9.21    static void notify(nmethod* nm) {
    9.22      // Perform a full scan of the code cache from the beginning.  No

mercurial