src/share/vm/runtime/sweeper.cpp

changeset 5040
9ce110b1d14a
parent 5038
0cfa93c2fcc4
child 5237
f2110083203d
     1.1 --- a/src/share/vm/runtime/sweeper.cpp	Thu May 02 16:41:09 2013 -0700
     1.2 +++ b/src/share/vm/runtime/sweeper.cpp	Thu May 02 18:50:05 2013 -0700
     1.3 @@ -136,13 +136,12 @@
     1.4  
     1.5  jint      NMethodSweeper::_locked_seen = 0;
     1.6  jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
     1.7 -bool      NMethodSweeper::_rescan = false;
     1.8 -bool      NMethodSweeper::_do_sweep = false;
     1.9 -bool      NMethodSweeper::_was_full = false;
    1.10 -jint      NMethodSweeper::_advise_to_sweep = 0;
    1.11 -jlong     NMethodSweeper::_last_was_full = 0;
    1.12 -uint      NMethodSweeper::_highest_marked = 0;
    1.13 -long      NMethodSweeper::_was_full_traversal = 0;
    1.14 +bool      NMethodSweeper::_resweep = false;
    1.15 +jint      NMethodSweeper::_flush_token = 0;
    1.16 +jlong     NMethodSweeper::_last_full_flush_time = 0;
    1.17 +int       NMethodSweeper::_highest_marked = 0;
    1.18 +int       NMethodSweeper::_dead_compile_ids = 0;
    1.19 +long      NMethodSweeper::_last_flush_traversal_id = 0;
    1.20  
    1.21  class MarkActivationClosure: public CodeBlobClosure {
    1.22  public:
    1.23 @@ -155,20 +154,16 @@
    1.24  };
    1.25  static MarkActivationClosure mark_activation_closure;
    1.26  
    1.27 +bool NMethodSweeper::sweep_in_progress() {
    1.28 +  return (_current != NULL);
    1.29 +}
    1.30 +
    1.31  void NMethodSweeper::scan_stacks() {
    1.32    assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
    1.33    if (!MethodFlushing) return;
    1.34 -  _do_sweep = true;
    1.35  
    1.36    // No need to synchronize access, since this is always executed at a
    1.37 -  // safepoint.  If we aren't in the middle of scan and a rescan
    1.38 -  // hasn't been requested then just return. If UseCodeCacheFlushing is on and
    1.39 -  // code cache flushing is in progress, don't skip sweeping to help make progress
    1.40 -  // clearing space in the code cache.
    1.41 -  if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
    1.42 -    _do_sweep = false;
    1.43 -    return;
    1.44 -  }
    1.45 +  // safepoint.
    1.46  
     1.47    // Make sure CompiledIC_lock is unlocked, since we might update some
    1.48    // inline caches. If it is, we just bail-out and try later.
    1.49 @@ -176,7 +171,7 @@
    1.50  
    1.51    // Check for restart
    1.52    assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
    1.53 -  if (_current == NULL) {
    1.54 +  if (!sweep_in_progress() && _resweep) {
    1.55      _seen        = 0;
    1.56      _invocations = NmethodSweepFraction;
    1.57      _current     = CodeCache::first_nmethod();
    1.58 @@ -187,39 +182,30 @@
    1.59      Threads::nmethods_do(&mark_activation_closure);
    1.60  
    1.61      // reset the flags since we started a scan from the beginning.
    1.62 -    _rescan = false;
    1.63 +    _resweep = false;
    1.64      _locked_seen = 0;
    1.65      _not_entrant_seen_on_stack = 0;
    1.66    }
    1.67  
    1.68    if (UseCodeCacheFlushing) {
    1.69 -    if (!CodeCache::needs_flushing()) {
    1.70 -      // scan_stacks() runs during a safepoint, no race with setters
    1.71 -      _advise_to_sweep = 0;
    1.72 +    // only allow new flushes after the interval is complete.
    1.73 +    jlong now           = os::javaTimeMillis();
    1.74 +    jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
    1.75 +    jlong curr_interval = now - _last_full_flush_time;
    1.76 +    if (curr_interval > max_interval) {
    1.77 +      _flush_token = 0;
    1.78      }
    1.79  
    1.80 -    if (was_full()) {
    1.81 -      // There was some progress so attempt to restart the compiler
    1.82 -      jlong now           = os::javaTimeMillis();
    1.83 -      jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
    1.84 -      jlong curr_interval = now - _last_was_full;
    1.85 -      if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
    1.86 -        CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
    1.87 -        set_was_full(false);
    1.88 -
    1.89 -        // Update the _last_was_full time so we can tell how fast the
    1.90 -        // code cache is filling up
    1.91 -        _last_was_full = os::javaTimeMillis();
    1.92 -
    1.93 -        log_sweep("restart_compiler");
    1.94 -      }
    1.95 +    if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) {
    1.96 +      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
    1.97 +      log_sweep("restart_compiler");
    1.98      }
    1.99    }
   1.100  }
   1.101  
   1.102  void NMethodSweeper::possibly_sweep() {
   1.103    assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
   1.104 -  if ((!MethodFlushing) || (!_do_sweep)) return;
   1.105 +  if (!MethodFlushing || !sweep_in_progress()) return;
   1.106  
   1.107    if (_invocations > 0) {
   1.108      // Only one thread at a time will sweep
   1.109 @@ -253,6 +239,14 @@
   1.110      tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
   1.111    }
   1.112  
   1.113 +  if (!CompileBroker::should_compile_new_jobs()) {
   1.114 +    // If we have turned off compilations we might as well do full sweeps
   1.115 +    // in order to reach the clean state faster. Otherwise the sleeping compiler
   1.116 +    // threads will slow down sweeping. After a few iterations the cache
   1.117 +    // will be clean and sweeping stops (_resweep will not be set)
   1.118 +    _invocations = 1;
   1.119 +  }
   1.120 +
   1.121    // We want to visit all nmethods after NmethodSweepFraction
   1.122    // invocations so divide the remaining number of nmethods by the
   1.123    // remaining number of invocations.  This is only an estimate since
   1.124 @@ -296,7 +290,7 @@
   1.125  
   1.126    assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
   1.127  
   1.128 -  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
   1.129 +  if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
   1.130      // we've completed a scan without making progress but there were
   1.131      // nmethods we were unable to process either because they were
    1.132      // locked or were still on stack.  We don't have to aggressively
   1.133 @@ -318,6 +312,13 @@
   1.134    if (_invocations == 1) {
   1.135      log_sweep("finished");
   1.136    }
   1.137 +
   1.138 +  // Sweeper is the only case where memory is released,
   1.139 +  // check here if it is time to restart the compiler.
   1.140 +  if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs() && !CodeCache::needs_flushing()) {
   1.141 +    CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
   1.142 +    log_sweep("restart_compiler");
   1.143 +  }
   1.144  }
   1.145  
   1.146  class NMethodMarker: public StackObj {
   1.147 @@ -392,7 +393,7 @@
   1.148          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
   1.149        }
   1.150        nm->mark_for_reclamation();
   1.151 -      _rescan = true;
   1.152 +      _resweep = true;
   1.153        SWEEP(nm);
   1.154      }
   1.155    } else if (nm->is_not_entrant()) {
   1.156 @@ -403,7 +404,7 @@
   1.157          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
   1.158        }
   1.159        nm->make_zombie();
   1.160 -      _rescan = true;
   1.161 +      _resweep = true;
   1.162        SWEEP(nm);
   1.163      } else {
   1.164        // Still alive, clean up its inline caches
   1.165 @@ -425,16 +426,15 @@
   1.166        release_nmethod(nm);
   1.167      } else {
   1.168        nm->make_zombie();
   1.169 -      _rescan = true;
   1.170 +      _resweep = true;
   1.171        SWEEP(nm);
   1.172      }
   1.173    } else {
   1.174      assert(nm->is_alive(), "should be alive");
   1.175  
   1.176      if (UseCodeCacheFlushing) {
   1.177 -      if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
   1.178 -          (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
   1.179 -          CodeCache::needs_flushing()) {
   1.180 +      if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
   1.181 +          (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
   1.182          // This method has not been called since the forced cleanup happened
   1.183          nm->make_not_entrant();
   1.184        }
   1.185 @@ -457,41 +457,27 @@
   1.186  // _code field is restored and the Method*/nmethod
   1.187  // go back to their normal state.
   1.188  void NMethodSweeper::handle_full_code_cache(bool is_full) {
   1.189 -  // Only the first one to notice can advise us to start early cleaning
   1.190 -  if (!is_full){
   1.191 -    jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
   1.192 -    if (old != 0) {
   1.193 -      return;
   1.194 +
   1.195 +  if (is_full) {
   1.196 +    // Since code cache is full, immediately stop new compiles
   1.197 +    if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
   1.198 +      log_sweep("disable_compiler");
   1.199      }
   1.200    }
   1.201  
   1.202 -  if (is_full) {
   1.203 -    // Since code cache is full, immediately stop new compiles
   1.204 -    bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
   1.205 -    if (!did_set) {
   1.206 -      // only the first to notice can start the cleaning,
   1.207 -      // others will go back and block
   1.208 -      return;
   1.209 -    }
   1.210 -    set_was_full(true);
   1.211 -
   1.212 -    // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
   1.213 -    jlong now = os::javaTimeMillis();
   1.214 -    jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
   1.215 -    jlong curr_interval = now - _last_was_full;
   1.216 -    if (curr_interval < max_interval) {
   1.217 -      _rescan = true;
   1.218 -      log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'",
   1.219 -                           curr_interval/1000);
   1.220 -      return;
   1.221 -    }
   1.222 +  // Make sure only one thread can flush
    1.223 +  // The token is reset after MinCodeCacheFlushingInterval in scan_stacks(),
   1.224 +  // no need to check the timeout here.
   1.225 +  jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
   1.226 +  if (old != 0) {
   1.227 +    return;
   1.228    }
   1.229  
   1.230    VM_HandleFullCodeCache op(is_full);
   1.231    VMThread::execute(&op);
   1.232  
   1.233 -  // rescan again as soon as possible
   1.234 -  _rescan = true;
   1.235 +  // resweep again as soon as possible
   1.236 +  _resweep = true;
   1.237  }
   1.238  
   1.239  void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
   1.240 @@ -500,62 +486,64 @@
   1.241  
   1.242    debug_only(jlong start = os::javaTimeMillis();)
   1.243  
   1.244 -  if ((!was_full()) && (is_full)) {
   1.245 -    if (!CodeCache::needs_flushing()) {
   1.246 -      log_sweep("restart_compiler");
   1.247 -      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
   1.248 -      return;
   1.249 -    }
   1.250 -  }
   1.251 +  // Traverse the code cache trying to dump the oldest nmethods
   1.252 +  int curr_max_comp_id = CompileBroker::get_compilation_id();
   1.253 +  int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;
   1.254  
   1.255 -  // Traverse the code cache trying to dump the oldest nmethods
   1.256 -  uint curr_max_comp_id = CompileBroker::get_compilation_id();
   1.257 -  uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
   1.258    log_sweep("start_cleaning");
   1.259  
   1.260    nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
   1.261    jint disconnected = 0;
   1.262    jint made_not_entrant  = 0;
   1.263 +  jint nmethod_count = 0;
   1.264 +
   1.265    while ((nm != NULL)){
   1.266 -    uint curr_comp_id = nm->compile_id();
   1.267 +    int curr_comp_id = nm->compile_id();
   1.268  
   1.269      // OSR methods cannot be flushed like this. Also, don't flush native methods
   1.270      // since they are part of the JDK in most cases
   1.271 -    if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
   1.272 -        (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {
   1.273 +    if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {
   1.274  
   1.275 -      if ((nm->method()->code() == nm)) {
   1.276 -        // This method has not been previously considered for
   1.277 -        // unloading or it was restored already
   1.278 -        CodeCache::speculatively_disconnect(nm);
   1.279 -        disconnected++;
   1.280 -      } else if (nm->is_speculatively_disconnected()) {
   1.281 -        // This method was previously considered for preemptive unloading and was not called since then
   1.282 -        CompilationPolicy::policy()->delay_compilation(nm->method());
   1.283 -        nm->make_not_entrant();
   1.284 -        made_not_entrant++;
   1.285 -      }
   1.286 +      // only count methods that can be speculatively disconnected
   1.287 +      nmethod_count++;
   1.288  
   1.289 -      if (curr_comp_id > _highest_marked) {
   1.290 -        _highest_marked = curr_comp_id;
   1.291 +      if (nm->is_in_use() && (curr_comp_id < flush_target)) {
   1.292 +        if ((nm->method()->code() == nm)) {
   1.293 +          // This method has not been previously considered for
   1.294 +          // unloading or it was restored already
   1.295 +          CodeCache::speculatively_disconnect(nm);
   1.296 +          disconnected++;
   1.297 +        } else if (nm->is_speculatively_disconnected()) {
   1.298 +          // This method was previously considered for preemptive unloading and was not called since then
   1.299 +          CompilationPolicy::policy()->delay_compilation(nm->method());
   1.300 +          nm->make_not_entrant();
   1.301 +          made_not_entrant++;
   1.302 +        }
   1.303 +
   1.304 +        if (curr_comp_id > _highest_marked) {
   1.305 +          _highest_marked = curr_comp_id;
   1.306 +        }
   1.307        }
   1.308      }
   1.309      nm = CodeCache::alive_nmethod(CodeCache::next(nm));
   1.310    }
   1.311  
    1.312 +  // Remember how many compile_ids weren't seen since the last flush.
   1.313 +  _dead_compile_ids = curr_max_comp_id - nmethod_count;
   1.314 +
   1.315    log_sweep("stop_cleaning",
   1.316                         "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
   1.317                         disconnected, made_not_entrant);
   1.318  
   1.319    // Shut off compiler. Sweeper will start over with a new stack scan and
   1.320    // traversal cycle and turn it back on if it clears enough space.
   1.321 -  if (was_full()) {
   1.322 -    _last_was_full = os::javaTimeMillis();
   1.323 -    CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
   1.324 +  if (is_full) {
   1.325 +    _last_full_flush_time = os::javaTimeMillis();
   1.326    }
   1.327  
   1.328    // After two more traversals the sweeper will get rid of unrestored nmethods
   1.329 -  _was_full_traversal = _traversals;
   1.330 +  _last_flush_traversal_id = _traversals;
   1.331 +  _resweep = true;
   1.332  #ifdef ASSERT
   1.333    jlong end = os::javaTimeMillis();
   1.334    if(PrintMethodFlushing && Verbose) {

mercurial