src/share/vm/runtime/sweeper.cpp

changeset 5792
510fbd28919c
parent 5734
ab274453d37f
child 6099
78da3894b86f
     1.1 --- a/src/share/vm/runtime/sweeper.cpp	Fri Sep 27 08:39:19 2013 +0200
     1.2 +++ b/src/share/vm/runtime/sweeper.cpp	Fri Sep 27 10:50:55 2013 +0200
     1.3 @@ -127,64 +127,79 @@
     1.4  #define SWEEP(nm)
     1.5  #endif
     1.6  
     1.7 +nmethod*  NMethodSweeper::_current         = NULL; // Current nmethod
     1.8 +long      NMethodSweeper::_traversals      = 0;    // Nof. stack traversals performed
     1.9 +int       NMethodSweeper::_seen            = 0;    // Nof. nmethods we have currently processed in current pass of CodeCache
    1.10 +int       NMethodSweeper::_flushed_count   = 0;    // Nof. nmethods flushed in current sweep
    1.11 +int       NMethodSweeper::_zombified_count = 0;    // Nof. nmethods made zombie in current sweep
    1.12 +int       NMethodSweeper::_marked_count    = 0;    // Nof. nmethods marked for reclaim in current sweep
    1.13  
    1.14 -long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
    1.15 -nmethod*  NMethodSweeper::_current = NULL;   // Current nmethod
    1.16 -int       NMethodSweeper::_seen = 0 ;        // No. of nmethods we have currently processed in current pass of CodeCache
    1.17 -int       NMethodSweeper::_flushed_count = 0;   // Nof. nmethods flushed in current sweep
    1.18 -int       NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
    1.19 -int       NMethodSweeper::_marked_count = 0;    // Nof. nmethods marked for reclaim in current sweep
    1.20 -
    1.21 -volatile int NMethodSweeper::_invocations = 0;   // No. of invocations left until we are completed with this pass
    1.22 +volatile int NMethodSweeper::_invocations   = 0; // Nof. invocations left until we are completed with this pass
    1.23  volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
    1.24  
    1.25 -jint      NMethodSweeper::_locked_seen = 0;
    1.26 +jint      NMethodSweeper::_locked_seen               = 0;
    1.27  jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
    1.28 -bool      NMethodSweeper::_resweep = false;
    1.29 -jint      NMethodSweeper::_flush_token = 0;
    1.30 -jlong     NMethodSweeper::_last_full_flush_time = 0;
    1.31 -int       NMethodSweeper::_highest_marked = 0;
    1.32 -int       NMethodSweeper::_dead_compile_ids = 0;
    1.33 -long      NMethodSweeper::_last_flush_traversal_id = 0;
    1.34 +bool      NMethodSweeper::_request_mark_phase        = false;
    1.35  
    1.36 -int       NMethodSweeper::_number_of_flushes = 0; // Total of full traversals caused by full cache
    1.37  int       NMethodSweeper::_total_nof_methods_reclaimed = 0;
    1.38 -jlong     NMethodSweeper::_total_time_sweeping = 0;
    1.39 -jlong     NMethodSweeper::_total_time_this_sweep = 0;
    1.40 -jlong     NMethodSweeper::_peak_sweep_time = 0;
    1.41 -jlong     NMethodSweeper::_peak_sweep_fraction_time = 0;
    1.42 -jlong     NMethodSweeper::_total_disconnect_time = 0;
    1.43 -jlong     NMethodSweeper::_peak_disconnect_time = 0;
    1.44 +jlong     NMethodSweeper::_total_time_sweeping         = 0;
    1.45 +jlong     NMethodSweeper::_total_time_this_sweep       = 0;
    1.46 +jlong     NMethodSweeper::_peak_sweep_time             = 0;
    1.47 +jlong     NMethodSweeper::_peak_sweep_fraction_time    = 0;
    1.48 +int       NMethodSweeper::_hotness_counter_reset_val   = 0;
    1.49 +
    1.50  
    1.51  class MarkActivationClosure: public CodeBlobClosure {
    1.52  public:
    1.53    virtual void do_code_blob(CodeBlob* cb) {
    1.54 -    // If we see an activation belonging to a non_entrant nmethod, we mark it.
    1.55 -    if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
    1.56 -      ((nmethod*)cb)->mark_as_seen_on_stack();
    1.57 +    if (cb->is_nmethod()) {
    1.58 +      nmethod* nm = (nmethod*)cb;
    1.59 +      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
    1.60 +      // If we see an activation belonging to a non_entrant nmethod, we mark it.
    1.61 +      if (nm->is_not_entrant()) {
    1.62 +        nm->mark_as_seen_on_stack();
    1.63 +      }
    1.64      }
    1.65    }
    1.66  };
    1.67  static MarkActivationClosure mark_activation_closure;
    1.68  
    1.69 +class SetHotnessClosure: public CodeBlobClosure {
    1.70 +public:
    1.71 +  virtual void do_code_blob(CodeBlob* cb) {
    1.72 +    if (cb->is_nmethod()) {
    1.73 +      nmethod* nm = (nmethod*)cb;
    1.74 +      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
    1.75 +    }
    1.76 +  }
    1.77 +};
    1.78 +static SetHotnessClosure set_hotness_closure;
    1.79 +
    1.80 +
    1.81 +int NMethodSweeper::hotness_counter_reset_val() {
    1.82 +  if (_hotness_counter_reset_val == 0) {
    1.83 +    _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
    1.84 +  }
    1.85 +  return _hotness_counter_reset_val;
    1.86 +}
    1.87  bool NMethodSweeper::sweep_in_progress() {
    1.88    return (_current != NULL);
    1.89  }
    1.90  
    1.91 -void NMethodSweeper::scan_stacks() {
    1.92 +// Scans the stacks of all Java threads and marks activations of not-entrant methods.
    1.93 +// No need to synchronize access, since 'mark_active_nmethods' is always executed at a
    1.94 +// safepoint.
    1.95 +void NMethodSweeper::mark_active_nmethods() {
    1.96    assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
    1.97 -  if (!MethodFlushing) return;
    1.98 -
    1.99 -  // No need to synchronize access, since this is always executed at a
   1.100 -  // safepoint.
   1.101 -
   1.102 -  // Make sure CompiledIC_lock in unlocked, since we might update some
   1.103 -  // inline caches. If it is, we just bail-out and try later.
   1.104 -  if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
   1.105 +  // If we do not want to reclaim not-entrant or zombie methods there is no need
   1.106 +  // to scan stacks
   1.107 +  if (!MethodFlushing) {
   1.108 +    return;
   1.109 +  }
   1.110  
   1.111    // Check for restart
   1.112    assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
   1.113 -  if (!sweep_in_progress() && _resweep) {
   1.114 +  if (!sweep_in_progress() && need_marking_phase()) {
   1.115      _seen        = 0;
   1.116      _invocations = NmethodSweepFraction;
   1.117      _current     = CodeCache::first_nmethod();
   1.118 @@ -197,30 +212,22 @@
   1.119      Threads::nmethods_do(&mark_activation_closure);
   1.120  
   1.121      // reset the flags since we started a scan from the beginning.
   1.122 -    _resweep = false;
   1.123 +    reset_nmethod_marking();
   1.124      _locked_seen = 0;
   1.125      _not_entrant_seen_on_stack = 0;
   1.126 +  } else {
   1.127 +    // Only set hotness counter
   1.128 +    Threads::nmethods_do(&set_hotness_closure);
   1.129    }
   1.130  
   1.131 -  if (UseCodeCacheFlushing) {
   1.132 -    // only allow new flushes after the interval is complete.
   1.133 -    jlong now           = os::javaTimeMillis();
   1.134 -    jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
   1.135 -    jlong curr_interval = now - _last_full_flush_time;
   1.136 -    if (curr_interval > max_interval) {
   1.137 -      _flush_token = 0;
   1.138 -    }
   1.139 -
   1.140 -    if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) {
   1.141 -      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
   1.142 -      log_sweep("restart_compiler");
   1.143 -    }
   1.144 -  }
   1.145 +  OrderAccess::storestore();
   1.146  }
   1.147  
   1.148  void NMethodSweeper::possibly_sweep() {
   1.149    assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
   1.150 -  if (!MethodFlushing || !sweep_in_progress()) return;
   1.151 +  if (!MethodFlushing || !sweep_in_progress()) {
   1.152 +    return;
   1.153 +  }
   1.154  
   1.155    if (_invocations > 0) {
   1.156      // Only one thread at a time will sweep
   1.157 @@ -258,8 +265,7 @@
   1.158    if (!CompileBroker::should_compile_new_jobs()) {
   1.159      // If we have turned off compilations we might as well do full sweeps
   1.160      // in order to reach the clean state faster. Otherwise the sleeping compiler
   1.161 -    // threads will slow down sweeping. After a few iterations the cache
   1.162 -    // will be clean and sweeping stops (_resweep will not be set)
   1.163 +    // threads will slow down sweeping.
   1.164      _invocations = 1;
   1.165    }
   1.166  
   1.167 @@ -271,9 +277,11 @@
   1.168    int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
   1.169    int swept_count = 0;
   1.170  
   1.171 +
   1.172    assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
   1.173    assert(!CodeCache_lock->owned_by_self(), "just checking");
   1.174  
   1.175 +  int freed_memory = 0;
   1.176    {
   1.177      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   1.178  
   1.179 @@ -299,7 +307,7 @@
   1.180        // Now ready to process nmethod and give up CodeCache_lock
   1.181        {
   1.182          MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   1.183 -        process_nmethod(_current);
   1.184 +        freed_memory += process_nmethod(_current);
   1.185        }
   1.186        _seen++;
   1.187        _current = next;
   1.188 @@ -308,11 +316,11 @@
   1.189  
   1.190    assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
   1.191  
   1.192 -  if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
   1.193 +  if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) {
   1.194      // we've completed a scan without making progress but there were
   1.195      // nmethods we were unable to process either because they were
   1.196 -    // locked or were still on stack.  We don't have to aggresively
   1.197 -    // clean them up so just stop scanning.  We could scan once more
   1.198 +    // locked or were still on stack. We don't have to aggressively
   1.199 +    // clean them up so just stop scanning. We could scan once more
   1.200      // but that complicates the control logic and it's unlikely to
   1.201      // matter much.
   1.202      if (PrintMethodFlushing) {
   1.203 @@ -351,9 +359,16 @@
   1.204      log_sweep("finished");
   1.205    }
   1.206  
   1.207 -  // Sweeper is the only case where memory is released,
   1.208 -  // check here if it is time to restart the compiler.
   1.209 -  if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs() && !CodeCache::needs_flushing()) {
   1.210 +  // Sweeper is the only case where memory is released, check here if it
   1.211 +  // is time to restart the compiler. Only checking if there is a certain
   1.212 +  // amount of free memory in the code cache might lead to re-enabling
   1.213 +  // compilation although no memory has been released. For example, there are
   1.214 +  // cases when compilation was disabled although there is 4MB (or more) free
   1.215 +  // memory in the code cache. The reason is code cache fragmentation. Therefore,
   1.216 +  // it only makes sense to re-enable compilation if we have actually freed memory.
   1.217 +  // Note that typically several kB are released for sweeping 16MB of the code
    1.218 +  // cache. As a result, we require 'freed_memory' > 0 to restart the compiler.
   1.219 +  if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0))) {
   1.220      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
   1.221      log_sweep("restart_compiler");
   1.222    }
   1.223 @@ -367,8 +382,8 @@
   1.224      _thread = CompilerThread::current();
   1.225      if (!nm->is_zombie() && !nm->is_unloaded()) {
   1.226        // Only expose live nmethods for scanning
   1.227 -    _thread->set_scanned_nmethod(nm);
   1.228 -  }
   1.229 +      _thread->set_scanned_nmethod(nm);
   1.230 +    }
   1.231    }
   1.232    ~NMethodMarker() {
   1.233      _thread->set_scanned_nmethod(NULL);
   1.234 @@ -392,20 +407,20 @@
   1.235    nm->flush();
   1.236  }
   1.237  
   1.238 -void NMethodSweeper::process_nmethod(nmethod *nm) {
   1.239 +int NMethodSweeper::process_nmethod(nmethod *nm) {
   1.240    assert(!CodeCache_lock->owned_by_self(), "just checking");
   1.241  
   1.242 +  int freed_memory = 0;
   1.243    // Make sure this nmethod doesn't get unloaded during the scan,
   1.244 -  // since the locks acquired below might safepoint.
     1.245 +  // since safepoints may happen while acquiring the locks below.
   1.246    NMethodMarker nmm(nm);
   1.247 -
   1.248    SWEEP(nm);
   1.249  
   1.250    // Skip methods that are currently referenced by the VM
   1.251    if (nm->is_locked_by_vm()) {
   1.252      // But still remember to clean-up inline caches for alive nmethods
   1.253      if (nm->is_alive()) {
   1.254 -      // Clean-up all inline caches that points to zombie/non-reentrant methods
   1.255 +      // Clean inline caches that point to zombie/non-entrant methods
   1.256        MutexLocker cl(CompiledIC_lock);
   1.257        nm->cleanup_inline_caches();
   1.258        SWEEP(nm);
   1.259 @@ -413,18 +428,19 @@
   1.260        _locked_seen++;
   1.261        SWEEP(nm);
   1.262      }
   1.263 -    return;
   1.264 +    return freed_memory;
   1.265    }
   1.266  
   1.267    if (nm->is_zombie()) {
   1.268 -    // If it is first time, we see nmethod then we mark it. Otherwise,
   1.269 -    // we reclame it. When we have seen a zombie method twice, we know that
   1.270 +    // If it is the first time we see nmethod then we mark it. Otherwise,
   1.271 +    // we reclaim it. When we have seen a zombie method twice, we know that
   1.272      // there are no inline caches that refer to it.
   1.273      if (nm->is_marked_for_reclamation()) {
   1.274        assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
   1.275        if (PrintMethodFlushing && Verbose) {
   1.276          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
   1.277        }
   1.278 +      freed_memory = nm->total_size();
   1.279        release_nmethod(nm);
   1.280        _flushed_count++;
   1.281      } else {
   1.282 @@ -432,19 +448,19 @@
   1.283          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
   1.284        }
   1.285        nm->mark_for_reclamation();
   1.286 -      _resweep = true;
   1.287 +      request_nmethod_marking();
   1.288        _marked_count++;
   1.289        SWEEP(nm);
   1.290      }
   1.291    } else if (nm->is_not_entrant()) {
   1.292 -    // If there is no current activations of this method on the
   1.293 +    // If there are no current activations of this method on the
   1.294      // stack we can safely convert it to a zombie method
   1.295      if (nm->can_not_entrant_be_converted()) {
   1.296        if (PrintMethodFlushing && Verbose) {
   1.297          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
   1.298        }
   1.299        nm->make_zombie();
   1.300 -      _resweep = true;
   1.301 +      request_nmethod_marking();
   1.302        _zombified_count++;
   1.303        SWEEP(nm);
   1.304      } else {
   1.305 @@ -459,159 +475,57 @@
   1.306      }
   1.307    } else if (nm->is_unloaded()) {
   1.308      // Unloaded code, just make it a zombie
   1.309 -    if (PrintMethodFlushing && Verbose)
   1.310 +    if (PrintMethodFlushing && Verbose) {
   1.311        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
   1.312 -
   1.313 +    }
   1.314      if (nm->is_osr_method()) {
   1.315        SWEEP(nm);
   1.316        // No inline caches will ever point to osr methods, so we can just remove it
   1.317 +      freed_memory = nm->total_size();
   1.318        release_nmethod(nm);
   1.319        _flushed_count++;
   1.320      } else {
   1.321        nm->make_zombie();
   1.322 -      _resweep = true;
   1.323 +      request_nmethod_marking();
   1.324        _zombified_count++;
   1.325        SWEEP(nm);
   1.326      }
   1.327    } else {
   1.328 -    assert(nm->is_alive(), "should be alive");
   1.329 -
   1.330      if (UseCodeCacheFlushing) {
   1.331 -      if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
   1.332 -          (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
   1.333 -        // This method has not been called since the forced cleanup happened
   1.334 -        nm->make_not_entrant();
   1.335 +      if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
   1.336 +        // Do not make native methods and OSR-methods not-entrant
   1.337 +        nm->dec_hotness_counter();
   1.338 +        // Get the initial value of the hotness counter. This value depends on the
   1.339 +        // ReservedCodeCacheSize
   1.340 +        int reset_val = hotness_counter_reset_val();
   1.341 +        int time_since_reset = reset_val - nm->hotness_counter();
   1.342 +        double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
   1.343 +        // The less free space in the code cache we have - the bigger reverse_free_ratio() is.
   1.344 +        // I.e., 'threshold' increases with lower available space in the code cache and a higher
   1.345 +        // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
   1.346 +        // value until it is reset by stack walking - is smaller than the computed threshold, the
   1.347 +        // corresponding nmethod is considered for removal.
   1.348 +        if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) {
   1.349 +          // A method is marked as not-entrant if the method is
   1.350 +          // 1) 'old enough': nm->hotness_counter() < threshold
   1.351 +          // 2) The method was in_use for a minimum amount of time: (time_since_reset > 10)
   1.352 +          //    The second condition is necessary if we are dealing with very small code cache
   1.353 +          //    sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
   1.354 +          //    The second condition ensures that methods are not immediately made not-entrant
   1.355 +          //    after compilation.
   1.356 +          nm->make_not_entrant();
   1.357 +          request_nmethod_marking();
   1.358 +        }
   1.359        }
   1.360      }
   1.361 -
   1.362 -    // Clean-up all inline caches that points to zombie/non-reentrant methods
    1.363 +    // Clean-up all inline caches that point to zombie/non-entrant methods
   1.364      MutexLocker cl(CompiledIC_lock);
   1.365      nm->cleanup_inline_caches();
   1.366      SWEEP(nm);
   1.367    }
   1.368 +  return freed_memory;
   1.369  }
   1.370  
   1.371 -// Code cache unloading: when compilers notice the code cache is getting full,
   1.372 -// they will call a vm op that comes here. This code attempts to speculatively
   1.373 -// unload the oldest half of the nmethods (based on the compile job id) by
   1.374 -// saving the old code in a list in the CodeCache. Then
   1.375 -// execution resumes. If a method so marked is not called by the second sweeper
   1.376 -// stack traversal after the current one, the nmethod will be marked non-entrant and
   1.377 -// got rid of by normal sweeping. If the method is called, the Method*'s
   1.378 -// _code field is restored and the Method*/nmethod
   1.379 -// go back to their normal state.
   1.380 -void NMethodSweeper::handle_full_code_cache(bool is_full) {
   1.381 -
   1.382 -  if (is_full) {
   1.383 -    // Since code cache is full, immediately stop new compiles
   1.384 -    if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
   1.385 -      log_sweep("disable_compiler");
   1.386 -    }
   1.387 -  }
   1.388 -
   1.389 -  // Make sure only one thread can flush
   1.390 -  // The token is reset after CodeCacheMinimumFlushInterval in scan stacks,
   1.391 -  // no need to check the timeout here.
   1.392 -  jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
   1.393 -  if (old != 0) {
   1.394 -    return;
   1.395 -  }
   1.396 -
   1.397 -  VM_HandleFullCodeCache op(is_full);
   1.398 -  VMThread::execute(&op);
   1.399 -
   1.400 -  // resweep again as soon as possible
   1.401 -  _resweep = true;
   1.402 -}
   1.403 -
   1.404 -void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
   1.405 -  // If there was a race in detecting full code cache, only run
   1.406 -  // one vm op for it or keep the compiler shut off
   1.407 -
   1.408 -  jlong disconnect_start_counter = os::elapsed_counter();
   1.409 -
   1.410 -  // Traverse the code cache trying to dump the oldest nmethods
   1.411 -  int curr_max_comp_id = CompileBroker::get_compilation_id();
   1.412 -  int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;
   1.413 -
   1.414 -  log_sweep("start_cleaning");
   1.415 -
   1.416 -  nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
   1.417 -  jint disconnected = 0;
   1.418 -  jint made_not_entrant  = 0;
   1.419 -  jint nmethod_count = 0;
   1.420 -
   1.421 -  while ((nm != NULL)){
   1.422 -    int curr_comp_id = nm->compile_id();
   1.423 -
   1.424 -    // OSR methods cannot be flushed like this. Also, don't flush native methods
   1.425 -    // since they are part of the JDK in most cases
   1.426 -    if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {
   1.427 -
   1.428 -      // only count methods that can be speculatively disconnected
   1.429 -      nmethod_count++;
   1.430 -
   1.431 -      if (nm->is_in_use() && (curr_comp_id < flush_target)) {
   1.432 -        if ((nm->method()->code() == nm)) {
   1.433 -          // This method has not been previously considered for
   1.434 -          // unloading or it was restored already
   1.435 -          CodeCache::speculatively_disconnect(nm);
   1.436 -          disconnected++;
   1.437 -        } else if (nm->is_speculatively_disconnected()) {
   1.438 -          // This method was previously considered for preemptive unloading and was not called since then
   1.439 -          CompilationPolicy::policy()->delay_compilation(nm->method());
   1.440 -          nm->make_not_entrant();
   1.441 -          made_not_entrant++;
   1.442 -        }
   1.443 -
   1.444 -        if (curr_comp_id > _highest_marked) {
   1.445 -          _highest_marked = curr_comp_id;
   1.446 -        }
   1.447 -      }
   1.448 -    }
   1.449 -    nm = CodeCache::alive_nmethod(CodeCache::next(nm));
   1.450 -  }
   1.451 -
   1.452 -  // remember how many compile_ids wheren't seen last flush.
   1.453 -  _dead_compile_ids = curr_max_comp_id - nmethod_count;
   1.454 -
   1.455 -  log_sweep("stop_cleaning",
   1.456 -                       "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
   1.457 -                       disconnected, made_not_entrant);
   1.458 -
   1.459 -  // Shut off compiler. Sweeper will start over with a new stack scan and
   1.460 -  // traversal cycle and turn it back on if it clears enough space.
   1.461 -  if (is_full) {
   1.462 -    _last_full_flush_time = os::javaTimeMillis();
   1.463 -  }
   1.464 -
   1.465 -  jlong disconnect_end_counter = os::elapsed_counter();
   1.466 -  jlong disconnect_time = disconnect_end_counter - disconnect_start_counter;
   1.467 -  _total_disconnect_time += disconnect_time;
   1.468 -  _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);
   1.469 -
   1.470 -  EventCleanCodeCache event(UNTIMED);
   1.471 -  if (event.should_commit()) {
   1.472 -    event.set_starttime(disconnect_start_counter);
   1.473 -    event.set_endtime(disconnect_end_counter);
   1.474 -    event.set_disconnectedCount(disconnected);
   1.475 -    event.set_madeNonEntrantCount(made_not_entrant);
   1.476 -    event.commit();
   1.477 -  }
   1.478 -  _number_of_flushes++;
   1.479 -
   1.480 -  // After two more traversals the sweeper will get rid of unrestored nmethods
   1.481 -  _last_flush_traversal_id = _traversals;
   1.482 -  _resweep = true;
   1.483 -#ifdef ASSERT
   1.484 -
   1.485 -  if(PrintMethodFlushing && Verbose) {
   1.486 -    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time);
   1.487 -  }
   1.488 -#endif
   1.489 -}
   1.490 -
   1.491 -
   1.492  // Print out some state information about the current sweep and the
   1.493  // state of the code cache if it's requested.
   1.494  void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {

mercurial