8020151: PSR:PERF Large performance regressions when code cache is filled

author:    anoll
date:      Fri, 27 Sep 2013 10:50:55 +0200
changeset: 5792:510fbd28919c
parent:    5791:c9ccd7b85f20
child:     5794:1c3486050433

8020151: PSR:PERF Large performance regressions when code cache is filled
Summary: Code cache sweeping based on method hotness; removed speculatively disconnect
Reviewed-by: kvn, iveresov

src/share/vm/code/codeCache.cpp
src/share/vm/code/codeCache.hpp
src/share/vm/code/nmethod.cpp
src/share/vm/code/nmethod.hpp
src/share/vm/compiler/compileBroker.cpp
src/share/vm/oops/method.cpp
src/share/vm/runtime/arguments.cpp
src/share/vm/runtime/globals.hpp
src/share/vm/runtime/safepoint.cpp
src/share/vm/runtime/sweeper.cpp
src/share/vm/runtime/sweeper.hpp
src/share/vm/runtime/vmStructs.cpp
src/share/vm/runtime/vm_operations.cpp
src/share/vm/runtime/vm_operations.hpp
src/share/vm/trace/trace.xml
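
Before the per-file diffs, here is a minimal standalone sketch of the hotness-based sweeping that the summary line describes: hotness counters are reset for methods seen on a stack during safepoint scanning and decremented by the sweeper; methods whose counter falls below a cache-pressure-dependent threshold are made not-entrant. The struct, constants and function names below are illustrative stand-ins (the real logic lives in NMethodSweeper::mark_active_nmethods() and NMethodSweeper::process_nmethod() in this changeset), not HotSpot code.

// Toy model of the new sweeping heuristic (assumed names and constants).
#include <vector>

static const int kReservedCodeCacheMB  = 48;  // stand-in for ReservedCodeCacheSize / M
static const int kNmethodSweepActivity = 10;  // stand-in for the new NmethodSweepActivity flag

struct ToyNMethod {
  int  hotness     = 0;
  bool not_entrant = false;
};

// Mirrors hotness_counter_reset_val(): two units of hotness per MB of reserved cache.
int hotness_counter_reset_val() { return kReservedCodeCacheMB * 2; }

// "Safepoint" side: every method found active on some thread's stack gets its
// hotness reset to the maximum value.
void mark_active(std::vector<ToyNMethod*>& active_on_stack) {
  for (ToyNMethod* nm : active_on_stack) {
    nm->hotness = hotness_counter_reset_val();
  }
}

// Sweeper side: decrement hotness and retire methods that fall below a
// threshold that grows as the code cache fills up (reverse_free_ratio).
void sweep(std::vector<ToyNMethod>& code_cache, double reverse_free_ratio) {
  const int    reset_val = hotness_counter_reset_val();
  const double threshold = -reset_val + reverse_free_ratio * kNmethodSweepActivity;
  for (ToyNMethod& nm : code_cache) {
    if (nm.not_entrant) continue;
    nm.hotness--;
    const int time_since_reset = reset_val - nm.hotness;
    if (nm.hotness < threshold && time_since_reset > 10) {
      nm.not_entrant = true;  // the real sweeper calls nm->make_not_entrant()
    }
  }
}
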
     1.1 --- a/src/share/vm/code/codeCache.cpp	Fri Sep 27 08:39:19 2013 +0200
     1.2 +++ b/src/share/vm/code/codeCache.cpp	Fri Sep 27 10:50:55 2013 +0200
     1.3 @@ -124,7 +124,6 @@
     1.4  int CodeCache::_number_of_nmethods_with_dependencies = 0;
     1.5  bool CodeCache::_needs_cache_clean = false;
     1.6  nmethod* CodeCache::_scavenge_root_nmethods = NULL;
     1.7 -nmethod* CodeCache::_saved_nmethods = NULL;
     1.8  
     1.9  int CodeCache::_codemem_full_count = 0;
    1.10  
    1.11 @@ -464,96 +463,11 @@
    1.12  }
    1.13  #endif //PRODUCT
    1.14  
    1.15 -/**
    1.16 - * Remove and return nmethod from the saved code list in order to reanimate it.
    1.17 - */
    1.18 -nmethod* CodeCache::reanimate_saved_code(Method* m) {
    1.19 -  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    1.20 -  nmethod* saved = _saved_nmethods;
    1.21 -  nmethod* prev = NULL;
    1.22 -  while (saved != NULL) {
    1.23 -    if (saved->is_in_use() && saved->method() == m) {
    1.24 -      if (prev != NULL) {
    1.25 -        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
    1.26 -      } else {
    1.27 -        _saved_nmethods = saved->saved_nmethod_link();
    1.28 -      }
    1.29 -      assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
    1.30 -      saved->set_speculatively_disconnected(false);
    1.31 -      saved->set_saved_nmethod_link(NULL);
    1.32 -      if (PrintMethodFlushing) {
    1.33 -        saved->print_on(tty, " ### nmethod is reconnected");
    1.34 -      }
    1.35 -      if (LogCompilation && (xtty != NULL)) {
    1.36 -        ttyLocker ttyl;
    1.37 -        xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
    1.38 -        xtty->method(m);
    1.39 -        xtty->stamp();
    1.40 -        xtty->end_elem();
    1.41 -      }
    1.42 -      return saved;
    1.43 -    }
    1.44 -    prev = saved;
    1.45 -    saved = saved->saved_nmethod_link();
    1.46 -  }
    1.47 -  return NULL;
    1.48 -}
    1.49 -
    1.50 -/**
    1.51 - * Remove nmethod from the saved code list in order to discard it permanently
    1.52 - */
    1.53 -void CodeCache::remove_saved_code(nmethod* nm) {
    1.54 -  // For conc swpr this will be called with CodeCache_lock taken by caller
    1.55 -  assert_locked_or_safepoint(CodeCache_lock);
    1.56 -  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
    1.57 -  nmethod* saved = _saved_nmethods;
    1.58 -  nmethod* prev = NULL;
    1.59 -  while (saved != NULL) {
    1.60 -    if (saved == nm) {
    1.61 -      if (prev != NULL) {
    1.62 -        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
    1.63 -      } else {
    1.64 -        _saved_nmethods = saved->saved_nmethod_link();
    1.65 -      }
    1.66 -      if (LogCompilation && (xtty != NULL)) {
    1.67 -        ttyLocker ttyl;
    1.68 -        xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
    1.69 -        xtty->stamp();
    1.70 -        xtty->end_elem();
    1.71 -      }
    1.72 -      return;
    1.73 -    }
    1.74 -    prev = saved;
    1.75 -    saved = saved->saved_nmethod_link();
    1.76 -  }
    1.77 -  ShouldNotReachHere();
    1.78 -}
    1.79 -
    1.80 -void CodeCache::speculatively_disconnect(nmethod* nm) {
    1.81 -  assert_locked_or_safepoint(CodeCache_lock);
    1.82 -  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
    1.83 -  nm->set_saved_nmethod_link(_saved_nmethods);
    1.84 -  _saved_nmethods = nm;
    1.85 -  if (PrintMethodFlushing) {
    1.86 -    nm->print_on(tty, " ### nmethod is speculatively disconnected");
    1.87 -  }
    1.88 -  if (LogCompilation && (xtty != NULL)) {
    1.89 -    ttyLocker ttyl;
    1.90 -    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
    1.91 -    xtty->method(nm->method());
    1.92 -    xtty->stamp();
    1.93 -    xtty->end_elem();
    1.94 -  }
    1.95 -  nm->method()->clear_code();
    1.96 -  nm->set_speculatively_disconnected(true);
    1.97 -}
    1.98 -
    1.99  
   1.100  void CodeCache::gc_prologue() {
   1.101    assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
   1.102  }
   1.103  
   1.104 -
   1.105  void CodeCache::gc_epilogue() {
   1.106    assert_locked_or_safepoint(CodeCache_lock);
   1.107    FOR_ALL_ALIVE_BLOBS(cb) {
     2.1 --- a/src/share/vm/code/codeCache.hpp	Fri Sep 27 08:39:19 2013 +0200
     2.2 +++ b/src/share/vm/code/codeCache.hpp	Fri Sep 27 10:50:55 2013 +0200
     2.3 @@ -57,7 +57,6 @@
     2.4    static int _number_of_nmethods_with_dependencies;
     2.5    static bool _needs_cache_clean;
     2.6    static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
     2.7 -  static nmethod* _saved_nmethods;          // Linked list of speculatively disconnected nmethods.
     2.8  
     2.9    static void verify_if_often() PRODUCT_RETURN;
    2.10  
    2.11 @@ -167,17 +166,12 @@
    2.12    static size_t  capacity()                      { return _heap->capacity(); }
    2.13    static size_t  max_capacity()                  { return _heap->max_capacity(); }
    2.14    static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }
    2.15 -  static bool    needs_flushing()                { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
    2.16    static double  reverse_free_ratio();
    2.17  
    2.18    static bool needs_cache_clean()                { return _needs_cache_clean; }
    2.19    static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
    2.20    static void clear_inline_caches();             // clear all inline caches
    2.21  
    2.22 -  static nmethod* reanimate_saved_code(Method* m);
    2.23 -  static void remove_saved_code(nmethod* nm);
    2.24 -  static void speculatively_disconnect(nmethod* nm);
    2.25 -
    2.26    // Deoptimization
    2.27    static int  mark_for_deoptimization(DepChange& changes);
    2.28  #ifdef HOTSWAP
     3.1 --- a/src/share/vm/code/nmethod.cpp	Fri Sep 27 08:39:19 2013 +0200
     3.2 +++ b/src/share/vm/code/nmethod.cpp	Fri Sep 27 10:50:55 2013 +0200
     3.3 @@ -462,7 +462,6 @@
     3.4    _state                      = alive;
     3.5    _marked_for_reclamation     = 0;
     3.6    _has_flushed_dependencies   = 0;
     3.7 -  _speculatively_disconnected = 0;
     3.8    _has_unsafe_access          = 0;
     3.9    _has_method_handle_invokes  = 0;
    3.10    _lazy_critical_native       = 0;
    3.11 @@ -481,7 +480,6 @@
    3.12    _osr_link                = NULL;
    3.13    _scavenge_root_link      = NULL;
    3.14    _scavenge_root_state     = 0;
    3.15 -  _saved_nmethod_link      = NULL;
    3.16    _compiler                = NULL;
    3.17  
    3.18  #ifdef HAVE_DTRACE_H
    3.19 @@ -686,6 +684,7 @@
    3.20      _osr_entry_point         = NULL;
    3.21      _exception_cache         = NULL;
    3.22      _pc_desc_cache.reset_to(NULL);
    3.23 +    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
    3.24  
    3.25      code_buffer->copy_values_to(this);
    3.26      if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
    3.27 @@ -770,6 +769,7 @@
    3.28      _osr_entry_point         = NULL;
    3.29      _exception_cache         = NULL;
    3.30      _pc_desc_cache.reset_to(NULL);
    3.31 +    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
    3.32  
    3.33      code_buffer->copy_values_to(this);
    3.34      debug_only(verify_scavenge_root_oops());
    3.35 @@ -842,6 +842,7 @@
    3.36      _comp_level              = comp_level;
    3.37      _compiler                = compiler;
    3.38      _orig_pc_offset          = orig_pc_offset;
    3.39 +    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
    3.40  
    3.41      // Section offsets
    3.42      _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
    3.43 @@ -1176,7 +1177,7 @@
    3.44  
    3.45  // This is a private interface with the sweeper.
    3.46  void nmethod::mark_as_seen_on_stack() {
    3.47 -  assert(is_not_entrant(), "must be a non-entrant method");
    3.48 +  assert(is_alive(), "Must be an alive method");
    3.49    // Set the traversal mark to ensure that the sweeper does 2
    3.50    // cleaning passes before moving to zombie.
    3.51    set_stack_traversal_mark(NMethodSweeper::traversal_count());
    3.52 @@ -1261,7 +1262,7 @@
    3.53  
    3.54    set_osr_link(NULL);
    3.55    //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
    3.56 -  NMethodSweeper::notify(this);
    3.57 +  NMethodSweeper::notify();
    3.58  }
    3.59  
    3.60  void nmethod::invalidate_osr_method() {
    3.61 @@ -1351,6 +1352,15 @@
    3.62        nmethod_needs_unregister = true;
    3.63      }
    3.64  
    3.65 +    // Must happen before state change. Otherwise we have a race condition in
    3.66 +    // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
    3.67 +    // transition its state from 'not_entrant' to 'zombie' without having to wait
    3.68 +    // for stack scanning.
    3.69 +    if (state == not_entrant) {
    3.70 +      mark_as_seen_on_stack();
    3.71 +      OrderAccess::storestore();
    3.72 +    }
    3.73 +
    3.74      // Change state
    3.75      _state = state;
    3.76  
    3.77 @@ -1369,11 +1379,6 @@
    3.78        HandleMark hm;
    3.79        method()->clear_code();
    3.80      }
    3.81 -
    3.82 -    if (state == not_entrant) {
    3.83 -      mark_as_seen_on_stack();
    3.84 -    }
    3.85 -
    3.86    } // leave critical region under Patching_lock
    3.87  
    3.88    // When the nmethod becomes zombie it is no longer alive so the
    3.89 @@ -1416,7 +1421,7 @@
    3.90    }
    3.91  
    3.92    // Make sweeper aware that there is a zombie method that needs to be removed
    3.93 -  NMethodSweeper::notify(this);
    3.94 +  NMethodSweeper::notify();
    3.95  
    3.96    return true;
    3.97  }
    3.98 @@ -1451,10 +1456,6 @@
    3.99      CodeCache::drop_scavenge_root_nmethod(this);
   3.100    }
   3.101  
   3.102 -  if (is_speculatively_disconnected()) {
   3.103 -    CodeCache::remove_saved_code(this);
   3.104 -  }
   3.105 -
   3.106  #ifdef SHARK
   3.107    ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
   3.108  #endif // SHARK
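
The reordering above in nmethod.cpp (mark_as_seen_on_stack() plus a storestore barrier before the state change) is easiest to see in isolation. The following is a simplified, assumed model using C++11 atomics instead of OrderAccess; the types and names are invented for illustration. The point is that a sweeper which observes the not_entrant state is guaranteed to also observe a fresh traversal mark, so it cannot zombify the method before the next stack scan.

#include <atomic>

struct ToyNMethod {
  std::atomic<long> stack_traversal_mark{0};
  std::atomic<int>  state{0};                // 0 = in_use, 1 = not_entrant
};

// Updated only at safepoints in the real VM; a plain counter suffices for the sketch.
long current_traversal = 5;

void make_not_entrant(ToyNMethod* nm) {
  // Corresponds to mark_as_seen_on_stack(): record the current traversal first.
  nm->stack_traversal_mark.store(current_traversal, std::memory_order_relaxed);
  // Corresponds to OrderAccess::storestore() followed by the state change: the
  // release store guarantees the mark is visible no later than the new state.
  nm->state.store(1, std::memory_order_release);
}

bool can_be_zombified(const ToyNMethod* nm) {
  // Sweeper side: only not_entrant methods are candidates ...
  if (nm->state.load(std::memory_order_acquire) != 1) return false;
  // ... and only if a full stack scan has completed since the mark was set.
  return nm->stack_traversal_mark.load(std::memory_order_relaxed) < current_traversal;
}
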
     4.1 --- a/src/share/vm/code/nmethod.hpp	Fri Sep 27 08:39:19 2013 +0200
     4.2 +++ b/src/share/vm/code/nmethod.hpp	Fri Sep 27 10:50:55 2013 +0200
     4.3 @@ -119,7 +119,6 @@
     4.4    // To support simple linked-list chaining of nmethods:
     4.5    nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head
     4.6    nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
     4.7 -  nmethod*  _saved_nmethod_link; // from CodeCache::speculatively_disconnect
     4.8  
     4.9    static nmethod* volatile _oops_do_mark_nmethods;
    4.10    nmethod*        volatile _oops_do_mark_link;
    4.11 @@ -165,7 +164,6 @@
    4.12  
    4.13    // protected by CodeCache_lock
    4.14    bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)
    4.15 -  bool _speculatively_disconnected;          // Marked for potential unload
    4.16  
    4.17    bool _marked_for_reclamation;              // Used by NMethodSweeper (set only by sweeper)
    4.18    bool _marked_for_deoptimization;           // Used for stack deoptimization
    4.19 @@ -180,7 +178,7 @@
    4.20    unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints
    4.21  
    4.22    // Protected by Patching_lock
    4.23 -  unsigned char _state;                      // {alive, not_entrant, zombie, unloaded}
    4.24 +  volatile unsigned char _state;             // {alive, not_entrant, zombie, unloaded}
    4.25  
    4.26  #ifdef ASSERT
    4.27    bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
    4.28 @@ -202,11 +200,18 @@
    4.29  
    4.30    // not_entrant method removal. Each mark_sweep pass will update
    4.31    // this mark to current sweep invocation count if it is seen on the
    4.32 -  // stack.  An not_entrant method can be removed when there is no
     4.33 +  // stack.  A not_entrant method can be removed when there are no
    4.34    // more activations, i.e., when the _stack_traversal_mark is less than
    4.35    // current sweep traversal index.
    4.36    long _stack_traversal_mark;
    4.37  
    4.38 +  // The _hotness_counter indicates the hotness of a method. The higher
    4.39 +  // the value the hotter the method. The hotness counter of a nmethod is
    4.40 +  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
    4.41 +  // is active while stack scanning (mark_active_nmethods()). The hotness
    4.42 +  // counter is decreased (by 1) while sweeping.
    4.43 +  int _hotness_counter;
    4.44 +
    4.45    ExceptionCache *_exception_cache;
    4.46    PcDescCache     _pc_desc_cache;
    4.47  
    4.48 @@ -382,6 +387,10 @@
    4.49  
    4.50    int total_size        () const;
    4.51  
    4.52 +  void dec_hotness_counter()        { _hotness_counter--; }
    4.53 +  void set_hotness_counter(int val) { _hotness_counter = val; }
    4.54 +  int  hotness_counter() const      { return _hotness_counter; }
    4.55 +
    4.56    // Containment
    4.57    bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
    4.58    bool insts_contains        (address addr) const { return insts_begin        () <= addr && addr < insts_end        (); }
    4.59 @@ -408,8 +417,8 @@
    4.60    // alive.  It is used when an uncommon trap happens.  Returns true
    4.61    // if this thread changed the state of the nmethod or false if
    4.62    // another thread performed the transition.
    4.63 -  bool  make_not_entrant()                        { return make_not_entrant_or_zombie(not_entrant); }
    4.64 -  bool  make_zombie()                             { return make_not_entrant_or_zombie(zombie); }
    4.65 +  bool  make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
    4.66 +  bool  make_zombie()      { return make_not_entrant_or_zombie(zombie); }
    4.67  
    4.68    // used by jvmti to track if the unload event has been reported
    4.69    bool  unload_reported()                         { return _unload_reported; }
    4.70 @@ -437,9 +446,6 @@
    4.71    bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
    4.72    void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }
    4.73  
    4.74 -  bool  is_speculatively_disconnected() const     { return _speculatively_disconnected; }
    4.75 -  void  set_speculatively_disconnected(bool z)    { _speculatively_disconnected = z; }
    4.76 -
    4.77    bool  is_lazy_critical_native() const           { return _lazy_critical_native; }
    4.78    void  set_lazy_critical_native(bool z)          { _lazy_critical_native = z; }
    4.79  
    4.80 @@ -499,9 +505,6 @@
    4.81    nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
    4.82    void     set_scavenge_root_link(nmethod *n)          { _scavenge_root_link = n; }
    4.83  
    4.84 -  nmethod* saved_nmethod_link() const                  { return _saved_nmethod_link; }
    4.85 -  void     set_saved_nmethod_link(nmethod *n)          { _saved_nmethod_link = n; }
    4.86 -
    4.87   public:
    4.88  
    4.89    // Sweeper support
     5.1 --- a/src/share/vm/compiler/compileBroker.cpp	Fri Sep 27 08:39:19 2013 +0200
     5.2 +++ b/src/share/vm/compiler/compileBroker.cpp	Fri Sep 27 10:50:55 2013 +0200
     5.3 @@ -634,19 +634,36 @@
     5.4    NMethodSweeper::possibly_sweep();
     5.5  
     5.6    MutexLocker locker(lock());
     5.7 -  // Wait for an available CompileTask.
     5.8 +  // If _first is NULL we have no more compile jobs. There are two reasons for
     5.9 +  // having no compile jobs: First, we compiled everything we wanted. Second,
    5.10 +  // we ran out of code cache so compilation has been disabled. In the latter
    5.11 +  // case we perform code cache sweeps to free memory such that we can re-enable
    5.12 +  // compilation.
    5.13    while (_first == NULL) {
    5.14 -    // There is no work to be done right now.  Wait.
    5.15 -    if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
    5.16 -      // During the emergency sweeping periods, wake up and sweep occasionally
    5.17 -      bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
    5.18 -      if (timedout) {
    5.19 +    if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
    5.20 +      // Wait a certain amount of time to possibly do another sweep.
    5.21 +      // We must wait until stack scanning has happened so that we can
    5.22 +      // transition a method's state from 'not_entrant' to 'zombie'.
    5.23 +      long wait_time = NmethodSweepCheckInterval * 1000;
    5.24 +      if (FLAG_IS_DEFAULT(NmethodSweepCheckInterval)) {
    5.25 +        // Only one thread at a time can do sweeping. Scale the
    5.26 +        // wait time according to the number of compiler threads.
    5.27 +        // As a result, the next sweep is likely to happen every 100ms
    5.28 +        // with an arbitrary number of threads that do sweeping.
    5.29 +        wait_time = 100 * CICompilerCount;
    5.30 +      }
    5.31 +      bool timeout = lock()->wait(!Mutex::_no_safepoint_check_flag, wait_time);
    5.32 +      if (timeout) {
    5.33          MutexUnlocker ul(lock());
    5.34 -        // When otherwise not busy, run nmethod sweeping
    5.35          NMethodSweeper::possibly_sweep();
    5.36        }
    5.37      } else {
    5.38 -      // During normal operation no need to wake up on timer
    5.39 +      // If there are no compilation tasks and we can compile new jobs
    5.40 +      // (i.e., there is enough free space in the code cache) there is
    5.41 +      // no need to invoke the sweeper. As a result, the hotness of methods
    5.42 +      // remains unchanged. This behavior is desired, since we want to keep
    5.43 +      // the stable state, i.e., we do not want to evict methods from the
    5.44 +      // code cache if it is unnecessary.
    5.45        lock()->wait();
    5.46      }
    5.47    }
    5.48 @@ -1227,16 +1244,9 @@
    5.49          return method_code;
    5.50        }
    5.51      }
    5.52 -    if (method->is_not_compilable(comp_level)) return NULL;
    5.53 -
    5.54 -    if (UseCodeCacheFlushing) {
    5.55 -      nmethod* saved = CodeCache::reanimate_saved_code(method());
    5.56 -      if (saved != NULL) {
    5.57 -        method->set_code(method, saved);
    5.58 -        return saved;
    5.59 -      }
    5.60 +    if (method->is_not_compilable(comp_level)) {
    5.61 +      return NULL;
    5.62      }
    5.63 -
    5.64    } else {
    5.65      // osr compilation
    5.66  #ifndef TIERED
    5.67 @@ -1585,9 +1595,6 @@
    5.68        if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
    5.69          // the code cache is really full
    5.70          handle_full_code_cache();
    5.71 -      } else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) {
    5.72 -        // Attempt to start cleaning the code cache while there is still a little headroom
    5.73 -        NMethodSweeper::handle_full_code_cache(false);
    5.74        }
    5.75  
    5.76        CompileTask* task = queue->get();
    5.77 @@ -1943,7 +1950,11 @@
    5.78      }
    5.79  #endif
    5.80      if (UseCodeCacheFlushing) {
    5.81 -      NMethodSweeper::handle_full_code_cache(true);
    5.82 +      // Since code cache is full, immediately stop new compiles
    5.83 +      if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
    5.84 +        NMethodSweeper::log_sweep("disable_compiler");
    5.85 +        NMethodSweeper::possibly_sweep();
    5.86 +      }
    5.87      } else {
    5.88        UseCompiler               = false;
    5.89        AlwaysCompileLoopMethods  = false;
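
The wait-time scaling added to the compile-queue wait loop above is easier to see with numbers. A small sketch under assumed values (CICompilerCount = 4, NmethodSweepCheckInterval left at its default); the helper name is invented for illustration.

// With the default check interval, each idle compiler thread waits
// 100 ms * CICompilerCount before attempting a sweep. Since only one thread
// sweeps at a time, e.g. 4 threads waiting 400 ms each yields a sweep attempt
// roughly every 100 ms, independent of the number of compiler threads.
long sweeper_wait_ms(bool interval_is_default, long check_interval_seconds, int ci_compiler_count) {
  return interval_is_default ? 100L * ci_compiler_count
                             : check_interval_seconds * 1000L;
}
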
     6.1 --- a/src/share/vm/oops/method.cpp	Fri Sep 27 08:39:19 2013 +0200
     6.2 +++ b/src/share/vm/oops/method.cpp	Fri Sep 27 10:50:55 2013 +0200
     6.3 @@ -901,16 +901,6 @@
     6.4  // This function must not hit a safepoint!
     6.5  address Method::verified_code_entry() {
     6.6    debug_only(No_Safepoint_Verifier nsv;)
     6.7 -  nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
     6.8 -  if (code == NULL && UseCodeCacheFlushing) {
     6.9 -    nmethod *saved_code = CodeCache::reanimate_saved_code(this);
    6.10 -    if (saved_code != NULL) {
    6.11 -      methodHandle method(this);
    6.12 -      assert( ! saved_code->is_osr_method(), "should not get here for osr" );
    6.13 -      set_code( method, saved_code );
    6.14 -    }
    6.15 -  }
    6.16 -
    6.17    assert(_from_compiled_entry != NULL, "must be set");
    6.18    return _from_compiled_entry;
    6.19  }
     7.1 --- a/src/share/vm/runtime/arguments.cpp	Fri Sep 27 08:39:19 2013 +0200
     7.2 +++ b/src/share/vm/runtime/arguments.cpp	Fri Sep 27 10:50:55 2013 +0200
     7.3 @@ -1130,6 +1130,9 @@
     7.4      Tier3InvokeNotifyFreqLog = 0;
     7.5      Tier4InvocationThreshold = 0;
     7.6    }
     7.7 +  if (FLAG_IS_DEFAULT(NmethodSweepFraction)) {
     7.8 +    FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M));
     7.9 +  }
    7.10  }
    7.11  
    7.12  #if INCLUDE_ALL_GCS
    7.13 @@ -2333,6 +2336,10 @@
    7.14                  (2*G)/M);
    7.15      status = false;
    7.16    }
    7.17 +
    7.18 +  status &= verify_interval(NmethodSweepFraction, 1, ReservedCodeCacheSize/K, "NmethodSweepFraction");
    7.19 +  status &= verify_interval(NmethodSweepActivity, 0, 2000, "NmethodSweepActivity");
    7.20 +
    7.21    return status;
    7.22  }
    7.23  
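
The new NmethodSweepFraction default above scales with the reserved code cache size. A small sketch with assumed sizes follows; the helper is illustrative only.

#include <cstddef>

// Mirrors the default set in arguments.cpp: 1 + ReservedCodeCacheSize / (16 * M).
size_t nmethod_sweep_fraction(size_t reserved_code_cache_bytes) {
  const size_t M = 1024 * 1024;
  return 1 + reserved_code_cache_bytes / (16 * M);
}
// e.g.  48 MB ->  4 fractions, 240 MB -> 16 fractions; each sweeper invocation
// then processes roughly 1/NmethodSweepFraction of the nmethods in the cache.
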
     8.1 --- a/src/share/vm/runtime/globals.hpp	Fri Sep 27 08:39:19 2013 +0200
     8.2 +++ b/src/share/vm/runtime/globals.hpp	Fri Sep 27 10:50:55 2013 +0200
     8.3 @@ -2868,6 +2868,10 @@
     8.4    product(intx, NmethodSweepCheckInterval, 5,                               \
     8.5            "Compilers wake up every n seconds to possibly sweep nmethods")   \
     8.6                                                                              \
     8.7 +  product(intx, NmethodSweepActivity, 10,                                   \
     8.8 +          "Removes cold nmethods from code cache if > 0. Higher values "    \
     8.9 +          "result in more aggressive sweeping")                             \
    8.10 +                                                                            \
    8.11    notproduct(bool, LogSweeper, false,                                       \
    8.12              "Keep a ring buffer of sweeper activity")                       \
    8.13                                                                              \
    8.14 @@ -3239,15 +3243,6 @@
    8.15    product(bool, UseCodeCacheFlushing, true,                                 \
    8.16            "Attempt to clean the code cache before shutting off compiler")   \
    8.17                                                                              \
    8.18 -  product(intx,  MinCodeCacheFlushingInterval, 30,                          \
    8.19 -          "Min number of seconds between code cache cleaning sessions")     \
    8.20 -                                                                            \
    8.21 -  product(uintx,  CodeCacheFlushingMinimumFreeSpace, 1500*K,                \
    8.22 -          "When less than X space left, start code cache cleaning")         \
    8.23 -                                                                            \
    8.24 -  product(uintx, CodeCacheFlushingFraction, 2,                              \
    8.25 -          "Fraction of the code cache that is flushed when full")           \
    8.26 -                                                                            \
    8.27    /* interpreter debugging */                                               \
    8.28    develop(intx, BinarySwitchThreshold, 5,                                   \
    8.29            "Minimal number of lookupswitch entries for rewriting to binary " \
     9.1 --- a/src/share/vm/runtime/safepoint.cpp	Fri Sep 27 08:39:19 2013 +0200
     9.2 +++ b/src/share/vm/runtime/safepoint.cpp	Fri Sep 27 10:50:55 2013 +0200
     9.3 @@ -519,8 +519,8 @@
     9.4    }
     9.5  
     9.6    {
     9.7 -    TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
     9.8 -    NMethodSweeper::scan_stacks();
     9.9 +    TraceTime t4("mark nmethods", TraceSafepointCleanupTime);
    9.10 +    NMethodSweeper::mark_active_nmethods();
    9.11    }
    9.12  
    9.13    if (SymbolTable::needs_rehashing()) {
    10.1 --- a/src/share/vm/runtime/sweeper.cpp	Fri Sep 27 08:39:19 2013 +0200
    10.2 +++ b/src/share/vm/runtime/sweeper.cpp	Fri Sep 27 10:50:55 2013 +0200
    10.3 @@ -127,64 +127,79 @@
    10.4  #define SWEEP(nm)
    10.5  #endif
    10.6  
    10.7 +nmethod*  NMethodSweeper::_current         = NULL; // Current nmethod
    10.8 +long      NMethodSweeper::_traversals      = 0;    // Nof. stack traversals performed
    10.9 +int       NMethodSweeper::_seen            = 0;    // Nof. nmethods we have currently processed in current pass of CodeCache
   10.10 +int       NMethodSweeper::_flushed_count   = 0;    // Nof. nmethods flushed in current sweep
   10.11 +int       NMethodSweeper::_zombified_count = 0;    // Nof. nmethods made zombie in current sweep
   10.12 +int       NMethodSweeper::_marked_count    = 0;    // Nof. nmethods marked for reclaim in current sweep
   10.13  
   10.14 -long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
   10.15 -nmethod*  NMethodSweeper::_current = NULL;   // Current nmethod
   10.16 -int       NMethodSweeper::_seen = 0 ;        // No. of nmethods we have currently processed in current pass of CodeCache
   10.17 -int       NMethodSweeper::_flushed_count = 0;   // Nof. nmethods flushed in current sweep
   10.18 -int       NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
   10.19 -int       NMethodSweeper::_marked_count = 0;    // Nof. nmethods marked for reclaim in current sweep
   10.20 -
   10.21 -volatile int NMethodSweeper::_invocations = 0;   // No. of invocations left until we are completed with this pass
   10.22 +volatile int NMethodSweeper::_invocations   = 0; // Nof. invocations left until we are completed with this pass
   10.23  volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
   10.24  
   10.25 -jint      NMethodSweeper::_locked_seen = 0;
   10.26 +jint      NMethodSweeper::_locked_seen               = 0;
   10.27  jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
   10.28 -bool      NMethodSweeper::_resweep = false;
   10.29 -jint      NMethodSweeper::_flush_token = 0;
   10.30 -jlong     NMethodSweeper::_last_full_flush_time = 0;
   10.31 -int       NMethodSweeper::_highest_marked = 0;
   10.32 -int       NMethodSweeper::_dead_compile_ids = 0;
   10.33 -long      NMethodSweeper::_last_flush_traversal_id = 0;
   10.34 +bool      NMethodSweeper::_request_mark_phase        = false;
   10.35  
   10.36 -int       NMethodSweeper::_number_of_flushes = 0; // Total of full traversals caused by full cache
   10.37  int       NMethodSweeper::_total_nof_methods_reclaimed = 0;
   10.38 -jlong     NMethodSweeper::_total_time_sweeping = 0;
   10.39 -jlong     NMethodSweeper::_total_time_this_sweep = 0;
   10.40 -jlong     NMethodSweeper::_peak_sweep_time = 0;
   10.41 -jlong     NMethodSweeper::_peak_sweep_fraction_time = 0;
   10.42 -jlong     NMethodSweeper::_total_disconnect_time = 0;
   10.43 -jlong     NMethodSweeper::_peak_disconnect_time = 0;
   10.44 +jlong     NMethodSweeper::_total_time_sweeping         = 0;
   10.45 +jlong     NMethodSweeper::_total_time_this_sweep       = 0;
   10.46 +jlong     NMethodSweeper::_peak_sweep_time             = 0;
   10.47 +jlong     NMethodSweeper::_peak_sweep_fraction_time    = 0;
   10.48 +int       NMethodSweeper::_hotness_counter_reset_val   = 0;
   10.49 +
   10.50  
   10.51  class MarkActivationClosure: public CodeBlobClosure {
   10.52  public:
   10.53    virtual void do_code_blob(CodeBlob* cb) {
   10.54 -    // If we see an activation belonging to a non_entrant nmethod, we mark it.
   10.55 -    if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
   10.56 -      ((nmethod*)cb)->mark_as_seen_on_stack();
   10.57 +    if (cb->is_nmethod()) {
   10.58 +      nmethod* nm = (nmethod*)cb;
   10.59 +      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
   10.60 +      // If we see an activation belonging to a non_entrant nmethod, we mark it.
   10.61 +      if (nm->is_not_entrant()) {
   10.62 +        nm->mark_as_seen_on_stack();
   10.63 +      }
   10.64      }
   10.65    }
   10.66  };
   10.67  static MarkActivationClosure mark_activation_closure;
   10.68  
   10.69 +class SetHotnessClosure: public CodeBlobClosure {
   10.70 +public:
   10.71 +  virtual void do_code_blob(CodeBlob* cb) {
   10.72 +    if (cb->is_nmethod()) {
   10.73 +      nmethod* nm = (nmethod*)cb;
   10.74 +      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
   10.75 +    }
   10.76 +  }
   10.77 +};
   10.78 +static SetHotnessClosure set_hotness_closure;
   10.79 +
   10.80 +
   10.81 +int NMethodSweeper::hotness_counter_reset_val() {
   10.82 +  if (_hotness_counter_reset_val == 0) {
   10.83 +    _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
   10.84 +  }
   10.85 +  return _hotness_counter_reset_val;
   10.86 +}
   10.87  bool NMethodSweeper::sweep_in_progress() {
   10.88    return (_current != NULL);
   10.89  }
   10.90  
   10.91 -void NMethodSweeper::scan_stacks() {
   10.92 +// Scans the stacks of all Java threads and marks activations of not-entrant methods.
   10.93 +// No need to synchronize access, since 'mark_active_nmethods' is always executed at a
   10.94 +// safepoint.
   10.95 +void NMethodSweeper::mark_active_nmethods() {
   10.96    assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
   10.97 -  if (!MethodFlushing) return;
   10.98 -
   10.99 -  // No need to synchronize access, since this is always executed at a
  10.100 -  // safepoint.
  10.101 -
  10.102 -  // Make sure CompiledIC_lock in unlocked, since we might update some
  10.103 -  // inline caches. If it is, we just bail-out and try later.
  10.104 -  if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
  10.105 +  // If we do not want to reclaim not-entrant or zombie methods there is no need
  10.106 +  // to scan stacks
  10.107 +  if (!MethodFlushing) {
  10.108 +    return;
  10.109 +  }
  10.110  
  10.111    // Check for restart
  10.112    assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
  10.113 -  if (!sweep_in_progress() && _resweep) {
  10.114 +  if (!sweep_in_progress() && need_marking_phase()) {
  10.115      _seen        = 0;
  10.116      _invocations = NmethodSweepFraction;
  10.117      _current     = CodeCache::first_nmethod();
  10.118 @@ -197,30 +212,22 @@
  10.119      Threads::nmethods_do(&mark_activation_closure);
  10.120  
  10.121      // reset the flags since we started a scan from the beginning.
  10.122 -    _resweep = false;
  10.123 +    reset_nmethod_marking();
  10.124      _locked_seen = 0;
  10.125      _not_entrant_seen_on_stack = 0;
  10.126 +  } else {
  10.127 +    // Only set hotness counter
  10.128 +    Threads::nmethods_do(&set_hotness_closure);
  10.129    }
  10.130  
  10.131 -  if (UseCodeCacheFlushing) {
  10.132 -    // only allow new flushes after the interval is complete.
  10.133 -    jlong now           = os::javaTimeMillis();
  10.134 -    jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
  10.135 -    jlong curr_interval = now - _last_full_flush_time;
  10.136 -    if (curr_interval > max_interval) {
  10.137 -      _flush_token = 0;
  10.138 -    }
  10.139 -
  10.140 -    if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) {
  10.141 -      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
  10.142 -      log_sweep("restart_compiler");
  10.143 -    }
  10.144 -  }
  10.145 +  OrderAccess::storestore();
  10.146  }
  10.147  
  10.148  void NMethodSweeper::possibly_sweep() {
  10.149    assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
  10.150 -  if (!MethodFlushing || !sweep_in_progress()) return;
  10.151 +  if (!MethodFlushing || !sweep_in_progress()) {
  10.152 +    return;
  10.153 +  }
  10.154  
  10.155    if (_invocations > 0) {
  10.156      // Only one thread at a time will sweep
  10.157 @@ -258,8 +265,7 @@
  10.158    if (!CompileBroker::should_compile_new_jobs()) {
  10.159      // If we have turned off compilations we might as well do full sweeps
  10.160      // in order to reach the clean state faster. Otherwise the sleeping compiler
  10.161 -    // threads will slow down sweeping. After a few iterations the cache
  10.162 -    // will be clean and sweeping stops (_resweep will not be set)
  10.163 +    // threads will slow down sweeping.
  10.164      _invocations = 1;
  10.165    }
  10.166  
  10.167 @@ -271,9 +277,11 @@
  10.168    int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
  10.169    int swept_count = 0;
  10.170  
  10.171 +
  10.172    assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  10.173    assert(!CodeCache_lock->owned_by_self(), "just checking");
  10.174  
  10.175 +  int freed_memory = 0;
  10.176    {
  10.177      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  10.178  
  10.179 @@ -299,7 +307,7 @@
  10.180        // Now ready to process nmethod and give up CodeCache_lock
  10.181        {
  10.182          MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  10.183 -        process_nmethod(_current);
  10.184 +        freed_memory += process_nmethod(_current);
  10.185        }
  10.186        _seen++;
  10.187        _current = next;
  10.188 @@ -308,11 +316,11 @@
  10.189  
  10.190    assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
  10.191  
  10.192 -  if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
  10.193 +  if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) {
  10.194      // we've completed a scan without making progress but there were
  10.195      // nmethods we were unable to process either because they were
  10.196 -    // locked or were still on stack.  We don't have to aggresively
  10.197 -    // clean them up so just stop scanning.  We could scan once more
  10.198 +    // locked or were still on stack. We don't have to aggressively
  10.199 +    // clean them up so just stop scanning. We could scan once more
  10.200      // but that complicates the control logic and it's unlikely to
  10.201      // matter much.
  10.202      if (PrintMethodFlushing) {
  10.203 @@ -351,9 +359,16 @@
  10.204      log_sweep("finished");
  10.205    }
  10.206  
  10.207 -  // Sweeper is the only case where memory is released,
  10.208 -  // check here if it is time to restart the compiler.
  10.209 -  if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs() && !CodeCache::needs_flushing()) {
  10.210 +  // Sweeper is the only case where memory is released, check here if it
  10.211 +  // is time to restart the compiler. Only checking if there is a certain
  10.212 +  // amount of free memory in the code cache might lead to re-enabling
  10.213 +  // compilation although no memory has been released. For example, there are
  10.214 +  // cases when compilation was disabled although there is 4MB (or more) free
  10.215 +  // memory in the code cache. The reason is code cache fragmentation. Therefore,
  10.216 +  // it only makes sense to re-enable compilation if we have actually freed memory.
  10.217 +  // Note that typically several kB are released for sweeping 16MB of the code
   10.218 +  // cache. As a result, 'freed_memory' must be > 0 to restart the compiler.
  10.219 +  if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0))) {
  10.220      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
  10.221      log_sweep("restart_compiler");
  10.222    }
  10.223 @@ -367,8 +382,8 @@
  10.224      _thread = CompilerThread::current();
  10.225      if (!nm->is_zombie() && !nm->is_unloaded()) {
  10.226        // Only expose live nmethods for scanning
  10.227 -    _thread->set_scanned_nmethod(nm);
  10.228 -  }
  10.229 +      _thread->set_scanned_nmethod(nm);
  10.230 +    }
  10.231    }
  10.232    ~NMethodMarker() {
  10.233      _thread->set_scanned_nmethod(NULL);
  10.234 @@ -392,20 +407,20 @@
  10.235    nm->flush();
  10.236  }
  10.237  
  10.238 -void NMethodSweeper::process_nmethod(nmethod *nm) {
  10.239 +int NMethodSweeper::process_nmethod(nmethod *nm) {
  10.240    assert(!CodeCache_lock->owned_by_self(), "just checking");
  10.241  
  10.242 +  int freed_memory = 0;
  10.243    // Make sure this nmethod doesn't get unloaded during the scan,
  10.244 -  // since the locks acquired below might safepoint.
   10.245 +  // since safepoints may happen while acquiring the locks below.
  10.246    NMethodMarker nmm(nm);
  10.247 -
  10.248    SWEEP(nm);
  10.249  
  10.250    // Skip methods that are currently referenced by the VM
  10.251    if (nm->is_locked_by_vm()) {
  10.252      // But still remember to clean-up inline caches for alive nmethods
  10.253      if (nm->is_alive()) {
  10.254 -      // Clean-up all inline caches that points to zombie/non-reentrant methods
  10.255 +      // Clean inline caches that point to zombie/non-entrant methods
  10.256        MutexLocker cl(CompiledIC_lock);
  10.257        nm->cleanup_inline_caches();
  10.258        SWEEP(nm);
  10.259 @@ -413,18 +428,19 @@
  10.260        _locked_seen++;
  10.261        SWEEP(nm);
  10.262      }
  10.263 -    return;
  10.264 +    return freed_memory;
  10.265    }
  10.266  
  10.267    if (nm->is_zombie()) {
  10.268 -    // If it is first time, we see nmethod then we mark it. Otherwise,
  10.269 -    // we reclame it. When we have seen a zombie method twice, we know that
  10.270 +    // If it is the first time we see nmethod then we mark it. Otherwise,
  10.271 +    // we reclaim it. When we have seen a zombie method twice, we know that
  10.272      // there are no inline caches that refer to it.
  10.273      if (nm->is_marked_for_reclamation()) {
  10.274        assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
  10.275        if (PrintMethodFlushing && Verbose) {
  10.276          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
  10.277        }
  10.278 +      freed_memory = nm->total_size();
  10.279        release_nmethod(nm);
  10.280        _flushed_count++;
  10.281      } else {
  10.282 @@ -432,19 +448,19 @@
  10.283          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
  10.284        }
  10.285        nm->mark_for_reclamation();
  10.286 -      _resweep = true;
  10.287 +      request_nmethod_marking();
  10.288        _marked_count++;
  10.289        SWEEP(nm);
  10.290      }
  10.291    } else if (nm->is_not_entrant()) {
  10.292 -    // If there is no current activations of this method on the
  10.293 +    // If there are no current activations of this method on the
  10.294      // stack we can safely convert it to a zombie method
  10.295      if (nm->can_not_entrant_be_converted()) {
  10.296        if (PrintMethodFlushing && Verbose) {
  10.297          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
  10.298        }
  10.299        nm->make_zombie();
  10.300 -      _resweep = true;
  10.301 +      request_nmethod_marking();
  10.302        _zombified_count++;
  10.303        SWEEP(nm);
  10.304      } else {
  10.305 @@ -459,159 +475,57 @@
  10.306      }
  10.307    } else if (nm->is_unloaded()) {
  10.308      // Unloaded code, just make it a zombie
  10.309 -    if (PrintMethodFlushing && Verbose)
  10.310 +    if (PrintMethodFlushing && Verbose) {
  10.311        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
  10.312 -
  10.313 +    }
  10.314      if (nm->is_osr_method()) {
  10.315        SWEEP(nm);
  10.316        // No inline caches will ever point to osr methods, so we can just remove it
  10.317 +      freed_memory = nm->total_size();
  10.318        release_nmethod(nm);
  10.319        _flushed_count++;
  10.320      } else {
  10.321        nm->make_zombie();
  10.322 -      _resweep = true;
  10.323 +      request_nmethod_marking();
  10.324        _zombified_count++;
  10.325        SWEEP(nm);
  10.326      }
  10.327    } else {
  10.328 -    assert(nm->is_alive(), "should be alive");
  10.329 -
  10.330      if (UseCodeCacheFlushing) {
  10.331 -      if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
  10.332 -          (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
  10.333 -        // This method has not been called since the forced cleanup happened
  10.334 -        nm->make_not_entrant();
  10.335 +      if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
  10.336 +        // Do not make native methods and OSR-methods not-entrant
  10.337 +        nm->dec_hotness_counter();
  10.338 +        // Get the initial value of the hotness counter. This value depends on the
  10.339 +        // ReservedCodeCacheSize
  10.340 +        int reset_val = hotness_counter_reset_val();
  10.341 +        int time_since_reset = reset_val - nm->hotness_counter();
  10.342 +        double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
   10.343 +        // The less free space in the code cache we have, the bigger reverse_free_ratio() is.
  10.344 +        // I.e., 'threshold' increases with lower available space in the code cache and a higher
  10.345 +        // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
  10.346 +        // value until it is reset by stack walking - is smaller than the computed threshold, the
  10.347 +        // corresponding nmethod is considered for removal.
  10.348 +        if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) {
  10.349 +          // A method is marked as not-entrant if the method is
  10.350 +          // 1) 'old enough': nm->hotness_counter() < threshold
  10.351 +          // 2) The method was in_use for a minimum amount of time: (time_since_reset > 10)
  10.352 +          //    The second condition is necessary if we are dealing with very small code cache
  10.353 +          //    sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
  10.354 +          //    The second condition ensures that methods are not immediately made not-entrant
  10.355 +          //    after compilation.
  10.356 +          nm->make_not_entrant();
  10.357 +          request_nmethod_marking();
  10.358 +        }
  10.359        }
  10.360      }
  10.361 -
  10.362 -    // Clean-up all inline caches that points to zombie/non-reentrant methods
  10.363 +    // Clean-up all inline caches that point to zombie/non-reentrant methods
  10.364      MutexLocker cl(CompiledIC_lock);
  10.365      nm->cleanup_inline_caches();
  10.366      SWEEP(nm);
  10.367    }
  10.368 +  return freed_memory;
  10.369  }
  10.370  
  10.371 -// Code cache unloading: when compilers notice the code cache is getting full,
  10.372 -// they will call a vm op that comes here. This code attempts to speculatively
  10.373 -// unload the oldest half of the nmethods (based on the compile job id) by
  10.374 -// saving the old code in a list in the CodeCache. Then
  10.375 -// execution resumes. If a method so marked is not called by the second sweeper
  10.376 -// stack traversal after the current one, the nmethod will be marked non-entrant and
  10.377 -// got rid of by normal sweeping. If the method is called, the Method*'s
  10.378 -// _code field is restored and the Method*/nmethod
  10.379 -// go back to their normal state.
  10.380 -void NMethodSweeper::handle_full_code_cache(bool is_full) {
  10.381 -
  10.382 -  if (is_full) {
  10.383 -    // Since code cache is full, immediately stop new compiles
  10.384 -    if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
  10.385 -      log_sweep("disable_compiler");
  10.386 -    }
  10.387 -  }
  10.388 -
  10.389 -  // Make sure only one thread can flush
  10.390 -  // The token is reset after CodeCacheMinimumFlushInterval in scan stacks,
  10.391 -  // no need to check the timeout here.
  10.392 -  jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
  10.393 -  if (old != 0) {
  10.394 -    return;
  10.395 -  }
  10.396 -
  10.397 -  VM_HandleFullCodeCache op(is_full);
  10.398 -  VMThread::execute(&op);
  10.399 -
  10.400 -  // resweep again as soon as possible
  10.401 -  _resweep = true;
  10.402 -}
  10.403 -
  10.404 -void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
  10.405 -  // If there was a race in detecting full code cache, only run
  10.406 -  // one vm op for it or keep the compiler shut off
  10.407 -
  10.408 -  jlong disconnect_start_counter = os::elapsed_counter();
  10.409 -
  10.410 -  // Traverse the code cache trying to dump the oldest nmethods
  10.411 -  int curr_max_comp_id = CompileBroker::get_compilation_id();
  10.412 -  int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;
  10.413 -
  10.414 -  log_sweep("start_cleaning");
  10.415 -
  10.416 -  nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
  10.417 -  jint disconnected = 0;
  10.418 -  jint made_not_entrant  = 0;
  10.419 -  jint nmethod_count = 0;
  10.420 -
  10.421 -  while ((nm != NULL)){
  10.422 -    int curr_comp_id = nm->compile_id();
  10.423 -
  10.424 -    // OSR methods cannot be flushed like this. Also, don't flush native methods
  10.425 -    // since they are part of the JDK in most cases
  10.426 -    if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {
  10.427 -
  10.428 -      // only count methods that can be speculatively disconnected
  10.429 -      nmethod_count++;
  10.430 -
  10.431 -      if (nm->is_in_use() && (curr_comp_id < flush_target)) {
  10.432 -        if ((nm->method()->code() == nm)) {
  10.433 -          // This method has not been previously considered for
  10.434 -          // unloading or it was restored already
  10.435 -          CodeCache::speculatively_disconnect(nm);
  10.436 -          disconnected++;
  10.437 -        } else if (nm->is_speculatively_disconnected()) {
  10.438 -          // This method was previously considered for preemptive unloading and was not called since then
  10.439 -          CompilationPolicy::policy()->delay_compilation(nm->method());
  10.440 -          nm->make_not_entrant();
  10.441 -          made_not_entrant++;
  10.442 -        }
  10.443 -
  10.444 -        if (curr_comp_id > _highest_marked) {
  10.445 -          _highest_marked = curr_comp_id;
  10.446 -        }
  10.447 -      }
  10.448 -    }
  10.449 -    nm = CodeCache::alive_nmethod(CodeCache::next(nm));
  10.450 -  }
  10.451 -
  10.452 -  // remember how many compile_ids wheren't seen last flush.
  10.453 -  _dead_compile_ids = curr_max_comp_id - nmethod_count;
  10.454 -
  10.455 -  log_sweep("stop_cleaning",
  10.456 -                       "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
  10.457 -                       disconnected, made_not_entrant);
  10.458 -
  10.459 -  // Shut off compiler. Sweeper will start over with a new stack scan and
  10.460 -  // traversal cycle and turn it back on if it clears enough space.
  10.461 -  if (is_full) {
  10.462 -    _last_full_flush_time = os::javaTimeMillis();
  10.463 -  }
  10.464 -
  10.465 -  jlong disconnect_end_counter = os::elapsed_counter();
  10.466 -  jlong disconnect_time = disconnect_end_counter - disconnect_start_counter;
  10.467 -  _total_disconnect_time += disconnect_time;
  10.468 -  _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);
  10.469 -
  10.470 -  EventCleanCodeCache event(UNTIMED);
  10.471 -  if (event.should_commit()) {
  10.472 -    event.set_starttime(disconnect_start_counter);
  10.473 -    event.set_endtime(disconnect_end_counter);
  10.474 -    event.set_disconnectedCount(disconnected);
  10.475 -    event.set_madeNonEntrantCount(made_not_entrant);
  10.476 -    event.commit();
  10.477 -  }
  10.478 -  _number_of_flushes++;
  10.479 -
  10.480 -  // After two more traversals the sweeper will get rid of unrestored nmethods
  10.481 -  _last_flush_traversal_id = _traversals;
  10.482 -  _resweep = true;
  10.483 -#ifdef ASSERT
  10.484 -
  10.485 -  if(PrintMethodFlushing && Verbose) {
  10.486 -    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time);
  10.487 -  }
  10.488 -#endif
  10.489 -}
  10.490 -
  10.491 -
  10.492  // Print out some state information about the current sweep and the
  10.493  // state of the code cache if it's requested.
  10.494  void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
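
To make the threshold comment in process_nmethod() above concrete, here is a worked example under assumed inputs: a 32 MB ReservedCodeCacheSize, the default NmethodSweepActivity of 10, and a hypothetical reverse_free_ratio() of 8 (roughly one eighth of the cache still free).

// All inputs below are assumptions chosen for illustration.
const int    reset_val = 32 * 2;                 // hotness_counter_reset_val() for a 32 MB cache = 64
const double threshold = -reset_val + 8.0 * 10;  // -64 + 80 = 16
// A method becomes a removal candidate once its hotness counter, which starts
// at 64 and is decremented by 1 per sweep, drops below 16, i.e. after about
// 64 - 16 = 48 sweeps during which it was never seen on a scanned stack
// (time_since_reset > 10 is then trivially satisfied).
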
    11.1 --- a/src/share/vm/runtime/sweeper.hpp	Fri Sep 27 08:39:19 2013 +0200
    11.2 +++ b/src/share/vm/runtime/sweeper.hpp	Fri Sep 27 10:50:55 2013 +0200
    11.3 @@ -27,8 +27,30 @@
    11.4  
    11.5  // An NmethodSweeper is an incremental cleaner for:
    11.6  //    - cleanup inline caches
    11.7 -//    - reclamation of unreferences zombie nmethods
    11.8 -//
    11.9 +//    - reclamation of nmethods
   11.10 +// Removing nmethods from the code cache includes two operations
   11.11 +//  1) mark active nmethods
   11.12 +//     Is done in 'mark_active_nmethods()'. This function is called at a
   11.13 +//     safepoint and marks all nmethods that are active on a thread's stack.
   11.14 +//  2) sweep nmethods
   11.15 +//     Is done in sweep_code_cache(). This function is the only place in the
   11.16 +//     sweeper where memory is reclaimed. Note that sweep_code_cache() is not
   11.17 +//     called at a safepoint. However, sweep_code_cache() stops executing if
   11.18 +//     another thread requests a safepoint. Consequently, 'mark_active_nmethods()'
   11.19 +//     and sweep_code_cache() cannot execute at the same time.
   11.20 +//     To reclaim memory, nmethods are first marked as 'not-entrant'. Methods can
   11.21 +//     be made not-entrant by (i) the sweeper, (ii) deoptimization, (iii) dependency
    11.22 +//     invalidation, and (iv) being replaced by a different method version (tiered
    11.23 +//     compilation). Not-entrant nmethods cannot be called by Java threads, but they
    11.24 +//     can still be active on the stack. To ensure that active nmethods are not reclaimed,
   11.25 +//     we have to wait until the next marking phase has completed. If a not-entrant
   11.26 +//     nmethod was NOT marked as active, it can be converted to 'zombie' state. To safely
    11.27 +//     remove the nmethod, all inline caches (IC) that point to the nmethod must be
   11.28 +//     cleared. After that, the nmethod can be evicted from the code cache. Each nmethod's
   11.29 +//     state change happens during separate sweeps. It may take at least 3 sweeps before an
   11.30 +//     nmethod's space is freed. Sweeping is currently done by compiler threads between
    11.31 +//     compilations or at least every 5 seconds (NmethodSweepCheckInterval) when the code cache
   11.32 +//     is full.
   11.33  
   11.34  class NMethodSweeper : public AllStatic {
   11.35    static long      _traversals;      // Stack scan count, also sweep ID.
   11.36 @@ -41,46 +63,38 @@
   11.37    static volatile int  _invocations;   // No. of invocations left until we are completed with this pass
   11.38    static volatile int  _sweep_started; // Flag to control conc sweeper
   11.39  
   11.40 -  //The following are reset in scan_stacks and synchronized by the safepoint
   11.41 -  static bool      _resweep;           // Indicates that a change has happend and we want another sweep,
   11.42 -                                       // always checked and reset at a safepoint so memory will be in sync.
   11.43 -  static int       _locked_seen;       // Number of locked nmethods encountered during the scan
   11.44 +  //The following are reset in mark_active_nmethods and synchronized by the safepoint
    11.45 +  static bool      _request_mark_phase;        // Indicates that a change has happened and we need another mark phase,
   11.46 +                                               // always checked and reset at a safepoint so memory will be in sync.
   11.47 +  static int       _locked_seen;               // Number of locked nmethods encountered during the scan
   11.48    static int       _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack
   11.49 -  static jint      _flush_token;       // token that guards method flushing, making sure it is executed only once.
   11.50 -
   11.51 -  // These are set during a flush, a VM-operation
   11.52 -  static long      _last_flush_traversal_id; // trav number at last flush unloading
   11.53 -  static jlong     _last_full_flush_time;    // timestamp of last emergency unloading
   11.54 -
   11.55 -  // These are synchronized by the _sweep_started token
   11.56 -  static int       _highest_marked;   // highest compile id dumped at last emergency unloading
   11.57 -  static int       _dead_compile_ids; // number of compile ids that where not in the cache last flush
   11.58  
   11.59    // Stat counters
   11.60 -  static int       _number_of_flushes;            // Total of full traversals caused by full cache
   11.61    static int       _total_nof_methods_reclaimed;  // Accumulated nof methods flushed
   11.62    static jlong     _total_time_sweeping;          // Accumulated time sweeping
   11.63    static jlong     _total_time_this_sweep;        // Total time this sweep
   11.64    static jlong     _peak_sweep_time;              // Peak time for a full sweep
   11.65    static jlong     _peak_sweep_fraction_time;     // Peak time sweeping one fraction
   11.66 -  static jlong     _total_disconnect_time;        // Total time cleaning code mem
   11.67 -  static jlong     _peak_disconnect_time;         // Peak time cleaning code mem
   11.68  
   11.69 -  static void process_nmethod(nmethod *nm);
   11.70 +  static int  process_nmethod(nmethod *nm);
   11.71    static void release_nmethod(nmethod* nm);
   11.72  
   11.73 -  static void log_sweep(const char* msg, const char* format = NULL, ...);
   11.74    static bool sweep_in_progress();
   11.75 +  static void sweep_code_cache();
   11.76 +  static void request_nmethod_marking() { _request_mark_phase = true; }
   11.77 +  static void reset_nmethod_marking()   { _request_mark_phase = false; }
   11.78 +  static bool need_marking_phase()      { return _request_mark_phase; }
   11.79 +
   11.80 +  static int _hotness_counter_reset_val;
   11.81  
   11.82   public:
   11.83    static long traversal_count()              { return _traversals; }
   11.84 -  static int  number_of_flushes()            { return _number_of_flushes; }
   11.85    static int  total_nof_methods_reclaimed()  { return _total_nof_methods_reclaimed; }
   11.86    static jlong total_time_sweeping()         { return _total_time_sweeping; }
   11.87    static jlong peak_sweep_time()             { return _peak_sweep_time; }
   11.88    static jlong peak_sweep_fraction_time()    { return _peak_sweep_fraction_time; }
   11.89 -  static jlong total_disconnect_time()       { return _total_disconnect_time; }
   11.90 -  static jlong peak_disconnect_time()        { return _peak_disconnect_time; }
   11.91 +  static void log_sweep(const char* msg, const char* format = NULL, ...);
   11.92 +
   11.93  
   11.94  #ifdef ASSERT
   11.95    static bool is_sweeping(nmethod* which) { return _current == which; }
   11.96 @@ -90,19 +104,18 @@
   11.97    static void report_events();
   11.98  #endif
   11.99  
  11.100 -  static void scan_stacks();      // Invoked at the end of each safepoint
  11.101 -  static void sweep_code_cache(); // Concurrent part of sweep job
  11.102 -  static void possibly_sweep();   // Compiler threads call this to sweep
  11.103 +  static void mark_active_nmethods();      // Invoked at the end of each safepoint
  11.104 +  static void possibly_sweep();            // Compiler threads call this to sweep
  11.105  
  11.106 -  static void notify(nmethod* nm) {
  11.107 +  static int sort_nmethods_by_hotness(nmethod** nm1, nmethod** nm2);
  11.108 +  static int hotness_counter_reset_val();
  11.109 +
  11.110 +  static void notify() {
  11.111      // Request a new sweep of the code cache from the beginning. No
  11.112      // need to synchronize the setting of this flag since it only
  11.113      // changes to false at safepoint so we can never overwrite it with false.
  11.114 -     _resweep = true;
  11.115 +     request_nmethod_marking();
  11.116    }
  11.117 -
  11.118 -  static void handle_full_code_cache(bool is_full); // Called by compilers who fail to allocate
  11.119 -  static void speculative_disconnect_nmethods(bool was_full);   // Called by vm op to deal with alloc failure
  11.120  };
  11.121  
  11.122  #endif // SHARE_VM_RUNTIME_SWEEPER_HPP
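
For orientation, a hedged usage sketch of the reworked interface declared above. The call sites shown are illustrative only (the names below are invented, not the actual HotSpot hooks): per the comments in the class, the marking phase runs at the end of each safepoint, compiler threads drive the sweeping, and code that invalidates nmethods requests a new marking phase via notify().

#include "runtime/sweeper.hpp"  // assumed include path for the header shown above

// Illustrative call sites only; the real hooks live elsewhere in the VM.
static void example_end_of_safepoint() {
  NMethodSweeper::mark_active_nmethods();  // record which nmethods are still active on a stack
}

static void example_compiler_thread_between_compiles() {
  NMethodSweeper::possibly_sweep();        // sweep a fraction of the code cache if a mark phase requested it
}

static void example_after_nmethod_invalidation() {
  NMethodSweeper::notify();                // request a fresh marking phase / sweep
}
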
    12.1 --- a/src/share/vm/runtime/vmStructs.cpp	Fri Sep 27 08:39:19 2013 +0200
    12.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Fri Sep 27 10:50:55 2013 +0200
    12.3 @@ -842,7 +842,7 @@
    12.4    nonstatic_field(nmethod,             _osr_link,                                     nmethod*)                              \
    12.5    nonstatic_field(nmethod,             _scavenge_root_link,                           nmethod*)                              \
    12.6    nonstatic_field(nmethod,             _scavenge_root_state,                          jbyte)                                 \
    12.7 -  nonstatic_field(nmethod,             _state,                                        unsigned char)                         \
    12.8 +  nonstatic_field(nmethod,             _state,                                        volatile unsigned char)                \
    12.9    nonstatic_field(nmethod,             _exception_offset,                             int)                                   \
   12.10    nonstatic_field(nmethod,             _deoptimize_offset,                            int)                                   \
   12.11    nonstatic_field(nmethod,             _deoptimize_mh_offset,                         int)                                   \
   12.12 @@ -1360,6 +1360,7 @@
   12.13    declare_integer_type(long)                                              \
   12.14    declare_integer_type(char)                                              \
   12.15    declare_unsigned_integer_type(unsigned char)                            \
   12.16 +  declare_unsigned_integer_type(volatile unsigned char)                   \
   12.17    declare_unsigned_integer_type(u_char)                                   \
   12.18    declare_unsigned_integer_type(unsigned int)                             \
   12.19    declare_unsigned_integer_type(uint)                                     \
   12.20 @@ -1382,6 +1383,7 @@
   12.21    declare_toplevel_type(char**)                                           \
   12.22    declare_toplevel_type(u_char*)                                          \
   12.23    declare_toplevel_type(unsigned char*)                                   \
   12.24 +  declare_toplevel_type(volatile unsigned char*)                          \
   12.25                                                                            \
   12.26    /*******************************************************************/   \
   12.27    /* Types which it will be handy to have available over in the SA   */   \
    13.1 --- a/src/share/vm/runtime/vm_operations.cpp	Fri Sep 27 08:39:19 2013 +0200
    13.2 +++ b/src/share/vm/runtime/vm_operations.cpp	Fri Sep 27 10:50:55 2013 +0200
    13.3 @@ -173,10 +173,6 @@
    13.4    SymbolTable::unlink();
    13.5  }
    13.6  
    13.7 -void VM_HandleFullCodeCache::doit() {
    13.8 -  NMethodSweeper::speculative_disconnect_nmethods(_is_full);
    13.9 -}
   13.10 -
   13.11  void VM_Verify::doit() {
   13.12    Universe::heap()->prepare_for_verify();
   13.13    Universe::verify(_silent);
    14.1 --- a/src/share/vm/runtime/vm_operations.hpp	Fri Sep 27 08:39:19 2013 +0200
    14.2 +++ b/src/share/vm/runtime/vm_operations.hpp	Fri Sep 27 10:50:55 2013 +0200
    14.3 @@ -51,7 +51,6 @@
    14.4    template(DeoptimizeAll)                         \
    14.5    template(ZombieAll)                             \
    14.6    template(UnlinkSymbols)                         \
    14.7 -  template(HandleFullCodeCache)                   \
    14.8    template(Verify)                                \
    14.9    template(PrintJNI)                              \
   14.10    template(HeapDumper)                            \
   14.11 @@ -261,16 +260,6 @@
   14.12    bool allow_nested_vm_operations() const        { return true;  }
   14.13  };
   14.14  
   14.15 -class VM_HandleFullCodeCache: public VM_Operation {
   14.16 - private:
   14.17 -  bool  _is_full;
   14.18 - public:
   14.19 -  VM_HandleFullCodeCache(bool is_full)           { _is_full = is_full; }
   14.20 -  VMOp_Type type() const                         { return VMOp_HandleFullCodeCache; }
   14.21 -  void doit();
   14.22 -  bool allow_nested_vm_operations() const        { return true; }
   14.23 -};
   14.24 -
   14.25  #ifndef PRODUCT
   14.26  class VM_DeoptimizeAll: public VM_Operation {
   14.27   private:
    15.1 --- a/src/share/vm/trace/trace.xml	Fri Sep 27 08:39:19 2013 +0200
    15.2 +++ b/src/share/vm/trace/trace.xml	Fri Sep 27 10:50:55 2013 +0200
    15.3 @@ -313,13 +313,6 @@
    15.4        <value type="UINT" field="zombifiedCount" label="Methods Zombified"/>
    15.5      </event>
    15.6  
    15.7 -    <event id="CleanCodeCache" path="vm/code_sweeper/clean" label="Clean Code Cache"
    15.8 -             description="Clean code cache from oldest methods"
    15.9 -             has_thread="true" is_requestable="false" is_constant="false">
   15.10 -      <value type="UINT" field="disconnectedCount" label="Methods Disconnected"/>
   15.11 -      <value type="UINT" field="madeNonEntrantCount" label="Methods Made Non-Entrant"/>
   15.12 -    </event>
   15.13 -
   15.14      <!-- Code cache events -->
   15.15  
   15.16      <event id="CodeCacheFull" path="vm/code_cache/full" label="Code Cache Full"
