src/share/vm/runtime/sweeper.cpp

author:      iveresov
date:        Wed, 15 Feb 2012 12:32:03 -0800
changeset:   3572:cfdfbeac0a5b
parent:      2916:f52ed367b66d
child:       3900:d2a62e0f25eb
permissions: -rw-r--r--

7145345: Code cache sweeper must cooperate with safepoints
Summary: A safepoint check in the sweeper loop is necessary
Reviewed-by: kvn, never
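
In outline, the fix makes sweep_code_cache() poll for a pending safepoint on each
iteration of its sweep loop and, when one is requested, temporarily drop the
CodeCache_lock and block the sweeping thread so the safepoint can proceed. A minimal
sketch of that yield pattern, lifted from the loop body in the file below:

    if (SafepointSynchronize::is_synchronizing()) {  // a safepoint has been requested
      // Drop the CodeCache_lock while we block; MutexUnlockerEx reacquires it on scope exit.
      MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      JavaThread* thread = (JavaThread*)Thread::current();
      ThreadBlockInVM tbivm(thread);   // transition to _thread_blocked so the safepoint can complete
      thread->java_suspend_self();     // also honor any pending suspend request
    }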

     1 /*
     2  * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "code/codeCache.hpp"
    27 #include "code/nmethod.hpp"
    28 #include "compiler/compileBroker.hpp"
    29 #include "memory/resourceArea.hpp"
    30 #include "oops/methodOop.hpp"
    31 #include "runtime/atomic.hpp"
    32 #include "runtime/compilationPolicy.hpp"
    33 #include "runtime/mutexLocker.hpp"
    34 #include "runtime/os.hpp"
    35 #include "runtime/sweeper.hpp"
    36 #include "runtime/vm_operations.hpp"
    37 #include "utilities/events.hpp"
    38 #include "utilities/xmlstream.hpp"
    40 #ifdef ASSERT
    42 #define SWEEP(nm) record_sweep(nm, __LINE__)
    43 // Sweeper logging code
    44 class SweeperRecord {
    45  public:
    46   int traversal;
    47   int invocation;
    48   int compile_id;
    49   long traversal_mark;
    50   int state;
    51   const char* kind;
    52   address vep;
    53   address uep;
    54   int line;
    56   void print() {
    57       tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
    58                     PTR_FORMAT " state = %d traversal_mark %d line = %d",
    59                     traversal,
    60                     invocation,
    61                     compile_id,
    62                     kind == NULL ? "" : kind,
    63                     uep,
    64                     vep,
    65                     state,
    66                     traversal_mark,
    67                     line);
    68   }
    69 };
    71 static int _sweep_index = 0;
    72 static SweeperRecord* _records = NULL;
    74 void NMethodSweeper::report_events(int id, address entry) {
    75   if (_records != NULL) {
    76     for (int i = _sweep_index; i < SweeperLogEntries; i++) {
    77       if (_records[i].uep == entry ||
    78           _records[i].vep == entry ||
    79           _records[i].compile_id == id) {
    80         _records[i].print();
    81       }
    82     }
    83     for (int i = 0; i < _sweep_index; i++) {
    84       if (_records[i].uep == entry ||
    85           _records[i].vep == entry ||
    86           _records[i].compile_id == id) {
    87         _records[i].print();
    88       }
    89     }
    90   }
    91 }
    93 void NMethodSweeper::report_events() {
    94   if (_records != NULL) {
    95     for (int i = _sweep_index; i < SweeperLogEntries; i++) {
    96       // skip empty records
    97       if (_records[i].vep == NULL) continue;
    98       _records[i].print();
    99     }
   100     for (int i = 0; i < _sweep_index; i++) {
   101       // skip empty records
   102       if (_records[i].vep == NULL) continue;
   103       _records[i].print();
   104     }
   105   }
   106 }
   108 void NMethodSweeper::record_sweep(nmethod* nm, int line) {
   109   if (_records != NULL) {
   110     _records[_sweep_index].traversal = _traversals;
   111     _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
   112     _records[_sweep_index].invocation = _invocations;
   113     _records[_sweep_index].compile_id = nm->compile_id();
   114     _records[_sweep_index].kind = nm->compile_kind();
   115     _records[_sweep_index].state = nm->_state;
   116     _records[_sweep_index].vep = nm->verified_entry_point();
   117     _records[_sweep_index].uep = nm->entry_point();
   118     _records[_sweep_index].line = line;
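            // Advance the ring-buffer cursor; wrap around after SweeperLogEntries records.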
   120     _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
   121   }
   122 }
   123 #else
   124 #define SWEEP(nm)
   125 #endif
   128 long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
   129 nmethod*  NMethodSweeper::_current = NULL;   // Current nmethod
   130 int       NMethodSweeper::_seen = 0 ;        // No. of nmethods we have currently processed in current pass of CodeCache
   132 volatile int NMethodSweeper::_invocations = 0;   // No. of invocations left until we are completed with this pass
   133 volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
   135 jint      NMethodSweeper::_locked_seen = 0;
   136 jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
   137 bool      NMethodSweeper::_rescan = false;
   138 bool      NMethodSweeper::_do_sweep = false;
   139 bool      NMethodSweeper::_was_full = false;
   140 jint      NMethodSweeper::_advise_to_sweep = 0;
   141 jlong     NMethodSweeper::_last_was_full = 0;
   142 uint      NMethodSweeper::_highest_marked = 0;
   143 long      NMethodSweeper::_was_full_traversal = 0;
   145 class MarkActivationClosure: public CodeBlobClosure {
   146 public:
   147   virtual void do_code_blob(CodeBlob* cb) {
   148     // If we see an activation belonging to a non_entrant nmethod, we mark it.
   149     if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
   150       ((nmethod*)cb)->mark_as_seen_on_stack();
   151     }
   152   }
   153 };
   154 static MarkActivationClosure mark_activation_closure;
   156 void NMethodSweeper::scan_stacks() {
   157   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
   158   if (!MethodFlushing) return;
   159   _do_sweep = true;
   161   // No need to synchronize access, since this is always executed at a
    162   // safepoint.  If we aren't in the middle of a scan and a rescan
    163   // hasn't been requested, then just return. If UseCodeCacheFlushing is on and
    164   // code cache flushing is in progress, don't skip sweeping; that helps make
    165   // progress clearing space in the code cache.
   166   if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
   167     _do_sweep = false;
   168     return;
   169   }
    171   // Make sure CompiledIC_lock is unlocked, since we might update some
    172   // inline caches. If it is locked, we just bail out and try again later.
   173   if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
   175   // Check for restart
   176   assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
   177   if (_current == NULL) {
   178     _seen        = 0;
   179     _invocations = NmethodSweepFraction;
   180     _current     = CodeCache::first_nmethod();
   181     _traversals  += 1;
   182     if (PrintMethodFlushing) {
   183       tty->print_cr("### Sweep: stack traversal %d", _traversals);
   184     }
   185     Threads::nmethods_do(&mark_activation_closure);
   187     // reset the flags since we started a scan from the beginning.
   188     _rescan = false;
   189     _locked_seen = 0;
   190     _not_entrant_seen_on_stack = 0;
   191   }
   193   if (UseCodeCacheFlushing) {
   194     if (!CodeCache::needs_flushing()) {
   195       // scan_stacks() runs during a safepoint, no race with setters
   196       _advise_to_sweep = 0;
   197     }
   199     if (was_full()) {
   200       // There was some progress so attempt to restart the compiler
   201       jlong now           = os::javaTimeMillis();
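              // MinCodeCacheFlushingInterval is given in seconds; scale it to milliseconds
              // so it can be compared with the javaTimeMillis() delta computed below.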
   202       jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
   203       jlong curr_interval = now - _last_was_full;
   204       if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
   205         CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
   206         set_was_full(false);
   208         // Update the _last_was_full time so we can tell how fast the
   209         // code cache is filling up
   210         _last_was_full = os::javaTimeMillis();
   212         log_sweep("restart_compiler");
   213       }
   214     }
   215   }
   216 }
   218 void NMethodSweeper::possibly_sweep() {
   219   assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
   220   if ((!MethodFlushing) || (!_do_sweep)) return;
   222   if (_invocations > 0) {
   223     // Only one thread at a time will sweep
   224     jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
   225     if (old != 0) {
   226       return;
   227     }
   228 #ifdef ASSERT
   229     if (LogSweeper && _records == NULL) {
   230       // Create the ring buffer for the logging code
   231       _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries);
   232       memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
   233     }
   234 #endif
   235     if (_invocations > 0) {
   236       sweep_code_cache();
   237       _invocations--;
   238     }
   239     _sweep_started = 0;
   240   }
   241 }
   243 void NMethodSweeper::sweep_code_cache() {
   244 #ifdef ASSERT
   245   jlong sweep_start;
   246   if (PrintMethodFlushing) {
   247     sweep_start = os::javaTimeMillis();
   248   }
   249 #endif
   250   if (PrintMethodFlushing && Verbose) {
   251     tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
   252   }
   254   // We want to visit all nmethods after NmethodSweepFraction
   255   // invocations so divide the remaining number of nmethods by the
   256   // remaining number of invocations.  This is only an estimate since
   257   // the number of nmethods changes during the sweep so the final
    258   // stage must iterate until there are no more nmethods.
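          // For example, with 4000 nmethods still unseen and 4 invocations left, this
          // invocation aims to process roughly 1000 of them.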
   259   int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
   261   assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
   262   assert(!CodeCache_lock->owned_by_self(), "just checking");
   264   {
   265     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   267     // The last invocation iterates until there are no more nmethods
   268     for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
   269       if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
   270         if (PrintMethodFlushing && Verbose) {
   271           tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
   272         }
   273         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   275         assert(Thread::current()->is_Java_thread(), "should be java thread");
   276         JavaThread* thread = (JavaThread*)Thread::current();
   277         ThreadBlockInVM tbivm(thread);
   278         thread->java_suspend_self();
   279       }
   280       // Since we will give up the CodeCache_lock, always skip ahead
   281       // to the next nmethod.  Other blobs can be deleted by other
   282       // threads but nmethods are only reclaimed by the sweeper.
   283       nmethod* next = CodeCache::next_nmethod(_current);
   285       // Now ready to process nmethod and give up CodeCache_lock
   286       {
   287         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   288         process_nmethod(_current);
   289       }
   290       _seen++;
   291       _current = next;
   292     }
   293   }
   295   assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
   297   if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
   298     // we've completed a scan without making progress but there were
   299     // nmethods we were unable to process either because they were
    300     // locked or were still on stack.  We don't have to aggressively
   301     // clean them up so just stop scanning.  We could scan once more
   302     // but that complicates the control logic and it's unlikely to
   303     // matter much.
   304     if (PrintMethodFlushing) {
   305       tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
   306     }
   307   }
   309 #ifdef ASSERT
    310   if (PrintMethodFlushing) {
   311     jlong sweep_end             = os::javaTimeMillis();
   312     tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
   313   }
   314 #endif
   316   if (_invocations == 1) {
   317     log_sweep("finished");
   318   }
   319 }
   321 class NMethodMarker: public StackObj {
   322  private:
   323   CompilerThread* _thread;
   324  public:
   325   NMethodMarker(nmethod* nm) {
   326     _thread = CompilerThread::current();
   327     _thread->set_scanned_nmethod(nm);
   328   }
   329   ~NMethodMarker() {
   330     _thread->set_scanned_nmethod(NULL);
   331   }
   332 };
   335 void NMethodSweeper::process_nmethod(nmethod *nm) {
   336   assert(!CodeCache_lock->owned_by_self(), "just checking");
   338   // Make sure this nmethod doesn't get unloaded during the scan,
   339   // since the locks acquired below might safepoint.
   340   NMethodMarker nmm(nm);
   342   SWEEP(nm);
   344   // Skip methods that are currently referenced by the VM
   345   if (nm->is_locked_by_vm()) {
    346     // But still remember to clean up inline caches for alive nmethods
   347     if (nm->is_alive()) {
    348       // Clean up all inline caches that point to zombie/non-entrant methods
   349       MutexLocker cl(CompiledIC_lock);
   350       nm->cleanup_inline_caches();
   351       SWEEP(nm);
   352     } else {
   353       _locked_seen++;
   354       SWEEP(nm);
   355     }
   356     return;
   357   }
   359   if (nm->is_zombie()) {
    360     // The first time we see a zombie nmethod we mark it; the next time we
    361     // reclaim it. Once we have seen a zombie method twice, we know that
   362     // there are no inline caches that refer to it.
   363     if (nm->is_marked_for_reclamation()) {
   364       assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
   365       if (PrintMethodFlushing && Verbose) {
   366         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
   367       }
   368       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   369       nm->flush();
   370     } else {
   371       if (PrintMethodFlushing && Verbose) {
   372         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
   373       }
   374       nm->mark_for_reclamation();
   375       _rescan = true;
   376       SWEEP(nm);
   377     }
   378   } else if (nm->is_not_entrant()) {
    379     // If there are no current activations of this method on the
   380     // stack we can safely convert it to a zombie method
   381     if (nm->can_not_entrant_be_converted()) {
   382       if (PrintMethodFlushing && Verbose) {
   383         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
   384       }
   385       nm->make_zombie();
   386       _rescan = true;
   387       SWEEP(nm);
   388     } else {
   389       // Still alive, clean up its inline caches
   390       MutexLocker cl(CompiledIC_lock);
   391       nm->cleanup_inline_caches();
    392       // We couldn't transition this nmethod, so don't immediately
   393       // request a rescan.  If this method stays on the stack for a
   394       // long time we don't want to keep rescanning the code cache.
   395       _not_entrant_seen_on_stack++;
   396       SWEEP(nm);
   397     }
   398   } else if (nm->is_unloaded()) {
   399     // Unloaded code, just make it a zombie
   400     if (PrintMethodFlushing && Verbose)
   401       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
   402     if (nm->is_osr_method()) {
    403       // No inline caches will ever point to osr methods, so we can just remove this one
   404       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   405       SWEEP(nm);
   406       nm->flush();
   407     } else {
   408       nm->make_zombie();
   409       _rescan = true;
   410       SWEEP(nm);
   411     }
   412   } else {
   413     assert(nm->is_alive(), "should be alive");
   415     if (UseCodeCacheFlushing) {
   416       if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
   417           (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
   418           CodeCache::needs_flushing()) {
   419         // This method has not been called since the forced cleanup happened
   420         nm->make_not_entrant();
   421       }
   422     }
    424     // Clean up all inline caches that point to zombie/non-entrant methods
   425     MutexLocker cl(CompiledIC_lock);
   426     nm->cleanup_inline_caches();
   427     SWEEP(nm);
   428   }
   429 }
   431 // Code cache unloading: when compilers notice the code cache is getting full,
   432 // they will call a vm op that comes here. This code attempts to speculatively
   433 // unload the oldest half of the nmethods (based on the compile job id) by
   434 // saving the old code in a list in the CodeCache. Then
   435 // execution resumes. If a method so marked is not called by the second sweeper
   436 // stack traversal after the current one, the nmethod will be marked non-entrant and
    437 // removed by normal sweeping. If the method is called, the methodOop's
   438 // _code field is restored and the methodOop/nmethod
   439 // go back to their normal state.
   440 void NMethodSweeper::handle_full_code_cache(bool is_full) {
   441   // Only the first one to notice can advise us to start early cleaning
   442   if (!is_full){
   443     jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
   444     if (old != 0) {
   445       return;
   446     }
   447   }
   449   if (is_full) {
   450     // Since code cache is full, immediately stop new compiles
   451     bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
   452     if (!did_set) {
   453       // only the first to notice can start the cleaning,
   454       // others will go back and block
   455       return;
   456     }
   457     set_was_full(true);
   459     // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
   460     jlong now = os::javaTimeMillis();
   461     jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
   462     jlong curr_interval = now - _last_was_full;
   463     if (curr_interval < max_interval) {
   464       _rescan = true;
   465       log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'",
   466                            curr_interval/1000);
   467       return;
   468     }
   469   }
   471   VM_HandleFullCodeCache op(is_full);
   472   VMThread::execute(&op);
   474   // rescan again as soon as possible
   475   _rescan = true;
   476 }
   478 void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
    479   // If there was a race in detecting a full code cache, only run
   480   // one vm op for it or keep the compiler shut off
   482   debug_only(jlong start = os::javaTimeMillis();)
   484   if ((!was_full()) && (is_full)) {
   485     if (!CodeCache::needs_flushing()) {
   486       log_sweep("restart_compiler");
   487       CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
   488       return;
   489     }
   490   }
   492   // Traverse the code cache trying to dump the oldest nmethods
   493   uint curr_max_comp_id = CompileBroker::get_compilation_id();
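          // The target is the midpoint between the highest compile id marked in a previous
          // cleaning round and the newest compile id: e.g. _highest_marked = 100 and
          // curr_max_comp_id = 300 give flush_target = 200, so only the older half of the
          // live nmethods is considered below.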
   494   uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
   495   log_sweep("start_cleaning");
   497   nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
   498   jint disconnected = 0;
   499   jint made_not_entrant  = 0;
   500   while ((nm != NULL)){
   501     uint curr_comp_id = nm->compile_id();
   503     // OSR methods cannot be flushed like this. Also, don't flush native methods
   504     // since they are part of the JDK in most cases
   505     if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
   506         (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {
   508       if ((nm->method()->code() == nm)) {
   509         // This method has not been previously considered for
   510         // unloading or it was restored already
   511         CodeCache::speculatively_disconnect(nm);
   512         disconnected++;
   513       } else if (nm->is_speculatively_disconnected()) {
    514         // This method was previously considered for preemptive unloading and has not been called since
   515         CompilationPolicy::policy()->delay_compilation(nm->method());
   516         nm->make_not_entrant();
   517         made_not_entrant++;
   518       }
   520       if (curr_comp_id > _highest_marked) {
   521         _highest_marked = curr_comp_id;
   522       }
   523     }
   524     nm = CodeCache::alive_nmethod(CodeCache::next(nm));
   525   }
   527   log_sweep("stop_cleaning",
   528                        "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
   529                        disconnected, made_not_entrant);
   531   // Shut off compiler. Sweeper will start over with a new stack scan and
   532   // traversal cycle and turn it back on if it clears enough space.
   533   if (was_full()) {
   534     _last_was_full = os::javaTimeMillis();
   535     CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
   536   }
   538   // After two more traversals the sweeper will get rid of unrestored nmethods
   539   _was_full_traversal = _traversals;
   540 #ifdef ASSERT
   541   jlong end = os::javaTimeMillis();
    542   if (PrintMethodFlushing && Verbose) {
   543     tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);
   544   }
   545 #endif
   546 }
   549 // Print out some state information about the current sweep and the
   550 // state of the code cache if it's requested.
   551 void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
   552   if (PrintMethodFlushing) {
   553     stringStream s;
   554     // Dump code cache state into a buffer before locking the tty,
   555     // because log_state() will use locks causing lock conflicts.
   556     CodeCache::log_state(&s);
   558     ttyLocker ttyl;
   559     tty->print("### sweeper: %s ", msg);
   560     if (format != NULL) {
   561       va_list ap;
   562       va_start(ap, format);
   563       tty->vprint(format, ap);
   564       va_end(ap);
   565     }
   566     tty->print_cr(s.as_string());
   567   }
   569   if (LogCompilation && (xtty != NULL)) {
   570     stringStream s;
   571     // Dump code cache state into a buffer before locking the tty,
   572     // because log_state() will use locks causing lock conflicts.
   573     CodeCache::log_state(&s);
   575     ttyLocker ttyl;
   576     xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
   577     if (format != NULL) {
   578       va_list ap;
   579       va_start(ap, format);
   580       xtty->vprint(format, ap);
   581       va_end(ap);
   582     }
   583     xtty->print(s.as_string());
   584     xtty->stamp();
   585     xtty->end_elem();
   586   }
   587 }
