src/share/vm/runtime/sweeper.cpp

author      never
date        Mon, 12 Jul 2010 22:27:18 -0700
changeset   2001:8d5934a77f10
parent      1999:2a47bd84841f
child       2138:d5d065957597
permissions -rw-r--r--

6968385: malformed xml in sweeper logging
Reviewed-by: kvn

/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_sweeper.cpp.incl"

long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
nmethod*  NMethodSweeper::_current = NULL;   // Current nmethod
int       NMethodSweeper::_seen = 0;         // No. of nmethods we have currently processed in current pass of CodeCache

volatile int NMethodSweeper::_invocations = 0;   // No. of invocations left until we are completed with this pass
volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.

jint      NMethodSweeper::_locked_seen = 0;
jint      NMethodSweeper::_not_entrant_seen_on_stack = 0;
bool      NMethodSweeper::_rescan = false;
bool      NMethodSweeper::_do_sweep = false;
bool      NMethodSweeper::_was_full = false;
jint      NMethodSweeper::_advise_to_sweep = 0;
jlong     NMethodSweeper::_last_was_full = 0;
uint      NMethodSweeper::_highest_marked = 0;
long      NMethodSweeper::_was_full_traversal = 0;

class MarkActivationClosure: public CodeBlobClosure {
public:
  virtual void do_code_blob(CodeBlob* cb) {
    // If we see an activation belonging to a non_entrant nmethod, we mark it.
    if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
      ((nmethod*)cb)->mark_as_seen_on_stack();
    }
  }
};
static MarkActivationClosure mark_activation_closure;

void NMethodSweeper::scan_stacks() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
  if (!MethodFlushing) return;
  _do_sweep = true;

  // No need to synchronize access, since this is always executed at a
  // safepoint.  If we aren't in the middle of a scan and a rescan hasn't
  // been requested, just return.  If UseCodeCacheFlushing is on and code
  // cache flushing is in progress, don't skip sweeping, so that we keep
  // making progress clearing space in the code cache.
  if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
    _do_sweep = false;
    return;
  }

  // Make sure the CompiledIC_lock is unlocked, since we might update some
  // inline caches.  If it is locked, we just bail out and try later.
  if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;

  // Check for restart
  assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
  if (_current == NULL) {
    _seen        = 0;
    _invocations = NmethodSweepFraction;
    _current     = CodeCache::first_nmethod();
    _traversals  += 1;
    if (PrintMethodFlushing) {
      tty->print_cr("### Sweep: stack traversal %d", _traversals);
    }
    Threads::nmethods_do(&mark_activation_closure);

    // Reset the flags since we started a scan from the beginning.
    _rescan = false;
    _locked_seen = 0;
    _not_entrant_seen_on_stack = 0;
  }

  if (UseCodeCacheFlushing) {
    if (!CodeCache::needs_flushing()) {
      // scan_stacks() runs during a safepoint, no race with setters
      _advise_to_sweep = 0;
    }

    if (was_full()) {
      // There was some progress, so attempt to restart the compiler
      jlong now           = os::javaTimeMillis();
      jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
      jlong curr_interval = now - _last_was_full;
      if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
        CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
        set_was_full(false);

        // Update the _last_was_full time so we can tell how fast the
        // code cache is filling up
        _last_was_full = os::javaTimeMillis();

        log_sweep("restart_compiler");
      }
    }
  }
}

void NMethodSweeper::possibly_sweep() {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
  if ((!MethodFlushing) || (!_do_sweep)) return;

  if (_invocations > 0) {
    // Only one thread at a time will sweep
    jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
    if (old != 0) {
      return;
    }
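    // Re-check _invocations after winning the cmpxchg race: another sweeper
    // may have completed the final invocation (and cleared _sweep_started)
    // between our first check above and this point.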
    if (_invocations > 0) {
      sweep_code_cache();
      _invocations--;
    }
    _sweep_started = 0;
  }
}

void NMethodSweeper::sweep_code_cache() {
#ifdef ASSERT
  jlong sweep_start;
  if (PrintMethodFlushing) {
    sweep_start = os::javaTimeMillis();
  }
#endif
  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
  }

  // We want to visit all nmethods after NmethodSweepFraction
  // invocations so divide the remaining number of nmethods by the
  // remaining number of invocations.  This is only an estimate since
  // the number of nmethods changes during the sweep so the final
  // stage must iterate until there are no more nmethods.
  int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;

  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    // The last invocation iterates until there are no more nmethods
    for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {

      // Since we will give up the CodeCache_lock, always skip ahead
      // to the next nmethod.  Other blobs can be deleted by other
      // threads but nmethods are only reclaimed by the sweeper.
      nmethod* next = CodeCache::next_nmethod(_current);

      // Now ready to process nmethod and give up CodeCache_lock
      {
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        process_nmethod(_current);
      }
      _seen++;
      _current = next;
    }
  }

  assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");

  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
    // We've completed a scan without making progress, but there were
    // nmethods we were unable to process, either because they were
    // locked or were still on stack.  We don't have to aggressively
    // clean them up so just stop scanning.  We could scan once more
    // but that complicates the control logic and it's unlikely to
    // matter much.
    if (PrintMethodFlushing) {
      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
    }
  }

#ifdef ASSERT
  if (PrintMethodFlushing) {
    jlong sweep_end             = os::javaTimeMillis();
    tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
  }
#endif

  if (_invocations == 1) {
    log_sweep("finished");
  }
}

void NMethodSweeper::process_nmethod(nmethod *nm) {
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  // Skip methods that are currently referenced by the VM
  if (nm->is_locked_by_vm()) {
    // But still remember to clean up inline caches for alive nmethods
    if (nm->is_alive()) {
      // Clean up all inline caches that point to zombie/non-entrant methods
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
    } else {
      _locked_seen++;
    }
    return;
  }

  if (nm->is_zombie()) {
    // If it is the first time we see this nmethod, we mark it. Otherwise,
    // we reclaim it. When we have seen a zombie method twice, we know that
    // there are no inline caches that refer to it.
    if (nm->is_marked_for_reclamation()) {
      assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
      }
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      nm->flush();
    } else {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
      }
      nm->mark_for_reclamation();
      _rescan = true;
    }
  } else if (nm->is_not_entrant()) {
    // If there are no current activations of this method on the
    // stack we can safely convert it to a zombie method
    if (nm->can_not_entrant_be_converted()) {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
      }
      nm->make_zombie();
      _rescan = true;
    } else {
      // Still alive, clean up its inline caches
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      // We couldn't transition this nmethod so don't immediately
      // request a rescan.  If this method stays on the stack for a
      // long time we don't want to keep rescanning the code cache.
      _not_entrant_seen_on_stack++;
    }
  } else if (nm->is_unloaded()) {
    // Unloaded code, just make it a zombie
    if (PrintMethodFlushing && Verbose)
      tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
    if (nm->is_osr_method()) {
      // No inline caches will ever point to osr methods, so we can just remove it
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      nm->flush();
    } else {
      nm->make_zombie();
      _rescan = true;
    }
  } else {
    assert(nm->is_alive(), "should be alive");

    if (UseCodeCacheFlushing) {
      if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
          (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
          CodeCache::needs_flushing()) {
        // This method has not been called since the forced cleanup happened
        nm->make_not_entrant();
      }
    }

    // Clean up all inline caches that point to zombie/non-entrant methods
    MutexLocker cl(CompiledIC_lock);
    nm->cleanup_inline_caches();
  }
}
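
// Taken together, process_nmethod() advances an nmethod one step per sweep
// through the reclamation sequence seen above: not_entrant (with no
// activations on stack) -> zombie, zombie -> marked_for_reclamation,
// marked_for_reclamation -> flushed.  Unloaded nmethods are made zombie, or
// flushed directly in the OSR case since no inline caches can point to them.
// nmethods that are locked by the VM or that still have activations on the
// stack are skipped for this pass (alive ones only get their inline caches
// cleaned); _locked_seen and _not_entrant_seen_on_stack record the cases
// where no progress could be made.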

// Code cache unloading: when compilers notice the code cache is getting full,
// they will call a vm op that comes here. This code attempts to speculatively
// unload the oldest half of the nmethods (based on the compile job id) by
// saving the old code in a list in the CodeCache. Then
// execution resumes. If a method so marked is not called by the second sweeper
// stack traversal after the current one, the nmethod will be marked non-entrant and
// removed by normal sweeping. If the method is called, the methodOop's
// _code field is restored and the methodOop/nmethod
// go back to their normal state.
void NMethodSweeper::handle_full_code_cache(bool is_full) {
  // Only the first one to notice can advise us to start early cleaning
  if (!is_full) {
    jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
    if (old != 0) {
      return;
    }
  }

  if (is_full) {
    // Since code cache is full, immediately stop new compiles
    bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
    if (!did_set) {
      // Only the first to notice can start the cleaning,
      // others will go back and block
      return;
    }
    set_was_full(true);

    // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
    jlong now = os::javaTimeMillis();
    jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
    jlong curr_interval = now - _last_was_full;
    if (curr_interval < max_interval) {
      _rescan = true;
      log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'",
                           curr_interval/1000);
      return;
    }
  }

  VM_HandleFullCodeCache op(is_full);
  VMThread::execute(&op);

  // Rescan again as soon as possible
  _rescan = true;
}

void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
  // If there was a race in detecting full code cache, only run
  // one vm op for it or keep the compiler shut off

  debug_only(jlong start = os::javaTimeMillis();)

  if ((!was_full()) && (is_full)) {
    if (!CodeCache::needs_flushing()) {
      log_sweep("restart_compiler");
      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
      return;
    }
  }

  // Traverse the code cache trying to dump the oldest nmethods
  uint curr_max_comp_id = CompileBroker::get_compilation_id();
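  // Note: flush_target below is the midpoint between the highest compile id
  // marked by a previous cleaning pass (_highest_marked) and the current
  // highest compile id, so roughly the older half of that range is considered.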
  uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
  log_sweep("start_cleaning");

  nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
  jint disconnected = 0;
  jint made_not_entrant  = 0;
  while (nm != NULL) {
    uint curr_comp_id = nm->compile_id();

    // OSR methods cannot be flushed like this. Also, don't flush native methods
    // since they are part of the JDK in most cases
    if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
        (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {

      if ((nm->method()->code() == nm)) {
        // This method has not been previously considered for
        // unloading or it was restored already
        CodeCache::speculatively_disconnect(nm);
        disconnected++;
      } else if (nm->is_speculatively_disconnected()) {
        // This method was previously considered for preemptive unloading and was not called since then
        nm->method()->invocation_counter()->decay();
        nm->method()->backedge_counter()->decay();
        nm->make_not_entrant();
        made_not_entrant++;
      }

      if (curr_comp_id > _highest_marked) {
        _highest_marked = curr_comp_id;
      }
    }
    nm = CodeCache::alive_nmethod(CodeCache::next(nm));
  }

  log_sweep("stop_cleaning",
                       "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
                       disconnected, made_not_entrant);

  // Shut off compiler. Sweeper will start over with a new stack scan and
  // traversal cycle and turn it back on if it clears enough space.
  if (was_full()) {
    _last_was_full = os::javaTimeMillis();
    CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
  }

  // After two more traversals the sweeper will get rid of unrestored nmethods
  _was_full_traversal = _traversals;
#ifdef ASSERT
  jlong end = os::javaTimeMillis();
  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);
  }
#endif
}

// Print out some state information about the current sweep and the
// state of the code cache if it's requested.
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
  if (PrintMethodFlushing) {
    ttyLocker ttyl;
    tty->print("### sweeper: %s ", msg);
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      tty->vprint(format, ap);
      va_end(ap);
    }
    tty->print_cr(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
                  " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
                  CodeCache::nof_blobs(), CodeCache::nof_nmethods(), CodeCache::nof_adapters(), CodeCache::unallocated_capacity());
  }

  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      xtty->vprint(format, ap);
      va_end(ap);
    }
    xtty->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
                " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
                CodeCache::nof_blobs(), CodeCache::nof_nmethods(), CodeCache::nof_adapters(), CodeCache::unallocated_capacity());
    xtty->stamp();
    xtty->end_elem();
  }
}
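
// For reference, with LogCompilation the code above emits a single well-formed
// element per call; the values in this sketch are made up for illustration:
//
//   <sweeper state='finished' traversals='42' total_blobs='2412'
//            nmethods='1721' adapters='134' free_code_cache='31463424'
//            stamp='9.725'/>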
