src/share/vm/runtime/sweeper.cpp

author:      brutisso
date:        Fri, 27 Jan 2012 09:04:57 +0100
changeset:   3467:0a10d80352d5
parent:      2916:f52ed367b66d
child:       3572:cfdfbeac0a5b
permissions: -rw-r--r--
summary:     Merge

duke@435 1 /*
trims@2708 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "code/codeCache.hpp"
stefank@2314 27 #include "code/nmethod.hpp"
stefank@2314 28 #include "compiler/compileBroker.hpp"
stefank@2314 29 #include "memory/resourceArea.hpp"
stefank@2314 30 #include "oops/methodOop.hpp"
stefank@2314 31 #include "runtime/atomic.hpp"
stefank@2314 32 #include "runtime/compilationPolicy.hpp"
stefank@2314 33 #include "runtime/mutexLocker.hpp"
stefank@2314 34 #include "runtime/os.hpp"
stefank@2314 35 #include "runtime/sweeper.hpp"
stefank@2314 36 #include "runtime/vm_operations.hpp"
stefank@2314 37 #include "utilities/events.hpp"
stefank@2314 38 #include "utilities/xmlstream.hpp"
duke@435 39
never@2916 40 #ifdef ASSERT
never@2916 41
never@2916 42 #define SWEEP(nm) record_sweep(nm, __LINE__)
never@2916 43 // Sweeper logging code
never@2916 44 class SweeperRecord {
never@2916 45 public:
never@2916 46 int traversal;
never@2916 47 int invocation;
never@2916 48 int compile_id;
never@2916 49 long traversal_mark;
never@2916 50 int state;
never@2916 51 const char* kind;
never@2916 52 address vep;
never@2916 53 address uep;
never@2916 54 int line;
never@2916 55
never@2916 56 void print() {
never@2916 57 tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
never@2916 58 PTR_FORMAT " state = %d traversal_mark = %ld line = %d",
never@2916 59 traversal,
never@2916 60 invocation,
never@2916 61 compile_id,
never@2916 62 kind == NULL ? "" : kind,
never@2916 63 uep,
never@2916 64 vep,
never@2916 65 state,
never@2916 66 traversal_mark,
never@2916 67 line);
never@2916 68 }
never@2916 69 };
never@2916 70
never@2916 71 static int _sweep_index = 0;
never@2916 72 static SweeperRecord* _records = NULL;
never@2916 73
never@2916 74 void NMethodSweeper::report_events(int id, address entry) {
never@2916 75 if (_records != NULL) {
never@2916 76 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
never@2916 77 if (_records[i].uep == entry ||
never@2916 78 _records[i].vep == entry ||
never@2916 79 _records[i].compile_id == id) {
never@2916 80 _records[i].print();
never@2916 81 }
never@2916 82 }
never@2916 83 for (int i = 0; i < _sweep_index; i++) {
never@2916 84 if (_records[i].uep == entry ||
never@2916 85 _records[i].vep == entry ||
never@2916 86 _records[i].compile_id == id) {
never@2916 87 _records[i].print();
never@2916 88 }
never@2916 89 }
never@2916 90 }
never@2916 91 }
never@2916 92
never@2916 93 void NMethodSweeper::report_events() {
never@2916 94 if (_records != NULL) {
never@2916 95 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
never@2916 96 // skip empty records
never@2916 97 if (_records[i].vep == NULL) continue;
never@2916 98 _records[i].print();
never@2916 99 }
never@2916 100 for (int i = 0; i < _sweep_index; i++) {
never@2916 101 // skip empty records
never@2916 102 if (_records[i].vep == NULL) continue;
never@2916 103 _records[i].print();
never@2916 104 }
never@2916 105 }
never@2916 106 }
never@2916 107
never@2916 108 void NMethodSweeper::record_sweep(nmethod* nm, int line) {
never@2916 109 if (_records != NULL) {
never@2916 110 _records[_sweep_index].traversal = _traversals;
never@2916 111 _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
never@2916 112 _records[_sweep_index].invocation = _invocations;
never@2916 113 _records[_sweep_index].compile_id = nm->compile_id();
never@2916 114 _records[_sweep_index].kind = nm->compile_kind();
never@2916 115 _records[_sweep_index].state = nm->_state;
never@2916 116 _records[_sweep_index].vep = nm->verified_entry_point();
never@2916 117 _records[_sweep_index].uep = nm->entry_point();
never@2916 118 _records[_sweep_index].line = line;
never@2916 119
never@2916 120 _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
never@2916 121 }
never@2916 122 }
never@2916 123 #else
never@2916 124 #define SWEEP(nm)
never@2916 125 #endif
never@2916 126
never@2916 127
duke@435 128 long NMethodSweeper::_traversals = 0; // No. of stack traversals performed
never@1970 129 nmethod* NMethodSweeper::_current = NULL; // Current nmethod
never@1999 130 int NMethodSweeper::_seen = 0; // No. of nmethods processed so far in the current pass of the CodeCache
never@1999 131
never@1999 132 volatile int NMethodSweeper::_invocations = 0; // No. of invocations left until the current pass is complete
never@1999 133 volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
duke@435 134
duke@435 135 jint NMethodSweeper::_locked_seen = 0;
duke@435 136 jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
duke@435 137 bool NMethodSweeper::_rescan = false;
never@1893 138 bool NMethodSweeper::_do_sweep = false;
kvn@1637 139 bool NMethodSweeper::_was_full = false;
kvn@1637 140 jint NMethodSweeper::_advise_to_sweep = 0;
kvn@1637 141 jlong NMethodSweeper::_last_was_full = 0;
kvn@1637 142 uint NMethodSweeper::_highest_marked = 0;
kvn@1637 143 long NMethodSweeper::_was_full_traversal = 0;
duke@435 144
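// Overview (as implemented below): scan_stacks() runs inside a safepoint. It
// bumps _traversals when a new pass starts and uses MarkActivationClosure to
// mark every not-entrant nmethod that still has an activation on some thread
// stack. The sweeping itself is done incrementally by possibly_sweep(), which
// splits one pass over the code cache into NmethodSweepFraction invocations
// and lets only one thread sweep at a time.
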
jrose@1424 145 class MarkActivationClosure: public CodeBlobClosure {
jrose@1424 146 public:
jrose@1424 147 virtual void do_code_blob(CodeBlob* cb) {
jrose@1424 148 // If we see an activation belonging to a non_entrant nmethod, we mark it.
jrose@1424 149 if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
jrose@1424 150 ((nmethod*)cb)->mark_as_seen_on_stack();
jrose@1424 151 }
jrose@1424 152 }
jrose@1424 153 };
jrose@1424 154 static MarkActivationClosure mark_activation_closure;
jrose@1424 155
never@1893 156 void NMethodSweeper::scan_stacks() {
duke@435 157 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
duke@435 158 if (!MethodFlushing) return;
never@1893 159 _do_sweep = true;
duke@435 160
duke@435 161 // No need to synchronize access, since this is always executed at a
duke@435 162 // safepoint. If we aren't in the middle of a scan and a rescan
never@1893 163 // hasn't been requested, just return. However, if UseCodeCacheFlushing is on
never@1893 164 // and code cache flushing is in progress, don't skip sweeping, so that we keep
never@1893 165 // making progress clearing space in the code cache.
never@1893 166 if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
never@1893 167 _do_sweep = false;
never@1893 168 return;
never@1893 169 }
duke@435 170
duke@435 171 // Make sure the CompiledIC_lock is unlocked, since we might update some
duke@435 172 // inline caches. If it is locked, we just bail out and try again later.
duke@435 173 if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
duke@435 174
duke@435 175 // Check for restart
duke@435 176 assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
duke@435 177 if (_current == NULL) {
duke@435 178 _seen = 0;
duke@435 179 _invocations = NmethodSweepFraction;
never@1893 180 _current = CodeCache::first_nmethod();
duke@435 181 _traversals += 1;
duke@435 182 if (PrintMethodFlushing) {
duke@435 183 tty->print_cr("### Sweep: stack traversal %ld", _traversals);
duke@435 184 }
jrose@1424 185 Threads::nmethods_do(&mark_activation_closure);
duke@435 186
duke@435 187 // reset the flags since we started a scan from the beginning.
duke@435 188 _rescan = false;
duke@435 189 _locked_seen = 0;
duke@435 190 _not_entrant_seen_on_stack = 0;
duke@435 191 }
duke@435 192
kvn@1637 193 if (UseCodeCacheFlushing) {
kvn@1637 194 if (!CodeCache::needs_flushing()) {
never@1893 195 // scan_stacks() runs during a safepoint, no race with setters
kvn@1637 196 _advise_to_sweep = 0;
kvn@1637 197 }
kvn@1637 198
kvn@1637 199 if (was_full()) {
kvn@1637 200 // There was some progress so attempt to restart the compiler
kvn@1637 201 jlong now = os::javaTimeMillis();
kvn@1637 202 jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
kvn@1637 203 jlong curr_interval = now - _last_was_full;
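// Note: MinCodeCacheFlushingInterval is given in seconds, while
// os::javaTimeMillis() deltas are milliseconds, hence the * 1000 above.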
kvn@1637 204 if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
kvn@1637 205 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
kvn@1637 206 set_was_full(false);
kvn@1637 207
kvn@1637 208 // Update the _last_was_full time so we can tell how fast the
kvn@1637 209 // code cache is filling up
kvn@1637 210 _last_was_full = os::javaTimeMillis();
kvn@1637 211
never@1999 212 log_sweep("restart_compiler");
kvn@1637 213 }
kvn@1637 214 }
kvn@1637 215 }
duke@435 216 }
duke@435 217
never@1893 218 void NMethodSweeper::possibly_sweep() {
never@1999 219 assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
never@1893 220 if ((!MethodFlushing) || (!_do_sweep)) return;
never@1893 221
never@1893 222 if (_invocations > 0) {
never@1893 223 // Only one thread at a time will sweep
never@1893 224 jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
never@1893 225 if (old != 0) {
never@1893 226 return;
never@1893 227 }
never@2916 228 #ifdef ASSERT
never@2916 229 if (LogSweeper && _records == NULL) {
never@2916 230 // Create the ring buffer for the logging code
never@2916 231 _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries);
never@2916 232 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
never@2916 233 }
never@2916 234 #endif
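// Re-check _invocations now that this thread owns _sweep_started: another
// thread may have completed the final invocation of the current pass between
// the test above and this thread winning the cmpxchg.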
never@1999 235 if (_invocations > 0) {
never@1999 236 sweep_code_cache();
never@1999 237 _invocations--;
never@1999 238 }
never@1999 239 _sweep_started = 0;
never@1893 240 }
never@1893 241 }
never@1893 242
never@1893 243 void NMethodSweeper::sweep_code_cache() {
never@1893 244 #ifdef ASSERT
never@1893 245 jlong sweep_start;
never@1999 246 if (PrintMethodFlushing) {
never@1893 247 sweep_start = os::javaTimeMillis();
never@1893 248 }
never@1893 249 #endif
never@1893 250 if (PrintMethodFlushing && Verbose) {
never@1999 251 tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
never@1893 252 }
never@1893 253
never@1999 254 // We want to visit all nmethods after NmethodSweepFraction
never@1999 255 // invocations, so divide the remaining number of nmethods by the
never@1999 256 // remaining number of invocations. This is only an estimate, since
never@1999 257 // the number of nmethods changes during the sweep, so the final
never@1999 258 // invocation must iterate until there are no more nmethods.
never@1999 259 int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
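// Illustrative numbers only: with 3200 nmethods in the cache, _seen == 0 and
// _invocations == 16, this invocation would visit roughly 3200 / 16 == 200
// nmethods before handing the rest over to later invocations.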
never@1893 260
never@1893 261 assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
never@1893 262 assert(!CodeCache_lock->owned_by_self(), "just checking");
never@1893 263
never@1893 264 {
never@1893 265 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
never@1893 266
never@1999 267 // The last invocation iterates until there are no more nmethods
never@1999 268 for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
never@1893 269
never@1999 270 // Since we will give up the CodeCache_lock, always skip ahead
never@1999 271 // to the next nmethod. Other blobs can be deleted by other
never@1999 272 // threads but nmethods are only reclaimed by the sweeper.
never@1970 273 nmethod* next = CodeCache::next_nmethod(_current);
never@1893 274
never@1893 275 // Now ready to process nmethod and give up CodeCache_lock
never@1893 276 {
never@1893 277 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
never@1970 278 process_nmethod(_current);
never@1893 279 }
never@1893 280 _seen++;
never@1893 281 _current = next;
never@1893 282 }
never@1893 283 }
never@1893 284
never@1999 285 assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
never@1999 286
never@1893 287 if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
never@1893 288 // we've completed a scan without making progress but there were
never@1893 289 // nmethods we were unable to process either because they were
never@1893 290 // locked or were still on stack. We don't have to aggressively
never@1893 291 // clean them up so just stop scanning. We could scan once more
never@1893 292 // but that complicates the control logic and it's unlikely to
never@1893 293 // matter much.
never@1893 294 if (PrintMethodFlushing) {
never@1893 295 tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
never@1893 296 }
never@1893 297 }
never@1893 298
never@1893 299 #ifdef ASSERT
never@1893 300 if (PrintMethodFlushing) {
never@1893 301 jlong sweep_end = os::javaTimeMillis();
never@1893 302 tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
never@1893 303 }
never@1893 304 #endif
never@1999 305
never@1999 306 if (_invocations == 1) {
never@1999 307 log_sweep("finished");
never@1999 308 }
never@1893 309 }
never@1893 310
never@2916 311 class NMethodMarker: public StackObj {
never@2916 312 private:
never@2916 313 CompilerThread* _thread;
never@2916 314 public:
never@2916 315 NMethodMarker(nmethod* nm) {
never@2916 316 _thread = CompilerThread::current();
never@2916 317 _thread->set_scanned_nmethod(nm);
never@2916 318 }
never@2916 319 ~NMethodMarker() {
never@2916 320 _thread->set_scanned_nmethod(NULL);
never@2916 321 }
never@2916 322 };
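// RAII helper: while the sweeper is processing an nmethod, register it with
// the current CompilerThread (set_scanned_nmethod) so it stays reachable from
// thread state and cannot be unloaded if one of the locks taken in
// process_nmethod() safepoints.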
never@2916 323
duke@435 324
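// process_nmethod() drives an nmethod through its teardown states:
//   alive -> not_entrant          (only under UseCodeCacheFlushing, for code
//                                  not called since the forced cleanup)
//   not_entrant -> zombie         (once can_not_entrant_be_converted() says
//                                  no activations remain)
//   zombie -> marked_for_reclamation -> flushed on a later pass
// Unloaded nmethods are made zombie, except OSR methods, which are flushed
// directly since no inline cache can point to them. Inline caches of live
// nmethods are cleaned along the way.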
duke@435 325 void NMethodSweeper::process_nmethod(nmethod *nm) {
never@1893 326 assert(!CodeCache_lock->owned_by_self(), "just checking");
never@1893 327
never@2916 328 // Make sure this nmethod doesn't get unloaded during the scan,
never@2916 329 // since the locks acquired below might safepoint.
never@2916 330 NMethodMarker nmm(nm);
never@2916 331
never@2916 332 SWEEP(nm);
never@2916 333
duke@435 334 // Skip methods that are currently referenced by the VM
duke@435 335 if (nm->is_locked_by_vm()) {
duke@435 336 // But still remember to clean-up inline caches for alive nmethods
duke@435 337 if (nm->is_alive()) {
duke@435 338 // Clean up all inline caches that point to zombie/non-entrant methods
never@1893 339 MutexLocker cl(CompiledIC_lock);
duke@435 340 nm->cleanup_inline_caches();
never@2916 341 SWEEP(nm);
duke@435 342 } else {
duke@435 343 _locked_seen++;
never@2916 344 SWEEP(nm);
duke@435 345 }
duke@435 346 return;
duke@435 347 }
duke@435 348
duke@435 349 if (nm->is_zombie()) {
duke@435 350 // If this is the first time we see this nmethod, we mark it. Otherwise,
duke@435 351 // we reclaim it. When we have seen a zombie method twice, we know that
never@1999 352 // there are no inline caches that refer to it.
duke@435 353 if (nm->is_marked_for_reclamation()) {
duke@435 354 assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
ysr@1376 355 if (PrintMethodFlushing && Verbose) {
kvn@1637 356 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
ysr@1376 357 }
never@1893 358 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
duke@435 359 nm->flush();
duke@435 360 } else {
ysr@1376 361 if (PrintMethodFlushing && Verbose) {
kvn@1637 362 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
ysr@1376 363 }
duke@435 364 nm->mark_for_reclamation();
duke@435 365 _rescan = true;
never@2916 366 SWEEP(nm);
duke@435 367 }
duke@435 368 } else if (nm->is_not_entrant()) {
duke@435 369 // If there are no current activations of this method on the
duke@435 370 // stack, we can safely convert it to a zombie method
duke@435 371 if (nm->can_not_entrant_be_converted()) {
ysr@1376 372 if (PrintMethodFlushing && Verbose) {
kvn@1637 373 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
ysr@1376 374 }
duke@435 375 nm->make_zombie();
duke@435 376 _rescan = true;
never@2916 377 SWEEP(nm);
duke@435 378 } else {
duke@435 379 // Still alive, clean up its inline caches
never@1893 380 MutexLocker cl(CompiledIC_lock);
duke@435 381 nm->cleanup_inline_caches();
duke@435 382 // we couldn't transition this nmethod so don't immediately
duke@435 383 // request a rescan. If this method stays on the stack for a
never@1893 384 // long time we don't want to keep rescanning the code cache.
duke@435 385 _not_entrant_seen_on_stack++;
never@2916 386 SWEEP(nm);
duke@435 387 }
duke@435 388 } else if (nm->is_unloaded()) {
duke@435 389 // Unloaded code, just make it a zombie
ysr@1376 390 if (PrintMethodFlushing && Verbose)
kvn@1637 391 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
ysr@1376 392 if (nm->is_osr_method()) {
duke@435 393 // No inline caches will ever point to osr methods, so we can just remove it
never@1893 394 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
never@2916 395 SWEEP(nm);
duke@435 396 nm->flush();
duke@435 397 } else {
duke@435 398 nm->make_zombie();
duke@435 399 _rescan = true;
never@2916 400 SWEEP(nm);
duke@435 401 }
duke@435 402 } else {
duke@435 403 assert(nm->is_alive(), "should be alive");
kvn@1637 404
kvn@1637 405 if (UseCodeCacheFlushing) {
kvn@1637 406 if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
kvn@1637 407 (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
kvn@1637 408 CodeCache::needs_flushing()) {
kvn@1637 409 // This method has not been called since the forced cleanup happened
kvn@1637 410 nm->make_not_entrant();
kvn@1637 411 }
kvn@1637 412 }
kvn@1637 413
duke@435 414 // Clean up all inline caches that point to zombie/non-entrant methods
never@1893 415 MutexLocker cl(CompiledIC_lock);
duke@435 416 nm->cleanup_inline_caches();
never@2916 417 SWEEP(nm);
duke@435 418 }
duke@435 419 }
kvn@1637 420
kvn@1637 421 // Code cache unloading: when compilers notice the code cache is getting full,
kvn@1637 422 // they will call a vm op that comes here. This code attempts to speculatively
kvn@1637 423 // unload the oldest half of the nmethods (based on the compile job id) by
kvn@1637 424 // saving the old code in a list in the CodeCache. Then
never@1893 425 // execution resumes. If a method so marked has not been called by the second
never@1893 426 // sweeper stack traversal after the current one, the nmethod is made not entrant
kvn@1637 427 // and removed by normal sweeping. If the method is called, the methodOop's
kvn@1637 428 // _code field is restored and the methodOop/nmethod
kvn@1637 429 // go back to their normal state.
kvn@1637 430 void NMethodSweeper::handle_full_code_cache(bool is_full) {
kvn@1637 431 // Only the first one to notice can advise us to start early cleaning
kvn@1637 432 if (!is_full){
kvn@1637 433 jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
kvn@1637 434 if (old != 0) {
kvn@1637 435 return;
kvn@1637 436 }
kvn@1637 437 }
kvn@1637 438
kvn@1637 439 if (is_full) {
kvn@1637 440 // Since code cache is full, immediately stop new compiles
kvn@1637 441 bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
kvn@1637 442 if (!did_set) {
kvn@1637 443 // only the first to notice can start the cleaning,
kvn@1637 444 // others will go back and block
kvn@1637 445 return;
kvn@1637 446 }
kvn@1637 447 set_was_full(true);
kvn@1637 448
kvn@1637 449 // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
kvn@1637 450 jlong now = os::javaTimeMillis();
kvn@1637 451 jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
kvn@1637 452 jlong curr_interval = now - _last_was_full;
kvn@1637 453 if (curr_interval < max_interval) {
kvn@1637 454 _rescan = true;
never@1999 455 log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'",
never@1999 456 curr_interval/1000);
kvn@1637 457 return;
kvn@1637 458 }
kvn@1637 459 }
kvn@1637 460
kvn@1637 461 VM_HandleFullCodeCache op(is_full);
kvn@1637 462 VMThread::execute(&op);
kvn@1637 463
kvn@1637 464 // rescan again as soon as possible
kvn@1637 465 _rescan = true;
kvn@1637 466 }
kvn@1637 467
kvn@1637 468 void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
kvn@1637 469 // If there was a race in detecting full code cache, only run
kvn@1637 470 // one vm op for it or keep the compiler shut off
kvn@1637 471
kvn@1637 472 debug_only(jlong start = os::javaTimeMillis();)
kvn@1637 473
kvn@1637 474 if ((!was_full()) && (is_full)) {
kvn@1637 475 if (!CodeCache::needs_flushing()) {
never@1999 476 log_sweep("restart_compiler");
kvn@1637 477 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
kvn@1637 478 return;
kvn@1637 479 }
kvn@1637 480 }
kvn@1637 481
kvn@1637 482 // Traverse the code cache trying to dump the oldest nmethods
kvn@1637 483 uint curr_max_comp_id = CompileBroker::get_compilation_id();
kvn@1637 484 uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
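// flush_target is the midpoint between _highest_marked and the current
// maximum compile id, i.e. roughly the older half of the remaining ids.
// Illustrative values: _highest_marked == 200 and curr_max_comp_id == 1000
// give flush_target == 600, so only nmethods with compile_id < 600 are
// considered below.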
never@1999 485 log_sweep("start_cleaning");
kvn@1637 486
kvn@1637 487 nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
kvn@1637 488 jint disconnected = 0;
kvn@1637 489 jint made_not_entrant = 0;
kvn@1637 490 while (nm != NULL) {
kvn@1637 491 uint curr_comp_id = nm->compile_id();
kvn@1637 492
kvn@1637 493 // OSR methods cannot be flushed like this. Also, don't flush native methods
kvn@1637 494 // since they are part of the JDK in most cases
kvn@1637 495 if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
kvn@1637 496 (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {
kvn@1637 497
kvn@1637 498 if ((nm->method()->code() == nm)) {
kvn@1637 499 // This method has not been previously considered for
kvn@1637 500 // unloading or it was restored already
kvn@1637 501 CodeCache::speculatively_disconnect(nm);
kvn@1637 502 disconnected++;
kvn@1637 503 } else if (nm->is_speculatively_disconnected()) {
kvn@1637 504 // This method was previously considered for preemptive unloading and was not called since then
iveresov@2138 505 CompilationPolicy::policy()->delay_compilation(nm->method());
kvn@1637 506 nm->make_not_entrant();
kvn@1637 507 made_not_entrant++;
kvn@1637 508 }
kvn@1637 509
kvn@1637 510 if (curr_comp_id > _highest_marked) {
kvn@1637 511 _highest_marked = curr_comp_id;
kvn@1637 512 }
kvn@1637 513 }
kvn@1637 514 nm = CodeCache::alive_nmethod(CodeCache::next(nm));
kvn@1637 515 }
kvn@1637 516
never@1999 517 log_sweep("stop_cleaning",
never@1999 518 "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
never@1999 519 disconnected, made_not_entrant);
kvn@1637 520
never@1893 521 // Shut off compiler. Sweeper will start over with a new stack scan and
never@1893 522 // traversal cycle and turn it back on if it clears enough space.
kvn@1637 523 if (was_full()) {
kvn@1637 524 _last_was_full = os::javaTimeMillis();
kvn@1637 525 CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
kvn@1637 526 }
kvn@1637 527
kvn@1637 528 // After two more traversals the sweeper will get rid of unrestored nmethods
kvn@1637 529 _was_full_traversal = _traversals;
kvn@1637 530 #ifdef ASSERT
kvn@1637 531 jlong end = os::javaTimeMillis();
kvn@1637 532 if (PrintMethodFlushing && Verbose) {
kvn@1637 533 tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);
kvn@1637 534 }
kvn@1637 535 #endif
kvn@1637 536 }
never@1999 537
never@1999 538
never@1999 539 // Print out some state information about the current sweep and the
never@1999 540 // state of the code cache if it's requested.
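// Callers in this file pass a short state tag plus an optional printf-style
// detail string, e.g. log_sweep("finished"), log_sweep("restart_compiler") or
// log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'", ...).
// With -XX:+PrintMethodFlushing this produces a "### sweeper: ..." line on tty;
// with -XX:+LogCompilation it emits a <sweeper state='...' .../> element.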
never@1999 541 void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
never@1999 542 if (PrintMethodFlushing) {
iveresov@2764 543 stringStream s;
iveresov@2764 544 // Dump code cache state into a buffer before locking the tty,
iveresov@2764 545 // because log_state() will use locks causing lock conflicts.
iveresov@2764 546 CodeCache::log_state(&s);
iveresov@2764 547
never@1999 548 ttyLocker ttyl;
never@1999 549 tty->print("### sweeper: %s ", msg);
never@1999 550 if (format != NULL) {
never@1999 551 va_list ap;
never@1999 552 va_start(ap, format);
never@1999 553 tty->vprint(format, ap);
never@1999 554 va_end(ap);
never@1999 555 }
iveresov@2764 556 tty->print_cr(s.as_string());
never@1999 557 }
never@1999 558
never@1999 559 if (LogCompilation && (xtty != NULL)) {
iveresov@2764 560 stringStream s;
iveresov@2764 561 // Dump code cache state into a buffer before locking the tty,
iveresov@2764 562 // because log_state() will use locks causing lock conflicts.
iveresov@2764 563 CodeCache::log_state(&s);
iveresov@2764 564
never@1999 565 ttyLocker ttyl;
never@2001 566 xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
never@1999 567 if (format != NULL) {
never@1999 568 va_list ap;
never@1999 569 va_start(ap, format);
never@1999 570 xtty->vprint(format, ap);
never@1999 571 va_end(ap);
never@1999 572 }
iveresov@2764 573 xtty->print(s.as_string());
never@1999 574 xtty->stamp();
never@1999 575 xtty->end_elem();
never@1999 576 }
never@1999 577 }
