src/share/vm/runtime/sweeper.cpp

author:      roland
date:        Fri, 31 May 2013 14:40:26 +0200
changeset:   5222:28e5aed7f3a6
parent:      5038:0cfa93c2fcc4
child:       5237:f2110083203d
permissions: -rw-r--r--

8009981: nashorn tests fail with -XX:+VerifyStack
Summary: nmethod::preserve_callee_argument_oops() must take appendix into account.
Reviewed-by: kvn, twisti

duke@435 1 /*
coleenp@4037 2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "code/codeCache.hpp"
coleenp@4037 27 #include "code/compiledIC.hpp"
coleenp@4037 28 #include "code/icBuffer.hpp"
stefank@2314 29 #include "code/nmethod.hpp"
stefank@2314 30 #include "compiler/compileBroker.hpp"
stefank@2314 31 #include "memory/resourceArea.hpp"
coleenp@4037 32 #include "oops/method.hpp"
stefank@2314 33 #include "runtime/atomic.hpp"
stefank@2314 34 #include "runtime/compilationPolicy.hpp"
stefank@2314 35 #include "runtime/mutexLocker.hpp"
stefank@2314 36 #include "runtime/os.hpp"
stefank@2314 37 #include "runtime/sweeper.hpp"
stefank@2314 38 #include "runtime/vm_operations.hpp"
stefank@2314 39 #include "utilities/events.hpp"
stefank@2314 40 #include "utilities/xmlstream.hpp"
duke@435 41
never@2916 42 #ifdef ASSERT
never@2916 43
never@2916 44 #define SWEEP(nm) record_sweep(nm, __LINE__)
never@2916 45 // Sweeper logging code
never@2916 46 class SweeperRecord {
never@2916 47 public:
never@2916 48 int traversal;
never@2916 49 int invocation;
never@2916 50 int compile_id;
never@2916 51 long traversal_mark;
never@2916 52 int state;
never@2916 53 const char* kind;
never@2916 54 address vep;
never@2916 55 address uep;
never@2916 56 int line;
never@2916 57
never@2916 58 void print() {
never@2916 59 tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
never@2916 60 PTR_FORMAT " state = %d traversal_mark %ld line = %d",
never@2916 61 traversal,
never@2916 62 invocation,
never@2916 63 compile_id,
never@2916 64 kind == NULL ? "" : kind,
never@2916 65 uep,
never@2916 66 vep,
never@2916 67 state,
never@2916 68 traversal_mark,
never@2916 69 line);
never@2916 70 }
never@2916 71 };
never@2916 72
never@2916 73 static int _sweep_index = 0;
never@2916 74 static SweeperRecord* _records = NULL;
never@2916 75
never@2916 76 void NMethodSweeper::report_events(int id, address entry) {
never@2916 77 if (_records != NULL) {
never@2916 78 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
never@2916 79 if (_records[i].uep == entry ||
never@2916 80 _records[i].vep == entry ||
never@2916 81 _records[i].compile_id == id) {
never@2916 82 _records[i].print();
never@2916 83 }
never@2916 84 }
never@2916 85 for (int i = 0; i < _sweep_index; i++) {
never@2916 86 if (_records[i].uep == entry ||
never@2916 87 _records[i].vep == entry ||
never@2916 88 _records[i].compile_id == id) {
never@2916 89 _records[i].print();
never@2916 90 }
never@2916 91 }
never@2916 92 }
never@2916 93 }
never@2916 94
never@2916 95 void NMethodSweeper::report_events() {
never@2916 96 if (_records != NULL) {
never@2916 97 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
never@2916 98 // skip empty records
never@2916 99 if (_records[i].vep == NULL) continue;
never@2916 100 _records[i].print();
never@2916 101 }
never@2916 102 for (int i = 0; i < _sweep_index; i++) {
never@2916 103 // skip empty records
never@2916 104 if (_records[i].vep == NULL) continue;
never@2916 105 _records[i].print();
never@2916 106 }
never@2916 107 }
never@2916 108 }
never@2916 109
never@2916 110 void NMethodSweeper::record_sweep(nmethod* nm, int line) {
never@2916 111 if (_records != NULL) {
never@2916 112 _records[_sweep_index].traversal = _traversals;
never@2916 113 _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
never@2916 114 _records[_sweep_index].invocation = _invocations;
never@2916 115 _records[_sweep_index].compile_id = nm->compile_id();
never@2916 116 _records[_sweep_index].kind = nm->compile_kind();
never@2916 117 _records[_sweep_index].state = nm->_state;
never@2916 118 _records[_sweep_index].vep = nm->verified_entry_point();
never@2916 119 _records[_sweep_index].uep = nm->entry_point();
never@2916 120 _records[_sweep_index].line = line;
never@2916 121
never@2916 122 _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
never@2916 123 }
never@2916 124 }
never@2916 125 #else
never@2916 126 #define SWEEP(nm)
never@2916 127 #endif
never@2916 128
never@2916 129
duke@435 130 long NMethodSweeper::_traversals = 0; // No. of stack traversals performed
never@1970 131 nmethod* NMethodSweeper::_current = NULL; // Current nmethod
never@1999 132 int NMethodSweeper::_seen = 0 ; // No. of nmethods we have currently processed in current pass of CodeCache
never@1999 133
never@1999 134 volatile int NMethodSweeper::_invocations = 0; // No. of invocations left until we are completed with this pass
never@1999 135 volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
duke@435 136
duke@435 137 jint NMethodSweeper::_locked_seen = 0;
duke@435 138 jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
neliasso@5038 139 bool NMethodSweeper::_resweep = false;
neliasso@5038 140 jint NMethodSweeper::_flush_token = 0;
neliasso@5038 141 jlong NMethodSweeper::_last_full_flush_time = 0;
neliasso@5038 142 int NMethodSweeper::_highest_marked = 0;
neliasso@5038 143 int NMethodSweeper::_dead_compile_ids = 0;
neliasso@5038 144 long NMethodSweeper::_last_flush_traversal_id = 0;
duke@435 145
jrose@1424 146 class MarkActivationClosure: public CodeBlobClosure {
jrose@1424 147 public:
jrose@1424 148 virtual void do_code_blob(CodeBlob* cb) {
jrose@1424 149 // If we see an activation belonging to a non_entrant nmethod, we mark it.
jrose@1424 150 if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
jrose@1424 151 ((nmethod*)cb)->mark_as_seen_on_stack();
jrose@1424 152 }
jrose@1424 153 }
jrose@1424 154 };
jrose@1424 155 static MarkActivationClosure mark_activation_closure;
jrose@1424 156
neliasso@5038 157 bool NMethodSweeper::sweep_in_progress() {
neliasso@5038 158 return (_current != NULL);
neliasso@5038 159 }
neliasso@5038 160
never@1893 161 void NMethodSweeper::scan_stacks() {
duke@435 162 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
duke@435 163 if (!MethodFlushing) return;
duke@435 164
duke@435 165 // No need to synchronize access, since this is always executed at a
neliasso@5038 166 // safepoint.
duke@435 167
duke@435 168 // Make sure CompiledIC_lock is unlocked, since we might update some
duke@435 169 // inline caches. If it is locked, we just bail out and try later.
duke@435 170 if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
duke@435 171
duke@435 172 // Check for restart
duke@435 173 assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
neliasso@5038 174 if (!sweep_in_progress() && _resweep) {
duke@435 175 _seen = 0;
duke@435 176 _invocations = NmethodSweepFraction;
never@1893 177 _current = CodeCache::first_nmethod();
duke@435 178 _traversals += 1;
duke@435 179 if (PrintMethodFlushing) {
duke@435 180 tty->print_cr("### Sweep: stack traversal %d", _traversals);
duke@435 181 }
jrose@1424 182 Threads::nmethods_do(&mark_activation_closure);
duke@435 183
duke@435 184 // reset the flags since we started a scan from the beginning.
neliasso@5038 185 _resweep = false;
duke@435 186 _locked_seen = 0;
duke@435 187 _not_entrant_seen_on_stack = 0;
duke@435 188 }
duke@435 189
kvn@1637 190 if (UseCodeCacheFlushing) {
neliasso@5038 191 // only allow new flushes after the interval is complete.
neliasso@5038 192 jlong now = os::javaTimeMillis();
neliasso@5038 193 jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
neliasso@5038 194 jlong curr_interval = now - _last_full_flush_time;
neliasso@5038 195 if (curr_interval > max_interval) {
neliasso@5038 196 _flush_token = 0;
kvn@1637 197 }
kvn@1637 198
neliasso@5038 199 if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) {
neliasso@5038 200 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
neliasso@5038 201 log_sweep("restart_compiler");
kvn@1637 202 }
kvn@1637 203 }
duke@435 204 }
duke@435 205
never@1893 206 void NMethodSweeper::possibly_sweep() {
never@1999 207 assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
neliasso@5038 208 if (!MethodFlushing || !sweep_in_progress()) return;
never@1893 209
never@1893 210 if (_invocations > 0) {
never@1893 211 // Only one thread at a time will sweep
never@1893 212 jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
never@1893 213 if (old != 0) {
never@1893 214 return;
never@1893 215 }
never@2916 216 #ifdef ASSERT
never@2916 217 if (LogSweeper && _records == NULL) {
never@2916 218 // Create the ring buffer for the logging code
zgu@3900 219 _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
never@2916 220 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
never@2916 221 }
never@2916 222 #endif
never@1999 223 if (_invocations > 0) {
never@1999 224 sweep_code_cache();
never@1999 225 _invocations--;
never@1999 226 }
never@1999 227 _sweep_started = 0;
never@1893 228 }
never@1893 229 }
never@1893 230
never@1893 231 void NMethodSweeper::sweep_code_cache() {
never@1893 232 #ifdef ASSERT
never@1893 233 jlong sweep_start;
never@1999 234 if (PrintMethodFlushing) {
never@1893 235 sweep_start = os::javaTimeMillis();
never@1893 236 }
never@1893 237 #endif
never@1893 238 if (PrintMethodFlushing && Verbose) {
never@1999 239 tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
never@1893 240 }
never@1893 241
neliasso@5038 242 if (!CompileBroker::should_compile_new_jobs()) {
neliasso@5038 243 // If we have turned off compilations we might as well do full sweeps
neliasso@5038 244 // in order to reach the clean state faster. Otherwise the sleeping compiler
neliasso@5038 245 // threads will slow down sweeping. After a few iterations the cache
neliasso@5038 246 // will be clean and sweeping stops (_resweep will not be set)
neliasso@5038 247 _invocations = 1;
neliasso@5038 248 }
neliasso@5038 249
never@1999 250 // We want to visit all nmethods after NmethodSweepFraction
never@1999 251 // invocations so divide the remaining number of nmethods by the
never@1999 252 // remaining number of invocations. This is only an estimate since
never@1999 253 // the number of nmethods changes during the sweep so the final
never@1999 254 // stage must iterate until there are no more nmethods.
never@1999 255 int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
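  // Worked example (hypothetical numbers, for illustration only): with
  // CodeCache::nof_nmethods() == 3000, _seen == 1000 and _invocations == 4,
  // todo == (3000 - 1000) / 4 == 500 nmethods for this invocation.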
never@1893 256
never@1893 257 assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
never@1893 258 assert(!CodeCache_lock->owned_by_self(), "just checking");
never@1893 259
never@1893 260 {
never@1893 261 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
never@1893 262
never@1999 263 // The last invocation iterates until there are no more nmethods
never@1999 264 for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
iveresov@3572 265 if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
iveresov@3572 266 if (PrintMethodFlushing && Verbose) {
iveresov@3572 267 tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
iveresov@3572 268 }
iveresov@3572 269 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
never@1893 270
iveresov@3572 271 assert(Thread::current()->is_Java_thread(), "should be java thread");
iveresov@3572 272 JavaThread* thread = (JavaThread*)Thread::current();
iveresov@3572 273 ThreadBlockInVM tbivm(thread);
iveresov@3572 274 thread->java_suspend_self();
iveresov@3572 275 }
never@1999 276 // Since we will give up the CodeCache_lock, always skip ahead
never@1999 277 // to the next nmethod. Other blobs can be deleted by other
never@1999 278 // threads but nmethods are only reclaimed by the sweeper.
never@1970 279 nmethod* next = CodeCache::next_nmethod(_current);
never@1893 280
never@1893 281 // Now ready to process nmethod and give up CodeCache_lock
never@1893 282 {
never@1893 283 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
never@1970 284 process_nmethod(_current);
never@1893 285 }
never@1893 286 _seen++;
never@1893 287 _current = next;
never@1893 288 }
never@1893 289 }
never@1893 290
never@1999 291 assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
never@1999 292
neliasso@5038 293 if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
never@1893 294 // We've completed a scan without making progress but there were
never@1893 295 // nmethods we were unable to process either because they were
never@1893 296 // locked or were still on stack. We don't have to aggressively
never@1893 297 // clean them up so just stop scanning. We could scan once more
never@1893 298 // but that complicates the control logic and it's unlikely to
never@1893 299 // matter much.
never@1893 300 if (PrintMethodFlushing) {
never@1893 301 tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
never@1893 302 }
never@1893 303 }
never@1893 304
never@1893 305 #ifdef ASSERT
never@1893 306 if(PrintMethodFlushing) {
never@1893 307 jlong sweep_end = os::javaTimeMillis();
never@1893 308 tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
never@1893 309 }
never@1893 310 #endif
never@1999 311
never@1999 312 if (_invocations == 1) {
never@1999 313 log_sweep("finished");
never@1999 314 }
neliasso@5038 315
neliasso@5038 316 // The sweeper is the only place where memory is released;
neliasso@5038 317 // check here whether it is time to restart the compiler.
neliasso@5038 318 if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs() && !CodeCache::needs_flushing()) {
neliasso@5038 319 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
neliasso@5038 320 log_sweep("restart_compiler");
neliasso@5038 321 }
never@1893 322 }
never@1893 323
never@2916 324 class NMethodMarker: public StackObj {
never@2916 325 private:
never@2916 326 CompilerThread* _thread;
never@2916 327 public:
never@2916 328 NMethodMarker(nmethod* nm) {
never@2916 329 _thread = CompilerThread::current();
coleenp@4037 330 if (!nm->is_zombie() && !nm->is_unloaded()) {
coleenp@4037 331 // Only expose live nmethods for scanning
never@2916 332 _thread->set_scanned_nmethod(nm);
never@2916 333 }
coleenp@4037 334 }
never@2916 335 ~NMethodMarker() {
never@2916 336 _thread->set_scanned_nmethod(NULL);
never@2916 337 }
never@2916 338 };
never@2916 339
coleenp@4037 340 void NMethodSweeper::release_nmethod(nmethod *nm) {
coleenp@4037 341 // Clean up any CompiledICHolders
coleenp@4037 342 {
coleenp@4037 343 ResourceMark rm;
coleenp@4037 344 MutexLocker ml_patch(CompiledIC_lock);
coleenp@4037 345 RelocIterator iter(nm);
coleenp@4037 346 while (iter.next()) {
coleenp@4037 347 if (iter.type() == relocInfo::virtual_call_type) {
coleenp@4037 348 CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
coleenp@4037 349 }
coleenp@4037 350 }
coleenp@4037 351 }
coleenp@4037 352
coleenp@4037 353 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
coleenp@4037 354 nm->flush();
coleenp@4037 355 }
duke@435 356
duke@435 357 void NMethodSweeper::process_nmethod(nmethod *nm) {
never@1893 358 assert(!CodeCache_lock->owned_by_self(), "just checking");
never@1893 359
never@2916 360 // Make sure this nmethod doesn't get unloaded during the scan,
never@2916 361 // since the locks acquired below might safepoint.
never@2916 362 NMethodMarker nmm(nm);
never@2916 363
never@2916 364 SWEEP(nm);
never@2916 365
duke@435 366 // Skip methods that are currently referenced by the VM
duke@435 367 if (nm->is_locked_by_vm()) {
duke@435 368 // But still remember to clean up inline caches for alive nmethods
duke@435 369 if (nm->is_alive()) {
duke@435 370 // Clean up all inline caches that point to zombie/non-entrant methods
never@1893 371 MutexLocker cl(CompiledIC_lock);
duke@435 372 nm->cleanup_inline_caches();
never@2916 373 SWEEP(nm);
duke@435 374 } else {
duke@435 375 _locked_seen++;
never@2916 376 SWEEP(nm);
duke@435 377 }
duke@435 378 return;
duke@435 379 }
duke@435 380
duke@435 381 if (nm->is_zombie()) {
duke@435 382 // The first time we see a zombie nmethod we mark it. Otherwise,
duke@435 383 // we reclaim it. When we have seen a zombie method twice, we know that
never@1999 384 // there are no inline caches that refer to it.
duke@435 385 if (nm->is_marked_for_reclamation()) {
duke@435 386 assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
ysr@1376 387 if (PrintMethodFlushing && Verbose) {
kvn@1637 388 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
ysr@1376 389 }
coleenp@4037 390 release_nmethod(nm);
duke@435 391 } else {
ysr@1376 392 if (PrintMethodFlushing && Verbose) {
kvn@1637 393 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
ysr@1376 394 }
duke@435 395 nm->mark_for_reclamation();
neliasso@5038 396 _resweep = true;
never@2916 397 SWEEP(nm);
duke@435 398 }
duke@435 399 } else if (nm->is_not_entrant()) {
duke@435 400 // If there are no current activations of this method on the
duke@435 401 // stack we can safely convert it to a zombie method
duke@435 402 if (nm->can_not_entrant_be_converted()) {
ysr@1376 403 if (PrintMethodFlushing && Verbose) {
kvn@1637 404 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
ysr@1376 405 }
duke@435 406 nm->make_zombie();
neliasso@5038 407 _resweep = true;
never@2916 408 SWEEP(nm);
duke@435 409 } else {
duke@435 410 // Still alive, clean up its inline caches
never@1893 411 MutexLocker cl(CompiledIC_lock);
duke@435 412 nm->cleanup_inline_caches();
duke@435 413 // We couldn't transition this nmethod so don't immediately
duke@435 414 // request a rescan. If this method stays on the stack for a
never@1893 415 // long time we don't want to keep rescanning the code cache.
duke@435 416 _not_entrant_seen_on_stack++;
never@2916 417 SWEEP(nm);
duke@435 418 }
duke@435 419 } else if (nm->is_unloaded()) {
duke@435 420 // Unloaded code, just make it a zombie
ysr@1376 421 if (PrintMethodFlushing && Verbose)
kvn@1637 422 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
ysr@1376 423 if (nm->is_osr_method()) {
coleenp@4037 424 SWEEP(nm);
duke@435 425 // No inline caches will ever point to osr methods, so we can just remove it
coleenp@4037 426 release_nmethod(nm);
duke@435 427 } else {
duke@435 428 nm->make_zombie();
neliasso@5038 429 _resweep = true;
never@2916 430 SWEEP(nm);
duke@435 431 }
duke@435 432 } else {
duke@435 433 assert(nm->is_alive(), "should be alive");
kvn@1637 434
kvn@1637 435 if (UseCodeCacheFlushing) {
neliasso@5038 436 if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
neliasso@5038 437 (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
kvn@1637 438 // This method has not been called since the forced cleanup happened
kvn@1637 439 nm->make_not_entrant();
kvn@1637 440 }
kvn@1637 441 }
kvn@1637 442
duke@435 443 // Clean up all inline caches that point to zombie/non-entrant methods
never@1893 444 MutexLocker cl(CompiledIC_lock);
duke@435 445 nm->cleanup_inline_caches();
never@2916 446 SWEEP(nm);
duke@435 447 }
duke@435 448 }
kvn@1637 449
kvn@1637 450 // Code cache unloading: when compilers notice the code cache is getting full,
kvn@1637 451 // they will call a vm op that comes here. This code attempts to speculatively
kvn@1637 452 // unload the oldest half of the nmethods (based on the compile job id) by
kvn@1637 453 // saving the old code in a list in the CodeCache. Then
never@1893 454 // execution resumes. If a method so marked is not called by the second sweeper
never@1893 455 // stack traversal after the current one, the nmethod will be marked non-entrant and
coleenp@4037 456 // removed by normal sweeping. If the method is called, the Method*'s
coleenp@4037 457 // _code field is restored and the Method*/nmethod
kvn@1637 458 // go back to their normal state.
kvn@1637 459 void NMethodSweeper::handle_full_code_cache(bool is_full) {
neliasso@5038 460
neliasso@5038 461 if (is_full) {
neliasso@5038 462 // Since code cache is full, immediately stop new compiles
neliasso@5038 463 if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
neliasso@5038 464 log_sweep("disable_compiler");
kvn@1637 465 }
kvn@1637 466 }
kvn@1637 467
neliasso@5038 468 // Make sure only one thread can flush.
neliasso@5038 469 // The token is reset after MinCodeCacheFlushingInterval in scan_stacks(),
neliasso@5038 470 // so there is no need to check the timeout here.
neliasso@5038 471 jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
neliasso@5038 472 if (old != 0) {
neliasso@5038 473 return;
kvn@1637 474 }
kvn@1637 475
kvn@1637 476 VM_HandleFullCodeCache op(is_full);
kvn@1637 477 VMThread::execute(&op);
kvn@1637 478
neliasso@5038 479 // resweep again as soon as possible
neliasso@5038 480 _resweep = true;
kvn@1637 481 }
kvn@1637 482
kvn@1637 483 void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
kvn@1637 484 // If there was a race in detecting full code cache, only run
kvn@1637 485 // one vm op for it or keep the compiler shut off
kvn@1637 486
kvn@1637 487 debug_only(jlong start = os::javaTimeMillis();)
kvn@1637 488
neliasso@5038 489 // Traverse the code cache trying to dump the oldest nmethods
neliasso@5038 490 int curr_max_comp_id = CompileBroker::get_compilation_id();
neliasso@5038 491 int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;
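  // Worked example (hypothetical numbers, for illustration only): with
  // curr_max_comp_id == 4000, _dead_compile_ids == 1000 and
  // CodeCacheFlushingFraction == 2, flush_target == (4000 - 1000) / 2 + 1000 == 2500,
  // i.e. roughly the midpoint of the compile-id range assumed to still be live.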
kvn@1637 492
never@1999 493 log_sweep("start_cleaning");
kvn@1637 494
kvn@1637 495 nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
kvn@1637 496 jint disconnected = 0;
kvn@1637 497 jint made_not_entrant = 0;
neliasso@5038 498 jint nmethod_count = 0;
neliasso@5038 499
kvn@1637 500 while ((nm != NULL)){
neliasso@5038 501 int curr_comp_id = nm->compile_id();
kvn@1637 502
kvn@1637 503 // OSR methods cannot be flushed like this. Also, don't flush native methods
kvn@1637 504 // since they are part of the JDK in most cases
neliasso@5038 505 if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {
kvn@1637 506
neliasso@5038 507 // only count methods that can be speculatively disconnected
neliasso@5038 508 nmethod_count++;
kvn@1637 509
neliasso@5038 510 if (nm->is_in_use() && (curr_comp_id < flush_target)) {
neliasso@5038 511 if ((nm->method()->code() == nm)) {
neliasso@5038 512 // This method has not been previously considered for
neliasso@5038 513 // unloading or it was restored already
neliasso@5038 514 CodeCache::speculatively_disconnect(nm);
neliasso@5038 515 disconnected++;
neliasso@5038 516 } else if (nm->is_speculatively_disconnected()) {
neliasso@5038 517 // This method was previously considered for preemptive unloading and has not been called since then
neliasso@5038 518 CompilationPolicy::policy()->delay_compilation(nm->method());
neliasso@5038 519 nm->make_not_entrant();
neliasso@5038 520 made_not_entrant++;
neliasso@5038 521 }
neliasso@5038 522
neliasso@5038 523 if (curr_comp_id > _highest_marked) {
neliasso@5038 524 _highest_marked = curr_comp_id;
neliasso@5038 525 }
kvn@1637 526 }
kvn@1637 527 }
kvn@1637 528 nm = CodeCache::alive_nmethod(CodeCache::next(nm));
kvn@1637 529 }
kvn@1637 530
neliasso@5038 531 // Remember how many compile_ids weren't seen in the last flush.
neliasso@5038 532 _dead_compile_ids = curr_max_comp_id - nmethod_count;
neliasso@5038 533
never@1999 534 log_sweep("stop_cleaning",
never@1999 535 "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
never@1999 536 disconnected, made_not_entrant);
kvn@1637 537
never@1893 538 // Shut off compiler. Sweeper will start over with a new stack scan and
never@1893 539 // traversal cycle and turn it back on if it clears enough space.
neliasso@5038 540 if (is_full) {
neliasso@5038 541 _last_full_flush_time = os::javaTimeMillis();
kvn@1637 542 }
kvn@1637 543
kvn@1637 544 // After two more traversals the sweeper will get rid of unrestored nmethods
neliasso@5038 545 _last_flush_traversal_id = _traversals;
neliasso@5038 546 _resweep = true;
kvn@1637 547 #ifdef ASSERT
kvn@1637 548 jlong end = os::javaTimeMillis();
kvn@1637 549 if(PrintMethodFlushing && Verbose) {
kvn@1637 550 tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);
kvn@1637 551 }
kvn@1637 552 #endif
kvn@1637 553 }
never@1999 554
never@1999 555
never@1999 556 // Print out some state information about the current sweep and the
never@1999 557 // state of the code cache if it's requested.
never@1999 558 void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
never@1999 559 if (PrintMethodFlushing) {
iveresov@2764 560 stringStream s;
iveresov@2764 561 // Dump code cache state into a buffer before locking the tty,
iveresov@2764 562 // because log_state() will use locks causing lock conflicts.
iveresov@2764 563 CodeCache::log_state(&s);
iveresov@2764 564
never@1999 565 ttyLocker ttyl;
never@1999 566 tty->print("### sweeper: %s ", msg);
never@1999 567 if (format != NULL) {
never@1999 568 va_list ap;
never@1999 569 va_start(ap, format);
never@1999 570 tty->vprint(format, ap);
never@1999 571 va_end(ap);
never@1999 572 }
iveresov@2764 573 tty->print_cr(s.as_string());
never@1999 574 }
never@1999 575
never@1999 576 if (LogCompilation && (xtty != NULL)) {
iveresov@2764 577 stringStream s;
iveresov@2764 578 // Dump code cache state into a buffer before locking the tty,
iveresov@2764 579 // because log_state() will use locks causing lock conflicts.
iveresov@2764 580 CodeCache::log_state(&s);
iveresov@2764 581
never@1999 582 ttyLocker ttyl;
never@2001 583 xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
never@1999 584 if (format != NULL) {
never@1999 585 va_list ap;
never@1999 586 va_start(ap, format);
never@1999 587 xtty->vprint(format, ap);
never@1999 588 va_end(ap);
never@1999 589 }
iveresov@2764 590 xtty->print(s.as_string());
never@1999 591 xtty->stamp();
never@1999 592 xtty->end_elem();
never@1999 593 }
never@1999 594 }
