src/share/vm/runtime/sweeper.cpp

author:      jcoomes
date:        Fri, 11 Oct 2013 08:27:21 -0700
changeset:   5865:aa6f2ea19d8f
parent:      5792:510fbd28919c
child:       6099:78da3894b86f
permissions: -rw-r--r--
summary:     Merge

/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileBroker.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "runtime/atomic.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/vm_operations.hpp"
#include "trace/tracing.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"
#ifdef ASSERT

#define SWEEP(nm) record_sweep(nm, __LINE__)
// Sweeper logging code
class SweeperRecord {
 public:
  int traversal;
  int invocation;
  int compile_id;
  long traversal_mark;
  int state;
  const char* kind;
  address vep;
  address uep;
  int line;

  void print() {
    tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
                  PTR_FORMAT " state = %d traversal_mark %d line = %d",
                  traversal,
                  invocation,
                  compile_id,
                  kind == NULL ? "" : kind,
                  uep,
                  vep,
                  state,
                  traversal_mark,
                  line);
  }
};

static int _sweep_index = 0;
static SweeperRecord* _records = NULL;

void NMethodSweeper::report_events(int id, address entry) {
  if (_records != NULL) {
    for (int i = _sweep_index; i < SweeperLogEntries; i++) {
      if (_records[i].uep == entry ||
          _records[i].vep == entry ||
          _records[i].compile_id == id) {
        _records[i].print();
      }
    }
    for (int i = 0; i < _sweep_index; i++) {
      if (_records[i].uep == entry ||
          _records[i].vep == entry ||
          _records[i].compile_id == id) {
        _records[i].print();
      }
    }
  }
}

void NMethodSweeper::report_events() {
  if (_records != NULL) {
    for (int i = _sweep_index; i < SweeperLogEntries; i++) {
      // skip empty records
      if (_records[i].vep == NULL) continue;
      _records[i].print();
    }
    for (int i = 0; i < _sweep_index; i++) {
      // skip empty records
      if (_records[i].vep == NULL) continue;
      _records[i].print();
    }
  }
}

void NMethodSweeper::record_sweep(nmethod* nm, int line) {
  if (_records != NULL) {
    _records[_sweep_index].traversal = _traversals;
    _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
    _records[_sweep_index].invocation = _invocations;
    _records[_sweep_index].compile_id = nm->compile_id();
    _records[_sweep_index].kind = nm->compile_kind();
    _records[_sweep_index].state = nm->_state;
    _records[_sweep_index].vep = nm->verified_entry_point();
    _records[_sweep_index].uep = nm->entry_point();
    _records[_sweep_index].line = line;

    _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
  }
}
#else
#define SWEEP(nm)
#endif

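// Sweeper state. One full pass over the code cache (a "traversal") is split
// into NmethodSweepFraction invocations; _invocations counts how many
// fractions of the current pass are still left to do.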
nmethod* NMethodSweeper::_current         = NULL; // Current nmethod
long     NMethodSweeper::_traversals      = 0;    // Nof. stack traversals performed
int      NMethodSweeper::_seen            = 0;    // Nof. nmethods we have currently processed in current pass of CodeCache
int      NMethodSweeper::_flushed_count   = 0;    // Nof. nmethods flushed in current sweep
int      NMethodSweeper::_zombified_count = 0;    // Nof. nmethods made zombie in current sweep
int      NMethodSweeper::_marked_count    = 0;    // Nof. nmethods marked for reclaim in current sweep

volatile int NMethodSweeper::_invocations   = 0;  // Nof. invocations left until we are completed with this pass
volatile int NMethodSweeper::_sweep_started = 0;  // Whether a sweep is in progress.

jint NMethodSweeper::_locked_seen               = 0;
jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
bool NMethodSweeper::_request_mark_phase        = false;

int   NMethodSweeper::_total_nof_methods_reclaimed = 0;
jlong NMethodSweeper::_total_time_sweeping         = 0;
jlong NMethodSweeper::_total_time_this_sweep       = 0;
jlong NMethodSweeper::_peak_sweep_time             = 0;
jlong NMethodSweeper::_peak_sweep_fraction_time    = 0;
int   NMethodSweeper::_hotness_counter_reset_val   = 0;

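// Closures applied to every nmethod found on a Java thread stack while at a
// safepoint (see mark_active_nmethods() below). Both reset the hotness
// counter of active nmethods; MarkActivationClosure additionally marks
// not-entrant nmethods that still have activations so they are not
// zombified while they can still be executed.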
class MarkActivationClosure: public CodeBlobClosure {
 public:
  virtual void do_code_blob(CodeBlob* cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
      // If we see an activation belonging to a non_entrant nmethod, we mark it.
      if (nm->is_not_entrant()) {
        nm->mark_as_seen_on_stack();
      }
    }
  }
};
static MarkActivationClosure mark_activation_closure;

class SetHotnessClosure: public CodeBlobClosure {
 public:
  virtual void do_code_blob(CodeBlob* cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
    }
  }
};
static SetHotnessClosure set_hotness_closure;

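// The hotness counter reset value scales with the code cache size: roughly two
// units per MB of ReservedCodeCacheSize (e.g., a 64 MB code cache yields a
// reset value of 128). Stack scanning resets an nmethod's counter to this
// value and the sweeper decrements it, so the counter measures how long an
// nmethod has not been seen on a Java stack.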
int NMethodSweeper::hotness_counter_reset_val() {
  if (_hotness_counter_reset_val == 0) {
    _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
  }
  return _hotness_counter_reset_val;
}

bool NMethodSweeper::sweep_in_progress() {
  return (_current != NULL);
}

// Scans the stacks of all Java threads and marks activations of not-entrant methods.
// No need to synchronize access, since 'mark_active_nmethods' is always executed at a
// safepoint.
void NMethodSweeper::mark_active_nmethods() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
  // If we do not want to reclaim not-entrant or zombie methods there is no need
  // to scan stacks
  if (!MethodFlushing) {
    return;
  }

  // Check for restart
  assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
  if (!sweep_in_progress() && need_marking_phase()) {
    _seen        = 0;
    _invocations = NmethodSweepFraction;
    _current     = CodeCache::first_nmethod();
    _traversals += 1;
    _total_time_this_sweep = 0;

    if (PrintMethodFlushing) {
      tty->print_cr("### Sweep: stack traversal %d", _traversals);
    }
    Threads::nmethods_do(&mark_activation_closure);

    // Reset the flags since we started a scan from the beginning.
    reset_nmethod_marking();
    _locked_seen = 0;
    _not_entrant_seen_on_stack = 0;
  } else {
    // Only set hotness counter
    Threads::nmethods_do(&set_hotness_closure);
  }

  OrderAccess::storestore();
}

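// Entry point for sweeping one fraction of the code cache. The cmpxchg on
// _sweep_started ensures that at most one thread sweeps at a time; each
// successful call processes one "invocation" (fraction) of the current pass.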
void NMethodSweeper::possibly_sweep() {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
  if (!MethodFlushing || !sweep_in_progress()) {
    return;
  }

  if (_invocations > 0) {
    // Only one thread at a time will sweep
    jint old = Atomic::cmpxchg(1, &_sweep_started, 0);
    if (old != 0) {
      return;
    }
#ifdef ASSERT
    if (LogSweeper && _records == NULL) {
      // Create the ring buffer for the logging code
      _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
      memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
    }
#endif
    if (_invocations > 0) {
      sweep_code_cache();
      _invocations--;
    }
    _sweep_started = 0;
  }
}

void NMethodSweeper::sweep_code_cache() {

  jlong sweep_start_counter = os::elapsed_counter();

  _flushed_count   = 0;
  _zombified_count = 0;
  _marked_count    = 0;

  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
  }

  if (!CompileBroker::should_compile_new_jobs()) {
    // If we have turned off compilations we might as well do full sweeps
    // in order to reach the clean state faster. Otherwise the sleeping compiler
    // threads will slow down sweeping.
    _invocations = 1;
  }

  // We want to visit all nmethods after NmethodSweepFraction
  // invocations so divide the remaining number of nmethods by the
  // remaining number of invocations. This is only an estimate since
  // the number of nmethods changes during the sweep so the final
  // stage must iterate until there are no more nmethods.
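  // For example (illustrative numbers): with NmethodSweepFraction == 16 and
  // 4000 nmethods in the code cache, each invocation visits roughly 250 nmethods.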
  int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
  int swept_count = 0;

  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  int freed_memory = 0;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    // The last invocation iterates until there are no more nmethods
    for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
      swept_count++;
      if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
        }
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

        assert(Thread::current()->is_Java_thread(), "should be java thread");
        JavaThread* thread = (JavaThread*)Thread::current();
        ThreadBlockInVM tbivm(thread);
        thread->java_suspend_self();
      }
      // Since we will give up the CodeCache_lock, always skip ahead
      // to the next nmethod. Other blobs can be deleted by other
      // threads but nmethods are only reclaimed by the sweeper.
      nmethod* next = CodeCache::next_nmethod(_current);

      // Now ready to process nmethod and give up CodeCache_lock
      {
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        freed_memory += process_nmethod(_current);
      }
      _seen++;
      _current = next;
    }
  }

  assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");

  if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) {
    // We've completed a scan without making progress but there were
    // nmethods we were unable to process either because they were
    // locked or were still on stack. We don't have to aggressively
    // clean them up so just stop scanning. We could scan once more
    // but that complicates the control logic and it's unlikely to
    // matter much.
    if (PrintMethodFlushing) {
      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
    }
  }

  jlong sweep_end_counter = os::elapsed_counter();
  jlong sweep_time = sweep_end_counter - sweep_start_counter;
  _total_time_sweeping += sweep_time;
  _total_time_this_sweep += sweep_time;
  _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
  _total_nof_methods_reclaimed += _flushed_count;

  EventSweepCodeCache event(UNTIMED);
  if (event.should_commit()) {
    event.set_starttime(sweep_start_counter);
    event.set_endtime(sweep_end_counter);
    event.set_sweepIndex(_traversals);
    event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1);
    event.set_sweptCount(swept_count);
    event.set_flushedCount(_flushed_count);
    event.set_markedCount(_marked_count);
    event.set_zombifiedCount(_zombified_count);
    event.commit();
  }

#ifdef ASSERT
  if (PrintMethodFlushing) {
    tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time);
  }
#endif

  if (_invocations == 1) {
    _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
    log_sweep("finished");
  }

  // The sweeper is the only place where memory is released; check here whether
  // it is time to restart the compiler. Only checking if there is a certain
  // amount of free memory in the code cache might lead to re-enabling
  // compilation although no memory has been released. For example, there are
  // cases when compilation was disabled although there is 4MB (or more) free
  // memory in the code cache. The reason is code cache fragmentation. Therefore,
  // it only makes sense to re-enable compilation if we have actually freed memory.
  // Note that typically several kB are released for sweeping 16MB of the code
  // cache. As a result, 'freed_memory' must be > 0 to restart the compiler.
  if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0))) {
    CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
    log_sweep("restart_compiler");
  }
}

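// RAII helper that registers the nmethod currently being processed with the
// owning CompilerThread (as its "scanned nmethod") so that the nmethod is
// kept visible across any safepoints that occur while the sweeper works on it.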
class NMethodMarker: public StackObj {
 private:
  CompilerThread* _thread;
 public:
  NMethodMarker(nmethod* nm) {
    _thread = CompilerThread::current();
    if (!nm->is_zombie() && !nm->is_unloaded()) {
      // Only expose live nmethods for scanning
      _thread->set_scanned_nmethod(nm);
    }
  }
  ~NMethodMarker() {
    _thread->set_scanned_nmethod(NULL);
  }
};

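// Flushes an nmethod from the code cache. Before the memory is released, any
// CompiledICHolders referenced from the nmethod's virtual call sites are
// cleaned up under the CompiledIC_lock.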
void NMethodSweeper::release_nmethod(nmethod *nm) {
  // Clean up any CompiledICHolders
  {
    ResourceMark rm;
    MutexLocker ml_patch(CompiledIC_lock);
    RelocIterator iter(nm);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
      }
    }
  }

  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nm->flush();
}

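// Processes a single nmethod and returns the number of bytes freed (if any).
// The action taken depends on the nmethod's state:
//   - locked by the VM: only clean inline caches if the nmethod is still alive
//   - zombie:           mark for reclamation on the first visit, flush on the next
//   - not-entrant:      make zombie once no activations remain on any stack
//   - unloaded:         flush OSR methods directly, otherwise make zombie
//   - alive:            optionally make not-entrant based on its hotness counter,
//                       then clean inline caches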
int NMethodSweeper::process_nmethod(nmethod *nm) {
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  int freed_memory = 0;
  // Make sure this nmethod doesn't get unloaded during the scan,
  // since safepoints may happen while the locks below are held.
  NMethodMarker nmm(nm);
  SWEEP(nm);

  // Skip methods that are currently referenced by the VM
  if (nm->is_locked_by_vm()) {
    // But still remember to clean-up inline caches for alive nmethods
    if (nm->is_alive()) {
      // Clean inline caches that point to zombie/non-entrant methods
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      SWEEP(nm);
    } else {
      _locked_seen++;
      SWEEP(nm);
    }
    return freed_memory;
  }

  if (nm->is_zombie()) {
    // If it is the first time we see this nmethod, we mark it. Otherwise,
    // we reclaim it. When we have seen a zombie method twice, we know that
    // there are no inline caches that refer to it.
    if (nm->is_marked_for_reclamation()) {
      assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
      }
      freed_memory = nm->total_size();
      release_nmethod(nm);
      _flushed_count++;
    } else {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
      }
      nm->mark_for_reclamation();
      request_nmethod_marking();
      _marked_count++;
      SWEEP(nm);
    }
  } else if (nm->is_not_entrant()) {
    // If there are no current activations of this method on the
    // stack we can safely convert it to a zombie method
    if (nm->can_not_entrant_be_converted()) {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
      }
      nm->make_zombie();
      request_nmethod_marking();
      _zombified_count++;
      SWEEP(nm);
    } else {
      // Still alive, clean up its inline caches
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      // We couldn't transition this nmethod, so don't immediately
      // request a rescan. If this method stays on the stack for a
      // long time we don't want to keep rescanning the code cache.
      _not_entrant_seen_on_stack++;
      SWEEP(nm);
    }
  } else if (nm->is_unloaded()) {
    // Unloaded code, just make it a zombie
    if (PrintMethodFlushing && Verbose) {
      tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
    }
    if (nm->is_osr_method()) {
      SWEEP(nm);
      // No inline caches will ever point to osr methods, so we can just remove it
      freed_memory = nm->total_size();
      release_nmethod(nm);
      _flushed_count++;
    } else {
      nm->make_zombie();
      request_nmethod_marking();
      _zombified_count++;
      SWEEP(nm);
    }
  } else {
    if (UseCodeCacheFlushing) {
      if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
        // Do not make native methods and OSR-methods not-entrant
        nm->dec_hotness_counter();
        // Get the initial value of the hotness counter. This value depends on the
        // ReservedCodeCacheSize
        int reset_val = hotness_counter_reset_val();
        int time_since_reset = reset_val - nm->hotness_counter();
        double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
        // The less free space in the code cache we have, the bigger reverse_free_ratio() is.
        // I.e., 'threshold' increases with lower available space in the code cache and a higher
        // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
        // value until it is reset by stack walking - is smaller than the computed threshold, the
        // corresponding nmethod is considered for removal.
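        // Illustrative example (not default values): with reset_val == 128,
        // reverse_free_ratio() == 8 and NmethodSweepActivity == 10, the threshold
        // is -128 + 8 * 10 = -48, so only nmethods whose hotness counter has decayed
        // well below zero (i.e., not seen on a stack for a long time) are made
        // not-entrant.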
        if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) {
          // A method is marked as not-entrant if the method is
          // 1) 'old enough': nm->hotness_counter() < threshold
          // 2) The method was in_use for a minimum amount of time: (time_since_reset > 10)
          //    The second condition is necessary if we are dealing with very small code cache
          //    sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
          //    It ensures that methods are not made not-entrant immediately after compilation.
          nm->make_not_entrant();
          request_nmethod_marking();
        }
      }
    }
    // Clean up all inline caches that point to zombie/not-entrant methods
    MutexLocker cl(CompiledIC_lock);
    nm->cleanup_inline_caches();
    SWEEP(nm);
  }
  return freed_memory;
}

// Print out some state information about the current sweep and the
// state of the code cache if it's requested.
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
  if (PrintMethodFlushing) {
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    tty->print("### sweeper: %s ", msg);
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      tty->vprint(format, ap);
      va_end(ap);
    }
    tty->print_cr(s.as_string());
  }

  if (LogCompilation && (xtty != NULL)) {
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      xtty->vprint(format, ap);
      va_end(ap);
    }
    xtty->print(s.as_string());
    xtty->stamp();
    xtty->end_elem();
  }
}
