src/share/vm/runtime/sweeper.cpp

author:      aoqi <aoqi@loongson.cn>
date:        Fri, 24 Jun 2016 17:12:13 +0800
changeset:   25 873fd82b133d
parent:      0 f90c822e73f8
child:       6876 710a3c8b516e
permissions: -rw-r--r--

[Code Reorganization] Removed GC related modifications made by Loongson, for example, UseOldNUMA.

/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "compiler/compileBroker.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "runtime/atomic.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/vm_operations.hpp"
#include "trace/tracing.hpp"
#include "utilities/events.hpp"
#include "utilities/ticks.inline.hpp"
#include "utilities/xmlstream.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

#ifdef ASSERT

#define SWEEP(nm) record_sweep(nm, __LINE__)
// Sweeper logging code
class SweeperRecord {
 public:
  int traversal;
  int invocation;
  int compile_id;
  long traversal_mark;
  int state;
  const char* kind;
  address vep;
  address uep;
  int line;

  void print() {
      tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
                    PTR_FORMAT " state = %d traversal_mark %d line = %d",
                    traversal,
                    invocation,
                    compile_id,
                    kind == NULL ? "" : kind,
                    uep,
                    vep,
                    state,
                    traversal_mark,
                    line);
  }
};

static int _sweep_index = 0;
static SweeperRecord* _records = NULL;

void NMethodSweeper::report_events(int id, address entry) {
  if (_records != NULL) {
    for (int i = _sweep_index; i < SweeperLogEntries; i++) {
      if (_records[i].uep == entry ||
          _records[i].vep == entry ||
          _records[i].compile_id == id) {
        _records[i].print();
      }
    }
    for (int i = 0; i < _sweep_index; i++) {
      if (_records[i].uep == entry ||
          _records[i].vep == entry ||
          _records[i].compile_id == id) {
        _records[i].print();
      }
    }
  }
}

void NMethodSweeper::report_events() {
  if (_records != NULL) {
    for (int i = _sweep_index; i < SweeperLogEntries; i++) {
      // skip empty records
      if (_records[i].vep == NULL) continue;
      _records[i].print();
    }
    for (int i = 0; i < _sweep_index; i++) {
      // skip empty records
      if (_records[i].vep == NULL) continue;
      _records[i].print();
    }
  }
}

void NMethodSweeper::record_sweep(nmethod* nm, int line) {
  if (_records != NULL) {
    _records[_sweep_index].traversal = _traversals;
    _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
    _records[_sweep_index].invocation = _sweep_fractions_left;
    _records[_sweep_index].compile_id = nm->compile_id();
    _records[_sweep_index].kind = nm->compile_kind();
    _records[_sweep_index].state = nm->_state;
    _records[_sweep_index].vep = nm->verified_entry_point();
    _records[_sweep_index].uep = nm->entry_point();
    _records[_sweep_index].line = line;
    _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
  }
}
#else
#define SWEEP(nm)
#endif

nmethod* NMethodSweeper::_current = NULL; // Current nmethod
long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID.
long NMethodSweeper::_total_nof_code_cache_sweeps = 0; // Total number of full sweeps of the code cache
long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper
long NMethodSweeper::_last_sweep = 0; // Value of _time_counter when the last sweep happened
int NMethodSweeper::_seen = 0; // Nof. nmethods we have processed so far in the current pass of the CodeCache
int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
int NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep

volatile bool NMethodSweeper::_should_sweep = true; // Indicates if we should invoke the sweeper
volatile int NMethodSweeper::_sweep_fractions_left = 0; // Nof. invocations left until we are completed with this pass
volatile int NMethodSweeper::_sweep_started = 0; // Flag to control conc sweeper
volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from:
                                                 //   1) alive -> not_entrant
                                                 //   2) not_entrant -> zombie
                                                 //   3) zombie -> marked_for_reclamation
int NMethodSweeper::_hotness_counter_reset_val = 0;

long NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed
long NMethodSweeper::_total_nof_c2_methods_reclaimed = 0; // Accumulated nof methods flushed
size_t NMethodSweeper::_total_flushed_size = 0; // Total number of bytes flushed from the code cache
Tickspan NMethodSweeper::_total_time_sweeping; // Accumulated time sweeping
Tickspan NMethodSweeper::_total_time_this_sweep; // Total time this sweep
Tickspan NMethodSweeper::_peak_sweep_time; // Peak time for a full sweep
Tickspan NMethodSweeper::_peak_sweep_fraction_time; // Peak time sweeping one fraction



class MarkActivationClosure: public CodeBlobClosure {
 public:
  virtual void do_code_blob(CodeBlob* cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
      // If we see an activation belonging to a non_entrant nmethod, we mark it.
      if (nm->is_not_entrant()) {
        nm->mark_as_seen_on_stack();
      }
    }
  }
};
static MarkActivationClosure mark_activation_closure;

class SetHotnessClosure: public CodeBlobClosure {
 public:
  virtual void do_code_blob(CodeBlob* cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
    }
  }
};
static SetHotnessClosure set_hotness_closure;


int NMethodSweeper::hotness_counter_reset_val() {
  if (_hotness_counter_reset_val == 0) {
    _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
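    // Worked example (follows directly from the expression above, using the
    // hypothetical 256M code cache from the comments later in this file): the
    // reset value becomes (256M / M) * 2 = 512, while for caches smaller than
    // 1M it is pinned to 1.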
  }
  return _hotness_counter_reset_val;
}
bool NMethodSweeper::sweep_in_progress() {
  return (_current != NULL);
}

// Scans the stacks of all Java threads and marks activations of not-entrant methods.
// No need to synchronize access, since 'mark_active_nmethods' is always executed at a
// safepoint.
void NMethodSweeper::mark_active_nmethods() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
  // If we do not want to reclaim not-entrant or zombie methods there is no need
  // to scan stacks
  if (!MethodFlushing) {
    return;
  }

  // Increase time so that we can estimate when to invoke the sweeper again.
  _time_counter++;

  // Check for restart
  assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
  if (!sweep_in_progress()) {
    _seen = 0;
    _sweep_fractions_left = NmethodSweepFraction;
    _current = CodeCache::first_nmethod();
    _traversals += 1;
    _total_time_this_sweep = Tickspan();

    if (PrintMethodFlushing) {
      tty->print_cr("### Sweep: stack traversal %d", _traversals);
    }
    Threads::nmethods_do(&mark_activation_closure);

  } else {
    // Only set hotness counter
    Threads::nmethods_do(&set_hotness_closure);
  }

  OrderAccess::storestore();
}
/**
 * This function invokes the sweeper if at least one of the three conditions is met:
 *    (1) The code cache is getting full
 *    (2) There are sufficient state changes in/since the last sweep.
 *    (3) We have not been sweeping for 'some time'
 */
void NMethodSweeper::possibly_sweep() {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
  // Only compiler threads are allowed to sweep
  if (!MethodFlushing || !sweep_in_progress() || !Thread::current()->is_Compiler_thread()) {
    return;
  }

  // If there was no state change during the last nmethod sweep, 'should_sweep' will be false.
  // This is one of the two places where should_sweep can be set to true. The general
  // idea is as follows: If there is enough free space in the code cache, there is no
  // need to invoke the sweeper. The following formula (which determines whether to invoke
  // the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes
  // we need less frequent sweeps than for smaller ReservedCodeCacheSizes. Furthermore,
  // the formula considers how much space in the code cache is currently used. Here are
  // some examples that will (hopefully) help in understanding.
  //
  // Small ReservedCodeCacheSizes:  (e.g., < 16M) We invoke the sweeper every time, since
  //                                              the result of the division is 0. This
  //                                              keeps the used code cache size small
  //                                              (important for embedded Java)
  // Large ReservedCodeCacheSize:   (e.g., 256M + code cache is 10% full). The formula
  //                                              computes: (256 / 16) - 1 = 15
  //                                              As a result, we invoke the sweeper after
  //                                              15 invocations of 'mark_active_nmethods()'.
  // Large ReservedCodeCacheSize:   (e.g., 256M + code cache is 90% full). The formula
  //                                              computes: (256 / 16) - 10 = 6.
  if (!_should_sweep) {
    const int time_since_last_sweep = _time_counter - _last_sweep;
    // ReservedCodeCacheSize has an 'unsigned' type. We need a 'signed' type for max_wait_time,
    // since 'time_since_last_sweep' can be larger than 'max_wait_time'. If that happens using
    // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
    // value) that disables the intended periodic sweeps.
    const int max_wait_time = ReservedCodeCacheSize / (16 * M);
    double wait_until_next_sweep = max_wait_time - time_since_last_sweep - CodeCache::reverse_free_ratio();
    assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");

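    // A concrete reading of the formula above (illustrative numbers only, using
    // the hypothetical 256M cache from the comment block): max_wait_time is
    // 256M / 16M = 16; if the cache is roughly half full, reverse_free_ratio()
    // is about 2, so right after a sweep (time_since_last_sweep == 0) about
    // 14 more calls of 'mark_active_nmethods()' pass before the sweeper is
    // re-enabled, and fewer as the cache fills up.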
    if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
      _should_sweep = true;
    }
  }

  if (_should_sweep && _sweep_fractions_left > 0) {
    // Only one thread at a time will sweep
    jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
    if (old != 0) {
      return;
    }
#ifdef ASSERT
    if (LogSweeper && _records == NULL) {
      // Create the ring buffer for the logging code
      _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
      memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
    }
#endif

    if (_sweep_fractions_left > 0) {
      sweep_code_cache();
      _sweep_fractions_left--;
    }

    // We are done with sweeping the code cache once.
    if (_sweep_fractions_left == 0) {
      _total_nof_code_cache_sweeps++;
      _last_sweep = _time_counter;
      // Reset flag; temporarily disables sweeper
      _should_sweep = false;
      // If there was enough state change, 'possibly_enable_sweeper()'
      // sets '_should_sweep' to true
      possibly_enable_sweeper();
      // Reset _bytes_changed only if there was enough state change. _bytes_changed
      // can further increase by calls to 'report_state_change'.
      if (_should_sweep) {
        _bytes_changed = 0;
      }
    }
    // Release work, because another compiler thread could continue.
    OrderAccess::release_store((int*)&_sweep_started, 0);
  }
}

void NMethodSweeper::sweep_code_cache() {
  Ticks sweep_start_counter = Ticks::now();

  _flushed_count = 0;
  _zombified_count = 0;
  _marked_for_reclamation_count = 0;

  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
  }

  if (!CompileBroker::should_compile_new_jobs()) {
    // If we have turned off compilations we might as well do full sweeps
    // in order to reach the clean state faster. Otherwise the sleeping compiler
    // threads will slow down sweeping.
    _sweep_fractions_left = 1;
  }

  // We want to visit all nmethods after NmethodSweepFraction
  // invocations so divide the remaining number of nmethods by the
  // remaining number of invocations. This is only an estimate since
  // the number of nmethods changes during the sweep so the final
  // stage must iterate until there are no more nmethods.
  int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left;
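  // For instance (illustrative figures only): with 8000 nmethods in the code
  // cache, nothing seen yet, and an assumed NmethodSweepFraction of 16, the
  // first fraction visits about 8000 / 16 = 500 nmethods.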
  int swept_count = 0;


  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  int freed_memory = 0;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    // The last invocation iterates until there are no more nmethods
    for (int i = 0; (i < todo || _sweep_fractions_left == 1) && _current != NULL; i++) {
      swept_count++;
      if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
        }
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

        assert(Thread::current()->is_Java_thread(), "should be java thread");
        JavaThread* thread = (JavaThread*)Thread::current();
        ThreadBlockInVM tbivm(thread);
        thread->java_suspend_self();
      }
      // Since we will give up the CodeCache_lock, always skip ahead
      // to the next nmethod. Other blobs can be deleted by other
      // threads but nmethods are only reclaimed by the sweeper.
      nmethod* next = CodeCache::next_nmethod(_current);

      // Now ready to process nmethod and give up CodeCache_lock
      {
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        freed_memory += process_nmethod(_current);
      }
      _seen++;
      _current = next;
    }
  }

  assert(_sweep_fractions_left > 1 || _current == NULL, "must have scanned the whole cache");

  const Ticks sweep_end_counter = Ticks::now();
  const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
  _total_time_sweeping += sweep_time;
  _total_time_this_sweep += sweep_time;
  _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
  _total_flushed_size += freed_memory;
  _total_nof_methods_reclaimed += _flushed_count;

  EventSweepCodeCache event(UNTIMED);
  if (event.should_commit()) {
    event.set_starttime(sweep_start_counter);
    event.set_endtime(sweep_end_counter);
    event.set_sweepIndex(_traversals);
    event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1);
    event.set_sweptCount(swept_count);
    event.set_flushedCount(_flushed_count);
    event.set_markedCount(_marked_for_reclamation_count);
    event.set_zombifiedCount(_zombified_count);
    event.commit();
  }

#ifdef ASSERT
  if (PrintMethodFlushing) {
    tty->print_cr("### sweeper: sweep time(%d): "
      INT64_FORMAT, _sweep_fractions_left, (jlong)sweep_time.value());
  }
#endif

  if (_sweep_fractions_left == 1) {
    _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
    log_sweep("finished");
  }

  // The sweeper is the only place where memory is released, so check here whether
  // it is time to restart the compiler. Only checking if there is a certain
  // amount of free memory in the code cache might lead to re-enabling
  // compilation although no memory has been released. For example, there are
  // cases when compilation was disabled although there is 4MB (or more) free
  // memory in the code cache. The reason is code cache fragmentation. Therefore,
  // it only makes sense to re-enable compilation if we have actually freed memory.
  // Note that typically several kB are released for sweeping 16MB of the code
  // cache. As a result, we require 'freed_memory' > 0 before restarting the compiler.
  if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) {
    CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
    log_sweep("restart_compiler");
  }
}

/**
 * This function updates the sweeper statistics that keep track of nmethod
 * state changes. If there is 'enough' state change, the sweeper is invoked
 * as soon as possible. There can be data races on _bytes_changed. The data
 * races are benign, since it does not matter if we lose a couple of bytes.
 * In the worst case we call the sweeper a little later. Also, we are guaranteed
 * to invoke the sweeper if the code cache gets full.
 */
void NMethodSweeper::report_state_change(nmethod* nm) {
  _bytes_changed += nm->total_size();
  possibly_enable_sweeper();
}

/**
 * Function determines if there was 'enough' state change in the code cache to invoke
 * the sweeper again. Currently, we determine 'enough' as more than 1% state change in
 * the code cache since the last sweep.
 */
void NMethodSweeper::possibly_enable_sweeper() {
  double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100;
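  // For example, with a hypothetical 256M ReservedCodeCacheSize this means that
  // more than roughly 2.5M worth of nmethod state changes since the last sweep
  // re-arms the sweeper.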
  if (percent_changed > 1.0) {
    _should_sweep = true;
  }
}

class NMethodMarker: public StackObj {
 private:
  CompilerThread* _thread;
 public:
  NMethodMarker(nmethod* nm) {
    _thread = CompilerThread::current();
    if (!nm->is_zombie() && !nm->is_unloaded()) {
      // Only expose live nmethods for scanning
      _thread->set_scanned_nmethod(nm);
    }
  }
  ~NMethodMarker() {
    _thread->set_scanned_nmethod(NULL);
  }
};

void NMethodSweeper::release_nmethod(nmethod *nm) {
  // Clean up any CompiledICHolders
  {
    ResourceMark rm;
    MutexLocker ml_patch(CompiledIC_lock);
    RelocIterator iter(nm);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
      }
    }
  }

  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nm->flush();
}

int NMethodSweeper::process_nmethod(nmethod *nm) {
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  int freed_memory = 0;
  // Make sure this nmethod doesn't get unloaded during the scan,
  // since safepoints may happen while the locks acquired below are held.
  NMethodMarker nmm(nm);
  SWEEP(nm);

  // Skip methods that are currently referenced by the VM
  if (nm->is_locked_by_vm()) {
    // But still remember to clean-up inline caches for alive nmethods
    if (nm->is_alive()) {
      // Clean inline caches that point to zombie/non-entrant methods
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      SWEEP(nm);
    }
    return freed_memory;
  }

  if (nm->is_zombie()) {
    // If it is the first time we see nmethod then we mark it. Otherwise,
    // we reclaim it. When we have seen a zombie method twice, we know that
    // there are no inline caches that refer to it.
    if (nm->is_marked_for_reclamation()) {
      assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
      }
      freed_memory = nm->total_size();
      if (nm->is_compiled_by_c2()) {
        _total_nof_c2_methods_reclaimed++;
      }
      release_nmethod(nm);
      _flushed_count++;
    } else {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
      }
      nm->mark_for_reclamation();
      // Keep track of code cache state change
      _bytes_changed += nm->total_size();
      _marked_for_reclamation_count++;
      SWEEP(nm);
    }
  } else if (nm->is_not_entrant()) {
    // If there are no current activations of this method on the
    // stack we can safely convert it to a zombie method
    if (nm->can_not_entrant_be_converted()) {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
      }
      // Code cache state change is tracked in make_zombie()
      nm->make_zombie();
      _zombified_count++;
      SWEEP(nm);
    } else {
      // Still alive, clean up its inline caches
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      SWEEP(nm);
    }
  } else if (nm->is_unloaded()) {
    // Unloaded code, just make it a zombie
    if (PrintMethodFlushing && Verbose) {
      tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
    }
    if (nm->is_osr_method()) {
      SWEEP(nm);
      // No inline caches will ever point to osr methods, so we can just remove it
      freed_memory = nm->total_size();
      if (nm->is_compiled_by_c2()) {
        _total_nof_c2_methods_reclaimed++;
      }
      release_nmethod(nm);
      _flushed_count++;
    } else {
      // Code cache state change is tracked in make_zombie()
      nm->make_zombie();
      _zombified_count++;
      SWEEP(nm);
    }
  } else {
    if (UseCodeCacheFlushing) {
      if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
        // Do not make native methods and OSR-methods not-entrant
        nm->dec_hotness_counter();
        // Get the initial value of the hotness counter. This value depends on the
        // ReservedCodeCacheSize
        int reset_val = hotness_counter_reset_val();
        int time_since_reset = reset_val - nm->hotness_counter();
        double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
        // The less free space in the code cache we have - the bigger reverse_free_ratio() is.
        // I.e., 'threshold' increases with lower available space in the code cache and a higher
        // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
        // value until it is reset by stack walking - is smaller than the computed threshold, the
        // corresponding nmethod is considered for removal.
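        // Rough illustration (assuming NmethodSweepActivity == 10 and the
        // hypothetical 256M cache, i.e. reset_val == 512): with the cache about
        // half full, reverse_free_ratio() is roughly 2, so the threshold is
        // -512 + 2 * 10 = -492 and cold methods are flushed only very reluctantly;
        // as the cache fills up the threshold rises towards (and past) zero and
        // sweeping becomes correspondingly more aggressive.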
        if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) {
          // A method is marked as not-entrant if the method is
          // 1) 'old enough': nm->hotness_counter() < threshold
          // 2) The method was in_use for a minimum amount of time: (time_since_reset > 10)
          //    The second condition is necessary if we are dealing with very small code cache
          //    sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
          //    The second condition ensures that methods are not immediately made not-entrant
          //    after compilation.
          nm->make_not_entrant();
          // Code cache state change is tracked in make_not_entrant()
          if (PrintMethodFlushing && Verbose) {
            tty->print_cr("### Nmethod %d/" PTR_FORMAT " made not-entrant: hotness counter %d/%d threshold %f",
                          nm->compile_id(), nm, nm->hotness_counter(), reset_val, threshold);
          }
        }
      }
    }
    // Clean-up all inline caches that point to zombie/not-entrant methods
    MutexLocker cl(CompiledIC_lock);
    nm->cleanup_inline_caches();
    SWEEP(nm);
  }
  return freed_memory;
}

// Print out some state information about the current sweep and the
// state of the code cache if it's requested.
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
  if (PrintMethodFlushing) {
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    tty->print("### sweeper: %s ", msg);
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      tty->vprint(format, ap);
      va_end(ap);
    }
    tty->print_cr("%s", s.as_string());
  }

  if (LogCompilation && (xtty != NULL)) {
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      xtty->vprint(format, ap);
      va_end(ap);
    }
    xtty->print("%s", s.as_string());
    xtty->stamp();
    xtty->end_elem();
  }
}

void NMethodSweeper::print() {
  ttyLocker ttyl;
  tty->print_cr("Code cache sweeper statistics:");
  tty->print_cr(" Total sweep time: %1.0lfms", (double)_total_time_sweeping.value()/1000000);
  tty->print_cr(" Total number of full sweeps: %ld", _total_nof_code_cache_sweeps);
  tty->print_cr(" Total number of flushed methods: %ld(%ld C2 methods)", _total_nof_methods_reclaimed,
                _total_nof_c2_methods_reclaimed);
  tty->print_cr(" Total size of flushed methods: " SIZE_FORMAT "kB", _total_flushed_size/K);
}
