src/share/vm/runtime/sweeper.cpp

Thu, 24 May 2018 20:03:11 +0800

author
aoqi
date
Thu, 24 May 2018 20:03:11 +0800
changeset 8868
91ddc23482a4
parent 8856
ac27a9c85bea
child 9931
fd44df5e3bc3
permissions
-rw-r--r--

Increase MaxHeapSize for better performance on MIPS

aoqi@0 1 /*
aoqi@0 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24
aoqi@0 25 #include "precompiled.hpp"
aoqi@0 26 #include "code/codeCache.hpp"
aoqi@0 27 #include "code/compiledIC.hpp"
aoqi@0 28 #include "code/icBuffer.hpp"
aoqi@0 29 #include "code/nmethod.hpp"
aoqi@0 30 #include "compiler/compileBroker.hpp"
aoqi@0 31 #include "memory/resourceArea.hpp"
aoqi@0 32 #include "oops/method.hpp"
aoqi@0 33 #include "runtime/atomic.hpp"
aoqi@0 34 #include "runtime/compilationPolicy.hpp"
aoqi@0 35 #include "runtime/mutexLocker.hpp"
goetz@6911 36 #include "runtime/orderAccess.inline.hpp"
aoqi@0 37 #include "runtime/os.hpp"
aoqi@0 38 #include "runtime/sweeper.hpp"
goetz@6911 39 #include "runtime/thread.inline.hpp"
aoqi@0 40 #include "runtime/vm_operations.hpp"
aoqi@0 41 #include "trace/tracing.hpp"
aoqi@0 42 #include "utilities/events.hpp"
aoqi@0 43 #include "utilities/ticks.inline.hpp"
aoqi@0 44 #include "utilities/xmlstream.hpp"
aoqi@0 45
aoqi@0 46 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
aoqi@0 47
aoqi@0 48 #ifdef ASSERT
aoqi@0 49
aoqi@0 50 #define SWEEP(nm) record_sweep(nm, __LINE__)
aoqi@0 51 // Sweeper logging code
aoqi@0 52 class SweeperRecord {
aoqi@0 53 public:
aoqi@0 54 int traversal;
aoqi@0 55 int invocation;
aoqi@0 56 int compile_id;
aoqi@0 57 long traversal_mark;
aoqi@0 58 int state;
aoqi@0 59 const char* kind;
aoqi@0 60 address vep;
aoqi@0 61 address uep;
aoqi@0 62 int line;
aoqi@0 63
aoqi@0 64 void print() {
aoqi@0 65 tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
aoqi@0 66 PTR_FORMAT " state = %d traversal_mark %d line = %d",
aoqi@0 67 traversal,
aoqi@0 68 invocation,
aoqi@0 69 compile_id,
aoqi@0 70 kind == NULL ? "" : kind,
aoqi@0 71 uep,
aoqi@0 72 vep,
aoqi@0 73 state,
aoqi@0 74 traversal_mark,
aoqi@0 75 line);
aoqi@0 76 }
aoqi@0 77 };
aoqi@0 78
static int _sweep_index = 0;           // Next write position in the ring buffer below
static SweeperRecord* _records = NULL; // Ring buffer of SweeperLogEntries records; lazily allocated in possibly_sweep() when LogSweeper is set
aoqi@0 81
aoqi@0 82 void NMethodSweeper::report_events(int id, address entry) {
aoqi@0 83 if (_records != NULL) {
aoqi@0 84 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
aoqi@0 85 if (_records[i].uep == entry ||
aoqi@0 86 _records[i].vep == entry ||
aoqi@0 87 _records[i].compile_id == id) {
aoqi@0 88 _records[i].print();
aoqi@0 89 }
aoqi@0 90 }
aoqi@0 91 for (int i = 0; i < _sweep_index; i++) {
aoqi@0 92 if (_records[i].uep == entry ||
aoqi@0 93 _records[i].vep == entry ||
aoqi@0 94 _records[i].compile_id == id) {
aoqi@0 95 _records[i].print();
aoqi@0 96 }
aoqi@0 97 }
aoqi@0 98 }
aoqi@0 99 }
aoqi@0 100
aoqi@0 101 void NMethodSweeper::report_events() {
aoqi@0 102 if (_records != NULL) {
aoqi@0 103 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
aoqi@0 104 // skip empty records
aoqi@0 105 if (_records[i].vep == NULL) continue;
aoqi@0 106 _records[i].print();
aoqi@0 107 }
aoqi@0 108 for (int i = 0; i < _sweep_index; i++) {
aoqi@0 109 // skip empty records
aoqi@0 110 if (_records[i].vep == NULL) continue;
aoqi@0 111 _records[i].print();
aoqi@0 112 }
aoqi@0 113 }
aoqi@0 114 }
aoqi@0 115
aoqi@0 116 void NMethodSweeper::record_sweep(nmethod* nm, int line) {
aoqi@0 117 if (_records != NULL) {
aoqi@0 118 _records[_sweep_index].traversal = _traversals;
aoqi@0 119 _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
aoqi@0 120 _records[_sweep_index].invocation = _sweep_fractions_left;
aoqi@0 121 _records[_sweep_index].compile_id = nm->compile_id();
aoqi@0 122 _records[_sweep_index].kind = nm->compile_kind();
aoqi@0 123 _records[_sweep_index].state = nm->_state;
aoqi@0 124 _records[_sweep_index].vep = nm->verified_entry_point();
aoqi@0 125 _records[_sweep_index].uep = nm->entry_point();
aoqi@0 126 _records[_sweep_index].line = line;
aoqi@0 127 _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
aoqi@0 128 }
aoqi@0 129 }
aoqi@0 130 #else
aoqi@0 131 #define SWEEP(nm)
aoqi@0 132 #endif
aoqi@0 133
// ---------------------------------------------------------------------------
// Sweeper state. Per the code below, these are mutated either at a safepoint
// (mark_active_nmethods) or by the compiler thread that won the _sweep_started
// cmpxchg in possibly_sweep().
nmethod* NMethodSweeper::_current = NULL; // Current nmethod
long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID.
long NMethodSweeper::_total_nof_code_cache_sweeps = 0; // Total number of full sweeps of the code cache
long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper
long NMethodSweeper::_last_sweep = 0; // Value of _time_counter when the last sweep happened
int NMethodSweeper::_seen = 0; // Nof. nmethod we have currently processed in current pass of CodeCache
int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep
int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
int NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep

volatile bool NMethodSweeper::_should_sweep = true; // Indicates if we should invoke the sweeper
volatile int NMethodSweeper::_sweep_fractions_left = 0; // Nof. invocations left until we are completed with this pass
volatile int NMethodSweeper::_sweep_started = 0; // Flag to control conc sweeper
volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from:
                                                 // 1) alive -> not_entrant
                                                 // 2) not_entrant -> zombie
                                                 // 3) zombie -> marked_for_reclamation
int NMethodSweeper::_hotness_counter_reset_val = 0; // Lazily computed in hotness_counter_reset_val()

long NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed
long NMethodSweeper::_total_nof_c2_methods_reclaimed = 0; // Accumulated nof C2 methods flushed
size_t NMethodSweeper::_total_flushed_size = 0; // Total number of bytes flushed from the code cache
Tickspan NMethodSweeper::_total_time_sweeping; // Accumulated time sweeping
Tickspan NMethodSweeper::_total_time_this_sweep; // Total time this sweep
Tickspan NMethodSweeper::_peak_sweep_time; // Peak time for a full sweep
Tickspan NMethodSweeper::_peak_sweep_fraction_time; // Peak time sweeping one fraction
aoqi@0 160
aoqi@0 161
aoqi@0 162
aoqi@0 163 class MarkActivationClosure: public CodeBlobClosure {
aoqi@0 164 public:
aoqi@0 165 virtual void do_code_blob(CodeBlob* cb) {
aoqi@0 166 if (cb->is_nmethod()) {
aoqi@0 167 nmethod* nm = (nmethod*)cb;
aoqi@0 168 nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
aoqi@0 169 // If we see an activation belonging to a non_entrant nmethod, we mark it.
aoqi@0 170 if (nm->is_not_entrant()) {
aoqi@0 171 nm->mark_as_seen_on_stack();
aoqi@0 172 }
aoqi@0 173 }
aoqi@0 174 }
aoqi@0 175 };
aoqi@0 176 static MarkActivationClosure mark_activation_closure;
aoqi@0 177
aoqi@0 178 class SetHotnessClosure: public CodeBlobClosure {
aoqi@0 179 public:
aoqi@0 180 virtual void do_code_blob(CodeBlob* cb) {
aoqi@0 181 if (cb->is_nmethod()) {
aoqi@0 182 nmethod* nm = (nmethod*)cb;
aoqi@0 183 nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val());
aoqi@0 184 }
aoqi@0 185 }
aoqi@0 186 };
aoqi@0 187 static SetHotnessClosure set_hotness_closure;
aoqi@0 188
aoqi@0 189
aoqi@0 190 int NMethodSweeper::hotness_counter_reset_val() {
aoqi@0 191 if (_hotness_counter_reset_val == 0) {
aoqi@0 192 _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2;
aoqi@0 193 }
aoqi@0 194 return _hotness_counter_reset_val;
aoqi@0 195 }
aoqi@0 196 bool NMethodSweeper::sweep_in_progress() {
aoqi@0 197 return (_current != NULL);
aoqi@0 198 }
aoqi@0 199
aoqi@0 200 // Scans the stacks of all Java threads and marks activations of not-entrant methods.
aoqi@0 201 // No need to synchronize access, since 'mark_active_nmethods' is always executed at a
aoqi@0 202 // safepoint.
aoqi@0 203 void NMethodSweeper::mark_active_nmethods() {
aoqi@0 204 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
aoqi@0 205 // If we do not want to reclaim not-entrant or zombie methods there is no need
aoqi@0 206 // to scan stacks
aoqi@0 207 if (!MethodFlushing) {
aoqi@0 208 return;
aoqi@0 209 }
aoqi@0 210
aoqi@0 211 // Increase time so that we can estimate when to invoke the sweeper again.
aoqi@0 212 _time_counter++;
aoqi@0 213
aoqi@0 214 // Check for restart
aoqi@0 215 assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
aoqi@0 216 if (!sweep_in_progress()) {
aoqi@0 217 _seen = 0;
aoqi@0 218 _sweep_fractions_left = NmethodSweepFraction;
aoqi@0 219 _current = CodeCache::first_nmethod();
aoqi@0 220 _traversals += 1;
aoqi@0 221 _total_time_this_sweep = Tickspan();
aoqi@0 222
aoqi@0 223 if (PrintMethodFlushing) {
aoqi@0 224 tty->print_cr("### Sweep: stack traversal %d", _traversals);
aoqi@0 225 }
aoqi@0 226 Threads::nmethods_do(&mark_activation_closure);
aoqi@0 227
aoqi@0 228 } else {
aoqi@0 229 // Only set hotness counter
aoqi@0 230 Threads::nmethods_do(&set_hotness_closure);
aoqi@0 231 }
aoqi@0 232
aoqi@0 233 OrderAccess::storestore();
aoqi@0 234 }
/**
 * This function invokes the sweeper if at least one of the three conditions is met:
 *    (1) The code cache is getting full
 *    (2) There are sufficient state changes in/since the last sweep.
 *    (3) We have not been sweeping for 'some time'
 *
 * Called from compiler threads; at most one thread sweeps at a time, guarded
 * by an atomic cmpxchg on _sweep_started below.
 */
void NMethodSweeper::possibly_sweep() {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
  // Only compiler threads are allowed to sweep
  if (!MethodFlushing || !sweep_in_progress() || !Thread::current()->is_Compiler_thread()) {
    return;
  }

  // If there was no state change while nmethod sweeping, 'should_sweep' will be false.
  // This is one of the two places where should_sweep can be set to true. The general
  // idea is as follows: If there is enough free space in the code cache, there is no
  // need to invoke the sweeper. The following formula (which determines whether to invoke
  // the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes
  // we need less frequent sweeps than for smaller ReservedCodecCacheSizes. Furthermore,
  // the formula considers how much space in the code cache is currently used. Here are
  // some examples that will (hopefully) help in understanding.
  //
  // Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since
  //                               the result of the division is 0. This keeps the used
  //                               code cache size small (important for embedded Java)
  // Large ReservedCodeCacheSize:  (e.g., 256M + code cache is 10% full). The formula
  //                               computes: (256 / 16) - 1 = 15. As a result, we invoke
  //                               the sweeper after 15 invocations of 'mark_active_nmethods.
  // Large ReservedCodeCacheSize:  (e.g., 256M + code Cache is 90% full). The formula
  //                               computes: (256 / 16) - 10 = 6.
  if (!_should_sweep) {
    const int time_since_last_sweep = _time_counter - _last_sweep;
    // ReservedCodeCacheSize has an 'unsigned' type. We need a 'signed' type for max_wait_time,
    // since 'time_since_last_sweep' can be larger than 'max_wait_time'. If that happens using
    // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
    // value) that disables the intended periodic sweeps.
    const int max_wait_time = ReservedCodeCacheSize / (16 * M);
    double wait_until_next_sweep = max_wait_time - time_since_last_sweep - CodeCache::reverse_free_ratio();
    assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");

    if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
      _should_sweep = true;
    }
  }

  if (_should_sweep && _sweep_fractions_left > 0) {
    // Only one thread at a time will sweep: the loser of the race returns and
    // goes back to compiling.
    jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
    if (old != 0) {
      return;
    }
#ifdef ASSERT
    if (LogSweeper && _records == NULL) {
      // Create the ring buffer for the logging code
      _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
      memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
    }
#endif

    if (_sweep_fractions_left > 0) {
      sweep_code_cache();
      _sweep_fractions_left--;
    }

    // We are done with sweeping the code cache once.
    if (_sweep_fractions_left == 0) {
      _total_nof_code_cache_sweeps++;
      _last_sweep = _time_counter;
      // Reset flag; temporarily disables sweeper
      _should_sweep = false;
      // If there was enough state change, 'possibly_enable_sweeper()'
      // sets '_should_sweep' to true
      possibly_enable_sweeper();
      // Reset _bytes_changed only if there was enough state change. _bytes_changed
      // can further increase by calls to 'report_state_change'.
      if (_should_sweep) {
        _bytes_changed = 0;
      }
    }
    // Release work, because another compiler thread could continue.
    OrderAccess::release_store((int*)&_sweep_started, 0);
  }
}
aoqi@0 320
// Sweeps one fraction (roughly 1/NmethodSweepFraction) of the code cache:
// walks nmethods starting at _current, delegating each to process_nmethod(),
// which cleans inline caches and advances dead code towards flushing.
// Runs on a compiler thread outside a safepoint; CodeCache_lock is dropped
// around each process_nmethod() call and pending safepoints are honored.
void NMethodSweeper::sweep_code_cache() {
  ResourceMark rm;
  Ticks sweep_start_counter = Ticks::now();

  // Per-fraction statistics, reported via the trace event below.
  _flushed_count = 0;
  _zombified_count = 0;
  _marked_for_reclamation_count = 0;

  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
  }

  if (!CompileBroker::should_compile_new_jobs()) {
    // If we have turned off compilations we might as well do full sweeps
    // in order to reach the clean state faster. Otherwise the sleeping compiler
    // threads will slow down sweeping.
    _sweep_fractions_left = 1;
  }

  // We want to visit all nmethods after NmethodSweepFraction
  // invocations so divide the remaining number of nmethods by the
  // remaining number of invocations. This is only an estimate since
  // the number of nmethods changes during the sweep so the final
  // stage must iterate until it there are no more nmethods.
  int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left;
  int swept_count = 0;


  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  int freed_memory = 0;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    // The last invocation iterates until there are no more nmethods
    for (int i = 0; (i < todo || _sweep_fractions_left == 1) && _current != NULL; i++) {
      swept_count++;
      // Drop the lock and block if a safepoint has been requested, so the
      // sweeper never delays a safepoint for a whole fraction.
      if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left);
        }
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

        assert(Thread::current()->is_Java_thread(), "should be java thread");
        JavaThread* thread = (JavaThread*)Thread::current();
        ThreadBlockInVM tbivm(thread);
        thread->java_suspend_self();
      }
      // Since we will give up the CodeCache_lock, always skip ahead
      // to the next nmethod. Other blobs can be deleted by other
      // threads but nmethods are only reclaimed by the sweeper.
      nmethod* next = CodeCache::next_nmethod(_current);

      // Now ready to process nmethod and give up CodeCache_lock
      {
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        freed_memory += process_nmethod(_current);
      }
      _seen++;
      _current = next;
    }
  }

  assert(_sweep_fractions_left > 1 || _current == NULL, "must have scanned the whole cache");

  // Fold this fraction's timing and size numbers into the accumulated stats.
  const Ticks sweep_end_counter = Ticks::now();
  const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
  _total_time_sweeping += sweep_time;
  _total_time_this_sweep += sweep_time;
  _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
  _total_flushed_size += freed_memory;
  _total_nof_methods_reclaimed += _flushed_count;

  EventSweepCodeCache event(UNTIMED);
  if (event.should_commit()) {
    event.set_starttime(sweep_start_counter);
    event.set_endtime(sweep_end_counter);
    event.set_sweepIndex(_traversals);
    event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1);
    event.set_sweptCount(swept_count);
    event.set_flushedCount(_flushed_count);
    event.set_markedCount(_marked_for_reclamation_count);
    event.set_zombifiedCount(_zombified_count);
    event.commit();
  }

#ifdef ASSERT
  if(PrintMethodFlushing) {
    tty->print_cr("### sweeper: sweep time(%d): "
      INT64_FORMAT, _sweep_fractions_left, (jlong)sweep_time.value());
  }
#endif

  if (_sweep_fractions_left == 1) {
    _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
    log_sweep("finished");
  }

  // Sweeper is the only case where memory is released, check here if it
  // is time to restart the compiler. Only checking if there is a certain
  // amount of free memory in the code cache might lead to re-enabling
  // compilation although no memory has been released. For example, there are
  // cases when compilation was disabled although there is 4MB (or more) free
  // memory in the code cache. The reason is code cache fragmentation. Therefore,
  // it only makes sense to re-enable compilation if we have actually freed memory.
  // Note that typically several kB are released for sweeping 16MB of the code
  // cache. As a result, 'freed_memory' > 0 to restart the compiler.
  if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) {
    CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
    log_sweep("restart_compiler");
  }
}
aoqi@0 434
/**
 * This function updates the sweeper statistics that keep track of nmethods
 * state changes. If there is 'enough' state change, the sweeper is invoked
 * as soon as possible. There can be data races on _bytes_changed. The data
 * races are benign, since it does not matter if we loose a couple of bytes.
 * In the worst case we call the sweeper a little later. Also, we are guaranteed
 * to invoke the sweeper if the code cache gets full.
 *
 * @param nm the nmethod whose state just changed; its total size is added to
 *           the accumulated byte-change counter.
 */
void NMethodSweeper::report_state_change(nmethod* nm) {
  _bytes_changed += nm->total_size();
  possibly_enable_sweeper();
}
aoqi@0 447
aoqi@0 448 /**
aoqi@0 449 * Function determines if there was 'enough' state change in the code cache to invoke
aoqi@0 450 * the sweeper again. Currently, we determine 'enough' as more than 1% state change in
aoqi@0 451 * the code cache since the last sweep.
aoqi@0 452 */
aoqi@0 453 void NMethodSweeper::possibly_enable_sweeper() {
aoqi@0 454 double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100;
aoqi@0 455 if (percent_changed > 1.0) {
aoqi@0 456 _should_sweep = true;
aoqi@0 457 }
aoqi@0 458 }
aoqi@0 459
aoqi@0 460 class NMethodMarker: public StackObj {
aoqi@0 461 private:
aoqi@0 462 CompilerThread* _thread;
aoqi@0 463 public:
aoqi@0 464 NMethodMarker(nmethod* nm) {
aoqi@0 465 _thread = CompilerThread::current();
aoqi@0 466 if (!nm->is_zombie() && !nm->is_unloaded()) {
aoqi@0 467 // Only expose live nmethods for scanning
aoqi@0 468 _thread->set_scanned_nmethod(nm);
aoqi@0 469 }
aoqi@0 470 }
aoqi@0 471 ~NMethodMarker() {
aoqi@0 472 _thread->set_scanned_nmethod(NULL);
aoqi@0 473 }
aoqi@0 474 };
aoqi@0 475
aoqi@0 476 void NMethodSweeper::release_nmethod(nmethod *nm) {
aoqi@0 477 // Clean up any CompiledICHolders
aoqi@0 478 {
aoqi@0 479 ResourceMark rm;
aoqi@0 480 MutexLocker ml_patch(CompiledIC_lock);
aoqi@0 481 RelocIterator iter(nm);
aoqi@0 482 while (iter.next()) {
aoqi@0 483 if (iter.type() == relocInfo::virtual_call_type) {
aoqi@0 484 CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
aoqi@0 485 }
aoqi@0 486 }
aoqi@0 487 }
aoqi@0 488
aoqi@0 489 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
aoqi@0 490 nm->flush();
aoqi@0 491 }
aoqi@0 492
// Processes a single nmethod, advancing it one step through the reclamation
// state machine:
//   alive (cold) -> not_entrant -> zombie -> marked_for_reclamation -> flushed
// Returns the number of bytes freed (> 0 only when the nmethod was actually
// flushed in this step). Called without the CodeCache_lock held.
int NMethodSweeper::process_nmethod(nmethod *nm) {
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  int freed_memory = 0;
  // Make sure this nmethod doesn't get unloaded during the scan,
  // since safepoints may happen during acquired below locks.
  NMethodMarker nmm(nm);
  SWEEP(nm);

  // Skip methods that are currently referenced by the VM
  if (nm->is_locked_by_vm()) {
    // But still remember to clean-up inline caches for alive nmethods
    if (nm->is_alive()) {
      // Clean inline caches that point to zombie/non-entrant methods
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      SWEEP(nm);
    }
    return freed_memory;
  }

  if (nm->is_zombie()) {
    // If it is the first time we see nmethod then we mark it. Otherwise,
    // we reclaim it. When we have seen a zombie method twice, we know that
    // there are no inline caches that refer to it.
    if (nm->is_marked_for_reclamation()) {
      assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
      }
      freed_memory = nm->total_size();
      if (nm->is_compiled_by_c2()) {
        _total_nof_c2_methods_reclaimed++;
      }
      release_nmethod(nm);
      _flushed_count++;
    } else {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
      }
      nm->mark_for_reclamation();
      // Keep track of code cache state change
      _bytes_changed += nm->total_size();
      _marked_for_reclamation_count++;
      SWEEP(nm);
    }
  } else if (nm->is_not_entrant()) {
    // If there are no current activations of this method on the
    // stack we can safely convert it to a zombie method
    if (nm->can_convert_to_zombie()) {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
      }
      // Clear ICStubs to prevent back patching stubs of zombie or unloaded
      // nmethods during the next safepoint (see ICStub::finalize).
      MutexLocker cl(CompiledIC_lock);
      nm->clear_ic_stubs();
      // Code cache state change is tracked in make_zombie()
      nm->make_zombie();
      _zombified_count++;
      SWEEP(nm);
    } else {
      // Still alive, clean up its inline caches
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      SWEEP(nm);
    }
  } else if (nm->is_unloaded()) {
    // Unloaded code, just make it a zombie
    if (PrintMethodFlushing && Verbose) {
      tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
    }
    if (nm->is_osr_method()) {
      SWEEP(nm);
      // No inline caches will ever point to osr methods, so we can just remove it
      freed_memory = nm->total_size();
      if (nm->is_compiled_by_c2()) {
        _total_nof_c2_methods_reclaimed++;
      }
      release_nmethod(nm);
      _flushed_count++;
    } else {
      {
        // Clean ICs of unloaded nmethods as well because they may reference other
        // unloaded nmethods that may be flushed earlier in the sweeper cycle.
        MutexLocker cl(CompiledIC_lock);
        nm->cleanup_inline_caches();
      }
      // Code cache state change is tracked in make_zombie()
      nm->make_zombie();
      _zombified_count++;
      SWEEP(nm);
    }
  } else {
    // Alive and in use: with UseCodeCacheFlushing, cold methods may be made
    // not-entrant based on their hotness counter.
    if (UseCodeCacheFlushing) {
      if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) {
        // Do not make native methods and OSR-methods not-entrant
        nm->dec_hotness_counter();
        // Get the initial value of the hotness counter. This value depends on the
        // ReservedCodeCacheSize
        int reset_val = hotness_counter_reset_val();
        int time_since_reset = reset_val - nm->hotness_counter();
        double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
        // The less free space in the code cache we have - the bigger reverse_free_ratio() is.
        // I.e., 'threshold' increases with lower available space in the code cache and a higher
        // NmethodSweepActivity. If the current hotness counter - which decreases from its initial
        // value until it is reset by stack walking - is smaller than the computed threshold, the
        // corresponding nmethod is considered for removal.
        if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) {
          // A method is marked as not-entrant if the method is
          // 1) 'old enough': nm->hotness_counter() < threshold
          // 2) The method was in_use for a minimum amount of time: (time_since_reset > 10)
          // The second condition is necessary if we are dealing with very small code cache
          // sizes (e.g., <10m) and the code cache size is too small to hold all hot methods.
          // The second condition ensures that methods are not immediately made not-entrant
          // after compilation.
          nm->make_not_entrant();
          // Code cache state change is tracked in make_not_entrant()
          if (PrintMethodFlushing && Verbose) {
            tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f",
                          nm->compile_id(), nm, nm->hotness_counter(), reset_val, threshold);
          }
        }
      }
    }
    // Clean-up all inline caches that point to zombie/non-reentrant methods
    MutexLocker cl(CompiledIC_lock);
    nm->cleanup_inline_caches();
    SWEEP(nm);
  }
  return freed_memory;
}
aoqi@0 625
// Print out some state information about the current sweep and the
// state of the code cache if it's requested.
// 'msg' is a short state tag (e.g. "finished", "restart_compiler"); the
// optional printf-style 'format' + varargs add detail. Output goes to tty
// under -XX:+PrintMethodFlushing and to the compilation log (xtty) under
// -XX:+LogCompilation. The two branches each re-read their own va_list
// because a va_list cannot be reused across both sinks.
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
  if (PrintMethodFlushing) {
    ResourceMark rm;
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    tty->print("### sweeper: %s ", msg);
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      tty->vprint(format, ap);
      va_end(ap);
    }
    tty->print_cr("%s", s.as_string());
  }

  if (LogCompilation && (xtty != NULL)) {
    ResourceMark rm;
    stringStream s;
    // Dump code cache state into a buffer before locking the tty,
    // because log_state() will use locks causing lock conflicts.
    CodeCache::log_state(&s);

    ttyLocker ttyl;
    xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
    if (format != NULL) {
      va_list ap;
      va_start(ap, format);
      xtty->vprint(format, ap);
      va_end(ap);
    }
    xtty->print("%s", s.as_string());
    xtty->stamp();
    xtty->end_elem();
  }
}
aoqi@0 667
aoqi@0 668 void NMethodSweeper::print() {
aoqi@0 669 ttyLocker ttyl;
aoqi@0 670 tty->print_cr("Code cache sweeper statistics:");
aoqi@0 671 tty->print_cr(" Total sweep time: %1.0lfms", (double)_total_time_sweeping.value()/1000000);
aoqi@0 672 tty->print_cr(" Total number of full sweeps: %ld", _total_nof_code_cache_sweeps);
aoqi@0 673 tty->print_cr(" Total number of flushed methods: %ld(%ld C2 methods)", _total_nof_methods_reclaimed,
aoqi@0 674 _total_nof_c2_methods_reclaimed);
aoqi@0 675 tty->print_cr(" Total size of flushed methods: " SIZE_FORMAT "kB", _total_flushed_size/K);
aoqi@0 676 }

mercurial