Tue, 12 Nov 2013 09:32:50 +0100
8027593: performance drop with constrained codecache starting with hs25 b111
Summary: Fixed proper sweeping of small code cache sizes
Reviewed-by: kvn, iveresov
1.1 --- a/src/share/vm/code/nmethod.cpp Mon Nov 11 11:53:33 2013 -0800 1.2 +++ b/src/share/vm/code/nmethod.cpp Tue Nov 12 09:32:50 2013 +0100 1.3 @@ -1259,7 +1259,7 @@ 1.4 1.5 set_osr_link(NULL); 1.6 //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods 1.7 - NMethodSweeper::notify(); 1.8 + NMethodSweeper::report_state_change(this); 1.9 } 1.10 1.11 void nmethod::invalidate_osr_method() { 1.12 @@ -1293,7 +1293,9 @@ 1.13 } 1.14 } 1.15 1.16 -// Common functionality for both make_not_entrant and make_zombie 1.17 +/** 1.18 + * Common functionality for both make_not_entrant and make_zombie 1.19 + */ 1.20 bool nmethod::make_not_entrant_or_zombie(unsigned int state) { 1.21 assert(state == zombie || state == not_entrant, "must be zombie or not_entrant"); 1.22 assert(!is_zombie(), "should not already be a zombie"); 1.23 @@ -1417,9 +1419,7 @@ 1.24 tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie"); 1.25 } 1.26 1.27 - // Make sweeper aware that there is a zombie method that needs to be removed 1.28 - NMethodSweeper::notify(); 1.29 - 1.30 + NMethodSweeper::report_state_change(this); 1.31 return true; 1.32 } 1.33
2.1 --- a/src/share/vm/compiler/compileBroker.cpp Mon Nov 11 11:53:33 2013 -0800 2.2 +++ b/src/share/vm/compiler/compileBroker.cpp Tue Nov 12 09:32:50 2013 +0100 2.3 @@ -126,6 +126,7 @@ 2.4 2.5 bool CompileBroker::_initialized = false; 2.6 volatile bool CompileBroker::_should_block = false; 2.7 +volatile jint CompileBroker::_print_compilation_warning = 0; 2.8 volatile jint CompileBroker::_should_compile_new_jobs = run_compilation; 2.9 2.10 // The installed compiler(s) 2.11 @@ -2027,11 +2028,10 @@ 2.12 #endif 2.13 } 2.14 2.15 -// ------------------------------------------------------------------ 2.16 -// CompileBroker::handle_full_code_cache 2.17 -// 2.18 -// The CodeCache is full. Print out warning and disable compilation or 2.19 -// try code cache cleaning so compilation can continue later. 2.20 +/** 2.21 + * The CodeCache is full. Print out warning and disable compilation 2.22 + * or try code cache cleaning so compilation can continue later. 2.23 + */ 2.24 void CompileBroker::handle_full_code_cache() { 2.25 UseInterpreter = true; 2.26 if (UseCompiler || AlwaysCompileLoopMethods ) { 2.27 @@ -2048,12 +2048,9 @@ 2.28 xtty->stamp(); 2.29 xtty->end_elem(); 2.30 } 2.31 - warning("CodeCache is full. Compiler has been disabled."); 2.32 - warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize="); 2.33 2.34 CodeCache::report_codemem_full(); 2.35 2.36 - 2.37 #ifndef PRODUCT 2.38 if (CompileTheWorld || ExitOnFullCodeCache) { 2.39 codecache_print(/* detailed= */ true); 2.40 @@ -2066,17 +2063,22 @@ 2.41 // Since code cache is full, immediately stop new compiles 2.42 if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) { 2.43 NMethodSweeper::log_sweep("disable_compiler"); 2.44 - 2.45 - // Switch to 'vm_state'. This ensures that possibly_sweep() can be called 2.46 - // without having to consider the state in which the current thread is. 
2.47 - ThreadInVMfromUnknown in_vm; 2.48 - NMethodSweeper::possibly_sweep(); 2.49 } 2.50 + // Switch to 'vm_state'. This ensures that possibly_sweep() can be called 2.51 + // without having to consider the state in which the current thread is. 2.52 + ThreadInVMfromUnknown in_vm; 2.53 + NMethodSweeper::possibly_sweep(); 2.54 } else { 2.55 disable_compilation_forever(); 2.56 } 2.57 + 2.58 + // Print warning only once 2.59 + if (should_print_compiler_warning()) { 2.60 + warning("CodeCache is full. Compiler has been disabled."); 2.61 + warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize="); 2.62 + codecache_print(/* detailed= */ true); 2.63 + } 2.64 } 2.65 - codecache_print(/* detailed= */ true); 2.66 } 2.67 2.68 // ------------------------------------------------------------------
3.1 --- a/src/share/vm/compiler/compileBroker.hpp Mon Nov 11 11:53:33 2013 -0800 3.2 +++ b/src/share/vm/compiler/compileBroker.hpp Tue Nov 12 09:32:50 2013 +0100 3.3 @@ -315,6 +315,8 @@ 3.4 static int _sum_nmethod_code_size; 3.5 static long _peak_compilation_time; 3.6 3.7 + static volatile jint _print_compilation_warning; 3.8 + 3.9 static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, TRAPS); 3.10 static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count); 3.11 static bool compilation_is_complete (methodHandle method, int osr_bci, int comp_level); 3.12 @@ -418,7 +420,11 @@ 3.13 return _should_compile_new_jobs == shutdown_compilaton; 3.14 } 3.15 static void handle_full_code_cache(); 3.16 - 3.17 + // Ensures that warning is only printed once. 3.18 + static bool should_print_compiler_warning() { 3.19 + jint old = Atomic::cmpxchg(1, &_print_compilation_warning, 0); 3.20 + return old == 0; 3.21 + } 3.22 // Return total compilation ticks 3.23 static jlong total_compilation_ticks() { 3.24 return _perf_total_compilation != NULL ? _perf_total_compilation->get_value() : 0;
4.1 --- a/src/share/vm/runtime/arguments.cpp Mon Nov 11 11:53:33 2013 -0800 4.2 +++ b/src/share/vm/runtime/arguments.cpp Tue Nov 12 09:32:50 2013 +0100 4.3 @@ -1132,9 +1132,6 @@ 4.4 Tier3InvokeNotifyFreqLog = 0; 4.5 Tier4InvocationThreshold = 0; 4.6 } 4.7 - if (FLAG_IS_DEFAULT(NmethodSweepFraction)) { 4.8 - FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M)); 4.9 - } 4.10 } 4.11 4.12 #if INCLUDE_ALL_GCS 4.13 @@ -3643,6 +3640,11 @@ 4.14 "Incompatible compilation policy selected", NULL); 4.15 } 4.16 } 4.17 + // Set NmethodSweepFraction after the size of the code cache is adapted (in case of tiered) 4.18 + if (FLAG_IS_DEFAULT(NmethodSweepFraction)) { 4.19 + FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M)); 4.20 + } 4.21 + 4.22 4.23 // Set heap size based on available physical memory 4.24 set_heap_size();
5.1 --- a/src/share/vm/runtime/globals.hpp Mon Nov 11 11:53:33 2013 -0800 5.2 +++ b/src/share/vm/runtime/globals.hpp Tue Nov 12 09:32:50 2013 +0100 5.3 @@ -3286,7 +3286,7 @@ 5.4 "Exit the VM if we fill the code cache") \ 5.5 \ 5.6 product(bool, UseCodeCacheFlushing, true, \ 5.7 - "Attempt to clean the code cache before shutting off compiler") \ 5.8 + "Remove cold/old nmethods from the code cache") \ 5.9 \ 5.10 /* interpreter debugging */ \ 5.11 develop(intx, BinarySwitchThreshold, 5, \
6.1 --- a/src/share/vm/runtime/sweeper.cpp Mon Nov 11 11:53:33 2013 -0800 6.2 +++ b/src/share/vm/runtime/sweeper.cpp Tue Nov 12 09:32:50 2013 +0100 6.3 @@ -112,14 +112,13 @@ 6.4 if (_records != NULL) { 6.5 _records[_sweep_index].traversal = _traversals; 6.6 _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark; 6.7 - _records[_sweep_index].invocation = _invocations; 6.8 + _records[_sweep_index].invocation = _sweep_fractions_left; 6.9 _records[_sweep_index].compile_id = nm->compile_id(); 6.10 _records[_sweep_index].kind = nm->compile_kind(); 6.11 _records[_sweep_index].state = nm->_state; 6.12 _records[_sweep_index].vep = nm->verified_entry_point(); 6.13 _records[_sweep_index].uep = nm->entry_point(); 6.14 _records[_sweep_index].line = line; 6.15 - 6.16 _sweep_index = (_sweep_index + 1) % SweeperLogEntries; 6.17 } 6.18 } 6.19 @@ -127,26 +126,29 @@ 6.20 #define SWEEP(nm) 6.21 #endif 6.22 6.23 -nmethod* NMethodSweeper::_current = NULL; // Current nmethod 6.24 -long NMethodSweeper::_traversals = 0; // Nof. stack traversals performed 6.25 -int NMethodSweeper::_seen = 0; // Nof. nmethods we have currently processed in current pass of CodeCache 6.26 -int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep 6.27 -int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep 6.28 -int NMethodSweeper::_marked_count = 0; // Nof. nmethods marked for reclaim in current sweep 6.29 +nmethod* NMethodSweeper::_current = NULL; // Current nmethod 6.30 +long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID. 6.31 +long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper 6.32 +long NMethodSweeper::_last_sweep = 0; // Value of _time_counter when the last sweep happened 6.33 +int NMethodSweeper::_seen = 0; // Nof. nmethod we have currently processed in current pass of CodeCache 6.34 +int NMethodSweeper::_flushed_count = 0; // Nof. 
nmethods flushed in current sweep 6.35 +int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep 6.36 +int NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep 6.37 6.38 -volatile int NMethodSweeper::_invocations = 0; // Nof. invocations left until we are completed with this pass 6.39 -volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress. 6.40 +volatile bool NMethodSweeper::_should_sweep = true; // Indicates if we should invoke the sweeper 6.41 +volatile int NMethodSweeper::_sweep_fractions_left = 0; // Nof. invocations left until we are completed with this pass 6.42 +volatile int NMethodSweeper::_sweep_started = 0; // Flag to control conc sweeper 6.43 +volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from: 6.44 + // 1) alive -> not_entrant 6.45 + // 2) not_entrant -> zombie 6.46 + // 3) zombie -> marked_for_reclamation 6.47 6.48 -jint NMethodSweeper::_locked_seen = 0; 6.49 -jint NMethodSweeper::_not_entrant_seen_on_stack = 0; 6.50 -bool NMethodSweeper::_request_mark_phase = false; 6.51 - 6.52 -int NMethodSweeper::_total_nof_methods_reclaimed = 0; 6.53 -jlong NMethodSweeper::_total_time_sweeping = 0; 6.54 -jlong NMethodSweeper::_total_time_this_sweep = 0; 6.55 -jlong NMethodSweeper::_peak_sweep_time = 0; 6.56 -jlong NMethodSweeper::_peak_sweep_fraction_time = 0; 6.57 -int NMethodSweeper::_hotness_counter_reset_val = 0; 6.58 +int NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed 6.59 +jlong NMethodSweeper::_total_time_sweeping = 0; // Accumulated time sweeping 6.60 +jlong NMethodSweeper::_total_time_this_sweep = 0; // Total time this sweep 6.61 +jlong NMethodSweeper::_peak_sweep_time = 0; // Peak time for a full sweep 6.62 +jlong NMethodSweeper::_peak_sweep_fraction_time = 0; // Peak time sweeping one fraction 6.63 +int NMethodSweeper::_hotness_counter_reset_val = 0; 
6.64 6.65 6.66 class MarkActivationClosure: public CodeBlobClosure { 6.67 @@ -197,13 +199,16 @@ 6.68 return; 6.69 } 6.70 6.71 + // Increase time so that we can estimate when to invoke the sweeper again. 6.72 + _time_counter++; 6.73 + 6.74 // Check for restart 6.75 assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid"); 6.76 - if (!sweep_in_progress() && need_marking_phase()) { 6.77 - _seen = 0; 6.78 - _invocations = NmethodSweepFraction; 6.79 - _current = CodeCache::first_nmethod(); 6.80 - _traversals += 1; 6.81 + if (!sweep_in_progress()) { 6.82 + _seen = 0; 6.83 + _sweep_fractions_left = NmethodSweepFraction; 6.84 + _current = CodeCache::first_nmethod(); 6.85 + _traversals += 1; 6.86 _total_time_this_sweep = 0; 6.87 6.88 if (PrintMethodFlushing) { 6.89 @@ -211,10 +216,6 @@ 6.90 } 6.91 Threads::nmethods_do(&mark_activation_closure); 6.92 6.93 - // reset the flags since we started a scan from the beginning. 6.94 - reset_nmethod_marking(); 6.95 - _locked_seen = 0; 6.96 - _not_entrant_seen_on_stack = 0; 6.97 } else { 6.98 // Only set hotness counter 6.99 Threads::nmethods_do(&set_hotness_closure); 6.100 @@ -222,14 +223,48 @@ 6.101 6.102 OrderAccess::storestore(); 6.103 } 6.104 - 6.105 +/** 6.106 + * This function invokes the sweeper if at least one of the three conditions is met: 6.107 + * (1) The code cache is getting full 6.108 + * (2) There are sufficient state changes in/since the last sweep. 6.109 + * (3) We have not been sweeping for 'some time' 6.110 + */ 6.111 void NMethodSweeper::possibly_sweep() { 6.112 assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode"); 6.113 if (!MethodFlushing || !sweep_in_progress()) { 6.114 return; 6.115 } 6.116 6.117 - if (_invocations > 0) { 6.118 + // If there was no state change while nmethod sweeping, 'should_sweep' will be false. 6.119 + // This is one of the two places where should_sweep can be set to true. 
The general 6.120 + // idea is as follows: If there is enough free space in the code cache, there is no 6.121 + // need to invoke the sweeper. The following formula (which determines whether to invoke 6.122 + // the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes 6.123 + // we need less frequent sweeps than for smaller ReservedCodeCacheSizes. Furthermore, 6.124 + // the formula considers how much space in the code cache is currently used. Here are 6.125 + // some examples that will (hopefully) help in understanding. 6.126 + // 6.127 + // Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since 6.128 + // the result of the division is 0. This 6.129 + // keeps the used code cache size small 6.130 + // (important for embedded Java) 6.131 + // Large ReservedCodeCacheSize : (e.g., 256M + code cache is 10% full). The formula 6.132 + // computes: (256 / 16) - 1 = 15 6.133 + // As a result, we invoke the sweeper after 6.134 + // 15 invocations of 'mark_active_nmethods'. 6.135 + // Large ReservedCodeCacheSize: (e.g., 256M + code cache is 90% full). The formula 6.136 + // computes: (256 / 16) - 10 = 6. 
6.137 + if (!_should_sweep) { 6.138 + int time_since_last_sweep = _time_counter - _last_sweep; 6.139 + double wait_until_next_sweep = (ReservedCodeCacheSize / (16 * M)) - time_since_last_sweep - 6.140 + CodeCache::reverse_free_ratio(); 6.141 + 6.142 + if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) { 6.143 + _should_sweep = true; 6.144 + } 6.145 + } 6.146 + 6.147 + if (_should_sweep && _sweep_fractions_left > 0) { 6.148 // Only one thread at a time will sweep 6.149 jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 ); 6.150 if (old != 0) { 6.151 @@ -242,31 +277,46 @@ 6.152 memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries); 6.153 } 6.154 #endif 6.155 - if (_invocations > 0) { 6.156 + 6.157 + if (_sweep_fractions_left > 0) { 6.158 sweep_code_cache(); 6.159 - _invocations--; 6.160 + _sweep_fractions_left--; 6.161 + } 6.162 + 6.163 + // We are done with sweeping the code cache once. 6.164 + if (_sweep_fractions_left == 0) { 6.165 + _last_sweep = _time_counter; 6.166 + // Reset flag; temporarily disables sweeper 6.167 + _should_sweep = false; 6.168 + // If there was enough state change, 'possibly_enable_sweeper()' 6.169 + // sets '_should_sweep' to true 6.170 + possibly_enable_sweeper(); 6.171 + // Reset _bytes_changed only if there was enough state change. _bytes_changed 6.172 + // can further increase by calls to 'report_state_change'. 6.173 + if (_should_sweep) { 6.174 + _bytes_changed = 0; 6.175 + } 6.176 } 6.177 _sweep_started = 0; 6.178 } 6.179 } 6.180 6.181 void NMethodSweeper::sweep_code_cache() { 6.182 - 6.183 jlong sweep_start_counter = os::elapsed_counter(); 6.184 6.185 - _flushed_count = 0; 6.186 - _zombified_count = 0; 6.187 - _marked_count = 0; 6.188 + _flushed_count = 0; 6.189 + _zombified_count = 0; 6.190 + _marked_for_reclamation_count = 0; 6.191 6.192 if (PrintMethodFlushing && Verbose) { 6.193 - tty->print_cr("### Sweep at %d out of %d. 
Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations); 6.194 + tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left); 6.195 } 6.196 6.197 if (!CompileBroker::should_compile_new_jobs()) { 6.198 // If we have turned off compilations we might as well do full sweeps 6.199 // in order to reach the clean state faster. Otherwise the sleeping compiler 6.200 // threads will slow down sweeping. 6.201 - _invocations = 1; 6.202 + _sweep_fractions_left = 1; 6.203 } 6.204 6.205 // We want to visit all nmethods after NmethodSweepFraction 6.206 @@ -274,7 +324,7 @@ 6.207 // remaining number of invocations. This is only an estimate since 6.208 // the number of nmethods changes during the sweep so the final 6.209 // stage must iterate until it there are no more nmethods. 6.210 - int todo = (CodeCache::nof_nmethods() - _seen) / _invocations; 6.211 + int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left; 6.212 int swept_count = 0; 6.213 6.214 6.215 @@ -286,11 +336,11 @@ 6.216 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 6.217 6.218 // The last invocation iterates until there are no more nmethods 6.219 - for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) { 6.220 + for (int i = 0; (i < todo || _sweep_fractions_left == 1) && _current != NULL; i++) { 6.221 swept_count++; 6.222 if (SafepointSynchronize::is_synchronizing()) { // Safepoint request 6.223 if (PrintMethodFlushing && Verbose) { 6.224 - tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations); 6.225 + tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left); 6.226 } 6.227 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 6.228 6.229 @@ -314,19 +364,7 @@ 6.230 } 6.231 } 6.232 6.233 - assert(_invocations > 1 || 
_current == NULL, "must have scanned the whole cache"); 6.234 - 6.235 - if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) { 6.236 - // we've completed a scan without making progress but there were 6.237 - // nmethods we were unable to process either because they were 6.238 - // locked or were still on stack. We don't have to aggressively 6.239 - // clean them up so just stop scanning. We could scan once more 6.240 - // but that complicates the control logic and it's unlikely to 6.241 - // matter much. 6.242 - if (PrintMethodFlushing) { 6.243 - tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep"); 6.244 - } 6.245 - } 6.246 + assert(_sweep_fractions_left > 1 || _current == NULL, "must have scanned the whole cache"); 6.247 6.248 jlong sweep_end_counter = os::elapsed_counter(); 6.249 jlong sweep_time = sweep_end_counter - sweep_start_counter; 6.250 @@ -340,21 +378,21 @@ 6.251 event.set_starttime(sweep_start_counter); 6.252 event.set_endtime(sweep_end_counter); 6.253 event.set_sweepIndex(_traversals); 6.254 - event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1); 6.255 + event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1); 6.256 event.set_sweptCount(swept_count); 6.257 event.set_flushedCount(_flushed_count); 6.258 - event.set_markedCount(_marked_count); 6.259 + event.set_markedCount(_marked_for_reclamation_count); 6.260 event.set_zombifiedCount(_zombified_count); 6.261 event.commit(); 6.262 } 6.263 6.264 #ifdef ASSERT 6.265 if(PrintMethodFlushing) { 6.266 - tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time); 6.267 + tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _sweep_fractions_left, (jlong)sweep_time); 6.268 } 6.269 #endif 6.270 6.271 - if (_invocations == 1) { 6.272 + if (_sweep_fractions_left == 1) { 6.273 _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep); 6.274 
log_sweep("finished"); 6.275 } 6.276 @@ -368,12 +406,37 @@ 6.277 // it only makes sense to re-enable compilation if we have actually freed memory. 6.278 // Note that typically several kB are released for sweeping 16MB of the code 6.279 // cache. As a result, 'freed_memory' > 0 to restart the compiler. 6.280 - if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0))) { 6.281 + if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) { 6.282 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); 6.283 log_sweep("restart_compiler"); 6.284 } 6.285 } 6.286 6.287 +/** 6.288 + * This function updates the sweeper statistics that keep track of nmethods 6.289 + * state changes. If there is 'enough' state change, the sweeper is invoked 6.290 + * as soon as possible. There can be data races on _bytes_changed. The data 6.291 + * races are benign, since it does not matter if we lose a couple of bytes. 6.292 + * In the worst case we call the sweeper a little later. Also, we are guaranteed 6.293 + * to invoke the sweeper if the code cache gets full. 6.294 + */ 6.295 +void NMethodSweeper::report_state_change(nmethod* nm) { 6.296 + _bytes_changed += nm->total_size(); 6.297 + possibly_enable_sweeper(); 6.298 +} 6.299 + 6.300 +/** 6.301 + * Function determines if there was 'enough' state change in the code cache to invoke 6.302 + * the sweeper again. Currently, we determine 'enough' as more than 1% state change in 6.303 + * the code cache since the last sweep. 
6.304 + */ 6.305 +void NMethodSweeper::possibly_enable_sweeper() { 6.306 + double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100; 6.307 + if (percent_changed > 1.0) { 6.308 + _should_sweep = true; 6.309 + } 6.310 +} 6.311 + 6.312 class NMethodMarker: public StackObj { 6.313 private: 6.314 CompilerThread* _thread; 6.315 @@ -424,9 +487,6 @@ 6.316 MutexLocker cl(CompiledIC_lock); 6.317 nm->cleanup_inline_caches(); 6.318 SWEEP(nm); 6.319 - } else { 6.320 - _locked_seen++; 6.321 - SWEEP(nm); 6.322 } 6.323 return freed_memory; 6.324 } 6.325 @@ -448,8 +508,9 @@ 6.326 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm); 6.327 } 6.328 nm->mark_for_reclamation(); 6.329 - request_nmethod_marking(); 6.330 - _marked_count++; 6.331 + // Keep track of code cache state change 6.332 + _bytes_changed += nm->total_size(); 6.333 + _marked_for_reclamation_count++; 6.334 SWEEP(nm); 6.335 } 6.336 } else if (nm->is_not_entrant()) { 6.337 @@ -459,18 +520,14 @@ 6.338 if (PrintMethodFlushing && Verbose) { 6.339 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm); 6.340 } 6.341 + // Code cache state change is tracked in make_zombie() 6.342 nm->make_zombie(); 6.343 - request_nmethod_marking(); 6.344 _zombified_count++; 6.345 SWEEP(nm); 6.346 } else { 6.347 // Still alive, clean up its inline caches 6.348 MutexLocker cl(CompiledIC_lock); 6.349 nm->cleanup_inline_caches(); 6.350 - // we coudn't transition this nmethod so don't immediately 6.351 - // request a rescan. If this method stays on the stack for a 6.352 - // long time we don't want to keep rescanning the code cache. 
6.353 - _not_entrant_seen_on_stack++; 6.354 SWEEP(nm); 6.355 } 6.356 } else if (nm->is_unloaded()) { 6.357 @@ -485,8 +542,8 @@ 6.358 release_nmethod(nm); 6.359 _flushed_count++; 6.360 } else { 6.361 + // Code cache state change is tracked in make_zombie() 6.362 nm->make_zombie(); 6.363 - request_nmethod_marking(); 6.364 _zombified_count++; 6.365 SWEEP(nm); 6.366 } 6.367 @@ -514,7 +571,11 @@ 6.368 // The second condition ensures that methods are not immediately made not-entrant 6.369 // after compilation. 6.370 nm->make_not_entrant(); 6.371 - request_nmethod_marking(); 6.372 + // Code cache state change is tracked in make_not_entrant() 6.373 + if (PrintMethodFlushing && Verbose) { 6.374 + tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f", 6.375 + nm->compile_id(), nm, nm->hotness_counter(), reset_val, threshold); 6.376 + } 6.377 } 6.378 } 6.379 }
7.1 --- a/src/share/vm/runtime/sweeper.hpp Mon Nov 11 11:53:33 2013 -0800 7.2 +++ b/src/share/vm/runtime/sweeper.hpp Tue Nov 12 09:32:50 2013 +0100 7.3 @@ -53,22 +53,22 @@ 7.4 // is full. 7.5 7.6 class NMethodSweeper : public AllStatic { 7.7 - static long _traversals; // Stack scan count, also sweep ID. 7.8 - static nmethod* _current; // Current nmethod 7.9 - static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache 7.10 - static int _flushed_count; // Nof. nmethods flushed in current sweep 7.11 - static int _zombified_count; // Nof. nmethods made zombie in current sweep 7.12 - static int _marked_count; // Nof. nmethods marked for reclaim in current sweep 7.13 + static long _traversals; // Stack scan count, also sweep ID. 7.14 + static long _time_counter; // Virtual time used to periodically invoke sweeper 7.15 + static long _last_sweep; // Value of _time_counter when the last sweep happened 7.16 + static nmethod* _current; // Current nmethod 7.17 + static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache 7.18 + static int _flushed_count; // Nof. nmethods flushed in current sweep 7.19 + static int _zombified_count; // Nof. nmethods made zombie in current sweep 7.20 + static int _marked_for_reclamation_count; // Nof. nmethods marked for reclaim in current sweep 7.21 7.22 - static volatile int _invocations; // No. of invocations left until we are completed with this pass 7.23 - static volatile int _sweep_started; // Flag to control conc sweeper 7.24 - 7.25 - //The following are reset in mark_active_nmethods and synchronized by the safepoint 7.26 - static bool _request_mark_phase; // Indicates that a change has happend and we need another mark pahse, 7.27 - // always checked and reset at a safepoint so memory will be in sync. 
7.28 - static int _locked_seen; // Number of locked nmethods encountered during the scan 7.29 - static int _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack 7.30 - 7.31 + static volatile int _sweep_fractions_left; // Nof. invocations left until we are completed with this pass 7.32 + static volatile int _sweep_started; // Flag to control conc sweeper 7.33 + static volatile bool _should_sweep; // Indicates if we should invoke the sweeper 7.34 + static volatile int _bytes_changed; // Counts the total nmethod size if the nmethod changed from: 7.35 + // 1) alive -> not_entrant 7.36 + // 2) not_entrant -> zombie 7.37 + // 3) zombie -> marked_for_reclamation 7.38 // Stat counters 7.39 static int _total_nof_methods_reclaimed; // Accumulated nof methods flushed 7.40 static jlong _total_time_sweeping; // Accumulated time sweeping 7.41 @@ -81,9 +81,6 @@ 7.42 7.43 static bool sweep_in_progress(); 7.44 static void sweep_code_cache(); 7.45 - static void request_nmethod_marking() { _request_mark_phase = true; } 7.46 - static void reset_nmethod_marking() { _request_mark_phase = false; } 7.47 - static bool need_marking_phase() { return _request_mark_phase; } 7.48 7.49 static int _hotness_counter_reset_val; 7.50 7.51 @@ -109,13 +106,8 @@ 7.52 7.53 static int sort_nmethods_by_hotness(nmethod** nm1, nmethod** nm2); 7.54 static int hotness_counter_reset_val(); 7.55 - 7.56 - static void notify() { 7.57 - // Request a new sweep of the code cache from the beginning. No 7.58 - // need to synchronize the setting of this flag since it only 7.59 - // changes to false at safepoint so we can never overwrite it with false. 7.60 - request_nmethod_marking(); 7.61 - } 7.62 + static void report_state_change(nmethod* nm); 7.63 + static void possibly_enable_sweeper(); 7.64 }; 7.65 7.66 #endif // SHARE_VM_RUNTIME_SWEEPER_HPP