duke@435: /* drchase@6680: * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. duke@435: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. duke@435: * duke@435: * This code is free software; you can redistribute it and/or modify it duke@435: * under the terms of the GNU General Public License version 2 only, as duke@435: * published by the Free Software Foundation. duke@435: * duke@435: * This code is distributed in the hope that it will be useful, but WITHOUT duke@435: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or duke@435: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License duke@435: * version 2 for more details (a copy is included in the LICENSE file that duke@435: * accompanied this code). duke@435: * duke@435: * You should have received a copy of the GNU General Public License version duke@435: * 2 along with this work; if not, write to the Free Software Foundation, duke@435: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. duke@435: * trims@1907: * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA trims@1907: * or visit www.oracle.com if you need additional information or have any trims@1907: * questions. duke@435: * duke@435: */ duke@435: stefank@2314: #include "precompiled.hpp" stefank@2314: #include "code/codeCache.hpp" coleenp@4037: #include "code/compiledIC.hpp" coleenp@4037: #include "code/icBuffer.hpp" stefank@2314: #include "code/nmethod.hpp" stefank@2314: #include "compiler/compileBroker.hpp" stefank@2314: #include "memory/resourceArea.hpp" coleenp@4037: #include "oops/method.hpp" stefank@2314: #include "runtime/atomic.hpp" stefank@2314: #include "runtime/compilationPolicy.hpp" stefank@2314: #include "runtime/mutexLocker.hpp" goetz@6911: #include "runtime/orderAccess.inline.hpp" stefank@2314: #include "runtime/os.hpp" stefank@2314: #include "runtime/sweeper.hpp" goetz@6911: #include "runtime/thread.inline.hpp" stefank@2314: #include "runtime/vm_operations.hpp" sla@5237: #include "trace/tracing.hpp" stefank@2314: #include "utilities/events.hpp" mgronlun@6131: #include "utilities/ticks.inline.hpp" stefank@2314: #include "utilities/xmlstream.hpp" duke@435: drchase@6680: PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC drchase@6680: never@2916: #ifdef ASSERT never@2916: never@2916: #define SWEEP(nm) record_sweep(nm, __LINE__) never@2916: // Sweeper logging code never@2916: class SweeperRecord { never@2916: public: never@2916: int traversal; never@2916: int invocation; never@2916: int compile_id; never@2916: long traversal_mark; never@2916: int state; never@2916: const char* kind; never@2916: address vep; never@2916: address uep; never@2916: int line; never@2916: never@2916: void print() { never@2916: tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = " never@2916: PTR_FORMAT " state = %d traversal_mark %d line = %d", never@2916: traversal, never@2916: invocation, never@2916: compile_id, never@2916: kind == NULL ? "" : kind, never@2916: uep, never@2916: vep, never@2916: state, never@2916: traversal_mark, never@2916: line); never@2916: } never@2916: }; never@2916: never@2916: static int _sweep_index = 0; never@2916: static SweeperRecord* _records = NULL; never@2916: never@2916: void NMethodSweeper::report_events(int id, address entry) { never@2916: if (_records != NULL) { never@2916: for (int i = _sweep_index; i < SweeperLogEntries; i++) { never@2916: if (_records[i].uep == entry || never@2916: _records[i].vep == entry || never@2916: _records[i].compile_id == id) { never@2916: _records[i].print(); never@2916: } never@2916: } never@2916: for (int i = 0; i < _sweep_index; i++) { never@2916: if (_records[i].uep == entry || never@2916: _records[i].vep == entry || never@2916: _records[i].compile_id == id) { never@2916: _records[i].print(); never@2916: } never@2916: } never@2916: } never@2916: } never@2916: never@2916: void NMethodSweeper::report_events() { never@2916: if (_records != NULL) { never@2916: for (int i = _sweep_index; i < SweeperLogEntries; i++) { never@2916: // skip empty records never@2916: if (_records[i].vep == NULL) continue; never@2916: _records[i].print(); never@2916: } never@2916: for (int i = 0; i < _sweep_index; i++) { never@2916: // skip empty records never@2916: if (_records[i].vep == NULL) continue; never@2916: _records[i].print(); never@2916: } never@2916: } never@2916: } never@2916: never@2916: void NMethodSweeper::record_sweep(nmethod* nm, int line) { never@2916: if (_records != NULL) { never@2916: _records[_sweep_index].traversal = _traversals; never@2916: _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark; anoll@6099: _records[_sweep_index].invocation = _sweep_fractions_left; never@2916: _records[_sweep_index].compile_id = nm->compile_id(); never@2916: _records[_sweep_index].kind = nm->compile_kind(); never@2916: _records[_sweep_index].state = nm->_state; never@2916: _records[_sweep_index].vep = nm->verified_entry_point(); never@2916: _records[_sweep_index].uep = nm->entry_point(); never@2916: _records[_sweep_index].line = line; never@2916: _sweep_index = (_sweep_index + 1) % SweeperLogEntries; never@2916: } never@2916: } never@2916: #else never@2916: #define SWEEP(nm) never@2916: #endif never@2916: anoll@6099: nmethod* NMethodSweeper::_current = NULL; // Current nmethod anoll@6099: long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID. anoll@6207: long NMethodSweeper::_total_nof_code_cache_sweeps = 0; // Total number of full sweeps of the code cache anoll@6099: long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper anoll@6099: long NMethodSweeper::_last_sweep = 0; // Value of _time_counter when the last sweep happened anoll@6099: int NMethodSweeper::_seen = 0; // Nof. nmethod we have currently processed in current pass of CodeCache anoll@6099: int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep anoll@6099: int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep anoll@6099: int NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep never@2916: anoll@6099: volatile bool NMethodSweeper::_should_sweep = true; // Indicates if we should invoke the sweeper anoll@6099: volatile int NMethodSweeper::_sweep_fractions_left = 0; // Nof. invocations left until we are completed with this pass anoll@6099: volatile int NMethodSweeper::_sweep_started = 0; // Flag to control conc sweeper anoll@6099: volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from: anoll@6099: // 1) alive -> not_entrant anoll@6099: // 2) not_entrant -> zombie anoll@6099: // 3) zombie -> marked_for_reclamation anoll@6207: int NMethodSweeper::_hotness_counter_reset_val = 0; duke@435: anoll@6207: long NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed anoll@6207: long NMethodSweeper::_total_nof_c2_methods_reclaimed = 0; // Accumulated nof methods flushed anoll@6207: size_t NMethodSweeper::_total_flushed_size = 0; // Total number of bytes flushed from the code cache anoll@6207: Tickspan NMethodSweeper::_total_time_sweeping; // Accumulated time sweeping anoll@6207: Tickspan NMethodSweeper::_total_time_this_sweep; // Total time this sweep anoll@6207: Tickspan NMethodSweeper::_peak_sweep_time; // Peak time for a full sweep anoll@6207: Tickspan NMethodSweeper::_peak_sweep_fraction_time; // Peak time sweeping one fraction anoll@6207: anoll@5792: sla@5237: jrose@1424: class MarkActivationClosure: public CodeBlobClosure { jrose@1424: public: jrose@1424: virtual void do_code_blob(CodeBlob* cb) { anoll@5792: if (cb->is_nmethod()) { anoll@5792: nmethod* nm = (nmethod*)cb; anoll@5792: nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val()); anoll@5792: // If we see an activation belonging to a non_entrant nmethod, we mark it. anoll@5792: if (nm->is_not_entrant()) { anoll@5792: nm->mark_as_seen_on_stack(); anoll@5792: } jrose@1424: } jrose@1424: } jrose@1424: }; jrose@1424: static MarkActivationClosure mark_activation_closure; jrose@1424: anoll@5792: class SetHotnessClosure: public CodeBlobClosure { anoll@5792: public: anoll@5792: virtual void do_code_blob(CodeBlob* cb) { anoll@5792: if (cb->is_nmethod()) { anoll@5792: nmethod* nm = (nmethod*)cb; anoll@5792: nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val()); anoll@5792: } anoll@5792: } anoll@5792: }; anoll@5792: static SetHotnessClosure set_hotness_closure; anoll@5792: anoll@5792: anoll@5792: int NMethodSweeper::hotness_counter_reset_val() { anoll@5792: if (_hotness_counter_reset_val == 0) { anoll@5792: _hotness_counter_reset_val = (ReservedCodeCacheSize < M) ? 1 : (ReservedCodeCacheSize / M) * 2; anoll@5792: } anoll@5792: return _hotness_counter_reset_val; anoll@5792: } neliasso@5038: bool NMethodSweeper::sweep_in_progress() { neliasso@5038: return (_current != NULL); neliasso@5038: } neliasso@5038: anoll@5792: // Scans the stacks of all Java threads and marks activations of not-entrant methods. anoll@5792: // No need to synchronize access, since 'mark_active_nmethods' is always executed at a anoll@5792: // safepoint. anoll@5792: void NMethodSweeper::mark_active_nmethods() { duke@435: assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint"); anoll@5792: // If we do not want to reclaim not-entrant or zombie methods there is no need anoll@5792: // to scan stacks anoll@5792: if (!MethodFlushing) { anoll@5792: return; anoll@5792: } duke@435: anoll@6099: // Increase time so that we can estimate when to invoke the sweeper again. anoll@6099: _time_counter++; anoll@6099: duke@435: // Check for restart duke@435: assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid"); anoll@6099: if (!sweep_in_progress()) { anoll@6099: _seen = 0; anoll@6099: _sweep_fractions_left = NmethodSweepFraction; anoll@6099: _current = CodeCache::first_nmethod(); anoll@6099: _traversals += 1; mgronlun@6131: _total_time_this_sweep = Tickspan(); sla@5237: duke@435: if (PrintMethodFlushing) { duke@435: tty->print_cr("### Sweep: stack traversal %d", _traversals); duke@435: } jrose@1424: Threads::nmethods_do(&mark_activation_closure); duke@435: anoll@5792: } else { anoll@5792: // Only set hotness counter anoll@5792: Threads::nmethods_do(&set_hotness_closure); duke@435: } duke@435: anoll@5792: OrderAccess::storestore(); duke@435: } anoll@6099: /** anoll@6099: * This function invokes the sweeper if at least one of the three conditions is met: anoll@6099: * (1) The code cache is getting full anoll@6099: * (2) There are sufficient state changes in/since the last sweep. anoll@6099: * (3) We have not been sweeping for 'some time' anoll@6099: */ never@1893: void NMethodSweeper::possibly_sweep() { never@1999: assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode"); anoll@6114: // Only compiler threads are allowed to sweep anoll@6114: if (!MethodFlushing || !sweep_in_progress() || !Thread::current()->is_Compiler_thread()) { anoll@5792: return; anoll@5792: } never@1893: anoll@6099: // If there was no state change while nmethod sweeping, 'should_sweep' will be false. anoll@6099: // This is one of the two places where should_sweep can be set to true. The general anoll@6099: // idea is as follows: If there is enough free space in the code cache, there is no anoll@6099: // need to invoke the sweeper. The following formula (which determines whether to invoke anoll@6099: // the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes anoll@6099: // we need less frequent sweeps than for smaller ReservedCodecCacheSizes. Furthermore, anoll@6099: // the formula considers how much space in the code cache is currently used. Here are anoll@6099: // some examples that will (hopefully) help in understanding. anoll@6099: // anoll@6099: // Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since anoll@6099: // the result of the division is 0. This anoll@6099: // keeps the used code cache size small anoll@6099: // (important for embedded Java) anoll@6099: // Large ReservedCodeCacheSize : (e.g., 256M + code cache is 10% full). The formula anoll@6099: // computes: (256 / 16) - 1 = 15 anoll@6099: // As a result, we invoke the sweeper after anoll@6099: // 15 invocations of 'mark_active_nmethods. anoll@6099: // Large ReservedCodeCacheSize: (e.g., 256M + code Cache is 90% full). The formula anoll@6099: // computes: (256 / 16) - 10 = 6. anoll@6099: if (!_should_sweep) { anoll@6205: const int time_since_last_sweep = _time_counter - _last_sweep; anoll@6205: // ReservedCodeCacheSize has an 'unsigned' type. We need a 'signed' type for max_wait_time, anoll@6205: // since 'time_since_last_sweep' can be larger than 'max_wait_time'. If that happens using anoll@6205: // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive anoll@6205: // value) that disables the intended periodic sweeps. anoll@6205: const int max_wait_time = ReservedCodeCacheSize / (16 * M); anoll@6205: double wait_until_next_sweep = max_wait_time - time_since_last_sweep - CodeCache::reverse_free_ratio(); anoll@6205: assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect"); anoll@6099: anoll@6099: if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) { anoll@6099: _should_sweep = true; anoll@6099: } anoll@6099: } anoll@6099: anoll@6099: if (_should_sweep && _sweep_fractions_left > 0) { never@1893: // Only one thread at a time will sweep never@1893: jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 ); never@1893: if (old != 0) { never@1893: return; never@1893: } never@2916: #ifdef ASSERT never@2916: if (LogSweeper && _records == NULL) { never@2916: // Create the ring buffer for the logging code zgu@3900: _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC); never@2916: memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries); never@2916: } never@2916: #endif anoll@6099: anoll@6099: if (_sweep_fractions_left > 0) { never@1999: sweep_code_cache(); anoll@6099: _sweep_fractions_left--; anoll@6099: } anoll@6099: anoll@6099: // We are done with sweeping the code cache once. anoll@6099: if (_sweep_fractions_left == 0) { anoll@6207: _total_nof_code_cache_sweeps++; anoll@6099: _last_sweep = _time_counter; anoll@6099: // Reset flag; temporarily disables sweeper anoll@6099: _should_sweep = false; anoll@6099: // If there was enough state change, 'possibly_enable_sweeper()' anoll@6099: // sets '_should_sweep' to true anoll@6099: possibly_enable_sweeper(); anoll@6099: // Reset _bytes_changed only if there was enough state change. _bytes_changed anoll@6099: // can further increase by calls to 'report_state_change'. anoll@6099: if (_should_sweep) { anoll@6099: _bytes_changed = 0; anoll@6099: } never@1999: } goetz@6493: // Release work, because another compiler thread could continue. goetz@6493: OrderAccess::release_store((int*)&_sweep_started, 0); never@1893: } never@1893: } never@1893: never@1893: void NMethodSweeper::sweep_code_cache() { jcm@8713: ResourceMark rm; mgronlun@6131: Ticks sweep_start_counter = Ticks::now(); sla@5237: anoll@6099: _flushed_count = 0; anoll@6099: _zombified_count = 0; anoll@6099: _marked_for_reclamation_count = 0; sla@5237: never@1893: if (PrintMethodFlushing && Verbose) { anoll@6099: tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left); never@1893: } never@1893: neliasso@5038: if (!CompileBroker::should_compile_new_jobs()) { neliasso@5038: // If we have turned off compilations we might as well do full sweeps neliasso@5038: // in order to reach the clean state faster. Otherwise the sleeping compiler anoll@5792: // threads will slow down sweeping. anoll@6099: _sweep_fractions_left = 1; neliasso@5038: } neliasso@5038: never@1999: // We want to visit all nmethods after NmethodSweepFraction never@1999: // invocations so divide the remaining number of nmethods by the never@1999: // remaining number of invocations. This is only an estimate since never@1999: // the number of nmethods changes during the sweep so the final never@1999: // stage must iterate until it there are no more nmethods. anoll@6099: int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left; anoll@5734: int swept_count = 0; never@1893: anoll@5792: never@1893: assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here"); never@1893: assert(!CodeCache_lock->owned_by_self(), "just checking"); never@1893: anoll@5792: int freed_memory = 0; never@1893: { never@1893: MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); never@1893: never@1999: // The last invocation iterates until there are no more nmethods anoll@6099: for (int i = 0; (i < todo || _sweep_fractions_left == 1) && _current != NULL; i++) { anoll@5734: swept_count++; iveresov@3572: if (SafepointSynchronize::is_synchronizing()) { // Safepoint request iveresov@3572: if (PrintMethodFlushing && Verbose) { anoll@6099: tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left); iveresov@3572: } iveresov@3572: MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); never@1893: iveresov@3572: assert(Thread::current()->is_Java_thread(), "should be java thread"); iveresov@3572: JavaThread* thread = (JavaThread*)Thread::current(); iveresov@3572: ThreadBlockInVM tbivm(thread); iveresov@3572: thread->java_suspend_self(); iveresov@3572: } never@1999: // Since we will give up the CodeCache_lock, always skip ahead never@1999: // to the next nmethod. Other blobs can be deleted by other never@1999: // threads but nmethods are only reclaimed by the sweeper. never@1970: nmethod* next = CodeCache::next_nmethod(_current); never@1893: never@1893: // Now ready to process nmethod and give up CodeCache_lock never@1893: { never@1893: MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); anoll@5792: freed_memory += process_nmethod(_current); never@1893: } never@1893: _seen++; never@1893: _current = next; never@1893: } never@1893: } never@1893: anoll@6099: assert(_sweep_fractions_left > 1 || _current == NULL, "must have scanned the whole cache"); never@1893: mgronlun@6131: const Ticks sweep_end_counter = Ticks::now(); mgronlun@6131: const Tickspan sweep_time = sweep_end_counter - sweep_start_counter; sla@5237: _total_time_sweeping += sweep_time; sla@5237: _total_time_this_sweep += sweep_time; sla@5237: _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time); anoll@6207: _total_flushed_size += freed_memory; sla@5237: _total_nof_methods_reclaimed += _flushed_count; sla@5237: sla@5237: EventSweepCodeCache event(UNTIMED); sla@5237: if (event.should_commit()) { sla@5237: event.set_starttime(sweep_start_counter); sla@5237: event.set_endtime(sweep_end_counter); sla@5237: event.set_sweepIndex(_traversals); anoll@6099: event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1); anoll@5734: event.set_sweptCount(swept_count); sla@5237: event.set_flushedCount(_flushed_count); anoll@6099: event.set_markedCount(_marked_for_reclamation_count); sla@5237: event.set_zombifiedCount(_zombified_count); sla@5237: event.commit(); sla@5237: } sla@5237: never@1893: #ifdef ASSERT never@1893: if(PrintMethodFlushing) { mgronlun@6131: tty->print_cr("### sweeper: sweep time(%d): " mgronlun@6131: INT64_FORMAT, _sweep_fractions_left, (jlong)sweep_time.value()); never@1893: } never@1893: #endif never@1999: anoll@6099: if (_sweep_fractions_left == 1) { sla@5237: _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep); never@1999: log_sweep("finished"); never@1999: } neliasso@5038: anoll@5792: // Sweeper is the only case where memory is released, check here if it anoll@5792: // is time to restart the compiler. Only checking if there is a certain anoll@5792: // amount of free memory in the code cache might lead to re-enabling anoll@5792: // compilation although no memory has been released. For example, there are anoll@5792: // cases when compilation was disabled although there is 4MB (or more) free anoll@5792: // memory in the code cache. The reason is code cache fragmentation. Therefore, anoll@5792: // it only makes sense to re-enable compilation if we have actually freed memory. anoll@5792: // Note that typically several kB are released for sweeping 16MB of the code anoll@5792: // cache. As a result, 'freed_memory' > 0 to restart the compiler. anoll@6099: if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) { neliasso@5038: CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); neliasso@5038: log_sweep("restart_compiler"); neliasso@5038: } never@1893: } never@1893: anoll@6099: /** anoll@6099: * This function updates the sweeper statistics that keep track of nmethods anoll@6099: * state changes. If there is 'enough' state change, the sweeper is invoked anoll@6099: * as soon as possible. There can be data races on _bytes_changed. The data anoll@6099: * races are benign, since it does not matter if we loose a couple of bytes. anoll@6099: * In the worst case we call the sweeper a little later. Also, we are guaranteed anoll@6099: * to invoke the sweeper if the code cache gets full. anoll@6099: */ anoll@6099: void NMethodSweeper::report_state_change(nmethod* nm) { anoll@6099: _bytes_changed += nm->total_size(); anoll@6099: possibly_enable_sweeper(); anoll@6099: } anoll@6099: anoll@6099: /** anoll@6099: * Function determines if there was 'enough' state change in the code cache to invoke anoll@6099: * the sweeper again. Currently, we determine 'enough' as more than 1% state change in anoll@6099: * the code cache since the last sweep. anoll@6099: */ anoll@6099: void NMethodSweeper::possibly_enable_sweeper() { anoll@6099: double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100; anoll@6099: if (percent_changed > 1.0) { anoll@6099: _should_sweep = true; anoll@6099: } anoll@6099: } anoll@6099: never@2916: class NMethodMarker: public StackObj { never@2916: private: never@2916: CompilerThread* _thread; never@2916: public: never@2916: NMethodMarker(nmethod* nm) { never@2916: _thread = CompilerThread::current(); coleenp@4037: if (!nm->is_zombie() && !nm->is_unloaded()) { coleenp@4037: // Only expose live nmethods for scanning anoll@5792: _thread->set_scanned_nmethod(nm); anoll@5792: } coleenp@4037: } never@2916: ~NMethodMarker() { never@2916: _thread->set_scanned_nmethod(NULL); never@2916: } never@2916: }; never@2916: coleenp@4037: void NMethodSweeper::release_nmethod(nmethod *nm) { coleenp@4037: // Clean up any CompiledICHolders coleenp@4037: { coleenp@4037: ResourceMark rm; coleenp@4037: MutexLocker ml_patch(CompiledIC_lock); coleenp@4037: RelocIterator iter(nm); coleenp@4037: while (iter.next()) { coleenp@4037: if (iter.type() == relocInfo::virtual_call_type) { coleenp@4037: CompiledIC::cleanup_call_site(iter.virtual_call_reloc()); coleenp@4037: } coleenp@4037: } coleenp@4037: } coleenp@4037: coleenp@4037: MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); coleenp@4037: nm->flush(); coleenp@4037: } duke@435: anoll@5792: int NMethodSweeper::process_nmethod(nmethod *nm) { never@1893: assert(!CodeCache_lock->owned_by_self(), "just checking"); never@1893: anoll@5792: int freed_memory = 0; never@2916: // Make sure this nmethod doesn't get unloaded during the scan, anoll@5792: // since safepoints may happen during acquired below locks. never@2916: NMethodMarker nmm(nm); never@2916: SWEEP(nm); never@2916: duke@435: // Skip methods that are currently referenced by the VM duke@435: if (nm->is_locked_by_vm()) { duke@435: // But still remember to clean-up inline caches for alive nmethods duke@435: if (nm->is_alive()) { anoll@5792: // Clean inline caches that point to zombie/non-entrant methods never@1893: MutexLocker cl(CompiledIC_lock); duke@435: nm->cleanup_inline_caches(); never@2916: SWEEP(nm); duke@435: } anoll@5792: return freed_memory; duke@435: } duke@435: duke@435: if (nm->is_zombie()) { anoll@5792: // If it is the first time we see nmethod then we mark it. Otherwise, anoll@5792: // we reclaim it. When we have seen a zombie method twice, we know that never@1999: // there are no inline caches that refer to it. duke@435: if (nm->is_marked_for_reclamation()) { duke@435: assert(!nm->is_locked_by_vm(), "must not flush locked nmethods"); ysr@1376: if (PrintMethodFlushing && Verbose) { kvn@1637: tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm); ysr@1376: } anoll@5792: freed_memory = nm->total_size(); anoll@6207: if (nm->is_compiled_by_c2()) { anoll@6207: _total_nof_c2_methods_reclaimed++; anoll@6207: } coleenp@4037: release_nmethod(nm); sla@5237: _flushed_count++; duke@435: } else { ysr@1376: if (PrintMethodFlushing && Verbose) { kvn@1637: tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm); ysr@1376: } duke@435: nm->mark_for_reclamation(); anoll@6099: // Keep track of code cache state change anoll@6099: _bytes_changed += nm->total_size(); anoll@6099: _marked_for_reclamation_count++; never@2916: SWEEP(nm); duke@435: } duke@435: } else if (nm->is_not_entrant()) { anoll@5792: // If there are no current activations of this method on the duke@435: // stack we can safely convert it to a zombie method thartmann@8075: if (nm->can_convert_to_zombie()) { ysr@1376: if (PrintMethodFlushing && Verbose) { kvn@1637: tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm); ysr@1376: } thartmann@8073: // Clear ICStubs to prevent back patching stubs of zombie or unloaded thartmann@8073: // nmethods during the next safepoint (see ICStub::finalize). thartmann@8073: MutexLocker cl(CompiledIC_lock); thartmann@8073: nm->clear_ic_stubs(); anoll@6099: // Code cache state change is tracked in make_zombie() duke@435: nm->make_zombie(); sla@5237: _zombified_count++; never@2916: SWEEP(nm); duke@435: } else { duke@435: // Still alive, clean up its inline caches never@1893: MutexLocker cl(CompiledIC_lock); duke@435: nm->cleanup_inline_caches(); never@2916: SWEEP(nm); duke@435: } duke@435: } else if (nm->is_unloaded()) { duke@435: // Unloaded code, just make it a zombie anoll@5792: if (PrintMethodFlushing && Verbose) { kvn@1637: tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm); anoll@5792: } ysr@1376: if (nm->is_osr_method()) { coleenp@4037: SWEEP(nm); duke@435: // No inline caches will ever point to osr methods, so we can just remove it anoll@5792: freed_memory = nm->total_size(); anoll@6207: if (nm->is_compiled_by_c2()) { anoll@6207: _total_nof_c2_methods_reclaimed++; anoll@6207: } coleenp@4037: release_nmethod(nm); sla@5237: _flushed_count++; duke@435: } else { thartmann@8075: { thartmann@8075: // Clean ICs of unloaded nmethods as well because they may reference other thartmann@8075: // unloaded nmethods that may be flushed earlier in the sweeper cycle. thartmann@8075: MutexLocker cl(CompiledIC_lock); thartmann@8075: nm->cleanup_inline_caches(); thartmann@8075: } anoll@6099: // Code cache state change is tracked in make_zombie() duke@435: nm->make_zombie(); sla@5237: _zombified_count++; never@2916: SWEEP(nm); duke@435: } duke@435: } else { kvn@1637: if (UseCodeCacheFlushing) { anoll@5792: if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) { anoll@5792: // Do not make native methods and OSR-methods not-entrant anoll@5792: nm->dec_hotness_counter(); anoll@5792: // Get the initial value of the hotness counter. This value depends on the anoll@5792: // ReservedCodeCacheSize anoll@5792: int reset_val = hotness_counter_reset_val(); anoll@5792: int time_since_reset = reset_val - nm->hotness_counter(); anoll@5792: double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity); anoll@5792: // The less free space in the code cache we have - the bigger reverse_free_ratio() is. anoll@5792: // I.e., 'threshold' increases with lower available space in the code cache and a higher anoll@5792: // NmethodSweepActivity. If the current hotness counter - which decreases from its initial anoll@5792: // value until it is reset by stack walking - is smaller than the computed threshold, the anoll@5792: // corresponding nmethod is considered for removal. anoll@5792: if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) { anoll@5792: // A method is marked as not-entrant if the method is anoll@5792: // 1) 'old enough': nm->hotness_counter() < threshold anoll@5792: // 2) The method was in_use for a minimum amount of time: (time_since_reset > 10) anoll@5792: // The second condition is necessary if we are dealing with very small code cache anoll@5792: // sizes (e.g., <10m) and the code cache size is too small to hold all hot methods. anoll@5792: // The second condition ensures that methods are not immediately made not-entrant anoll@5792: // after compilation. anoll@5792: nm->make_not_entrant(); anoll@6099: // Code cache state change is tracked in make_not_entrant() anoll@6099: if (PrintMethodFlushing && Verbose) { anoll@6099: tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f", anoll@6099: nm->compile_id(), nm, nm->hotness_counter(), reset_val, threshold); anoll@6099: } anoll@5792: } kvn@1637: } kvn@1637: } anoll@5792: // Clean-up all inline caches that point to zombie/non-reentrant methods never@1893: MutexLocker cl(CompiledIC_lock); duke@435: nm->cleanup_inline_caches(); never@2916: SWEEP(nm); duke@435: } anoll@5792: return freed_memory; duke@435: } kvn@1637: never@1999: // Print out some state information about the current sweep and the never@1999: // state of the code cache if it's requested. never@1999: void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) { never@1999: if (PrintMethodFlushing) { jcm@8713: ResourceMark rm; iveresov@2764: stringStream s; iveresov@2764: // Dump code cache state into a buffer before locking the tty, iveresov@2764: // because log_state() will use locks causing lock conflicts. iveresov@2764: CodeCache::log_state(&s); iveresov@2764: never@1999: ttyLocker ttyl; never@1999: tty->print("### sweeper: %s ", msg); never@1999: if (format != NULL) { never@1999: va_list ap; never@1999: va_start(ap, format); never@1999: tty->vprint(format, ap); never@1999: va_end(ap); never@1999: } drchase@6680: tty->print_cr("%s", s.as_string()); never@1999: } never@1999: never@1999: if (LogCompilation && (xtty != NULL)) { jcm@8713: ResourceMark rm; iveresov@2764: stringStream s; iveresov@2764: // Dump code cache state into a buffer before locking the tty, iveresov@2764: // because log_state() will use locks causing lock conflicts. iveresov@2764: CodeCache::log_state(&s); iveresov@2764: never@1999: ttyLocker ttyl; never@2001: xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count()); never@1999: if (format != NULL) { never@1999: va_list ap; never@1999: va_start(ap, format); never@1999: xtty->vprint(format, ap); never@1999: va_end(ap); never@1999: } drchase@6680: xtty->print("%s", s.as_string()); never@1999: xtty->stamp(); never@1999: xtty->end_elem(); never@1999: } never@1999: } anoll@6207: anoll@6207: void NMethodSweeper::print() { anoll@6207: ttyLocker ttyl; anoll@6207: tty->print_cr("Code cache sweeper statistics:"); anoll@6207: tty->print_cr(" Total sweep time: %1.0lfms", (double)_total_time_sweeping.value()/1000000); anoll@6207: tty->print_cr(" Total number of full sweeps: %ld", _total_nof_code_cache_sweeps); anoll@6207: tty->print_cr(" Total number of flushed methods: %ld(%ld C2 methods)", _total_nof_methods_reclaimed, anoll@6207: _total_nof_c2_methods_reclaimed); anoll@6207: tty->print_cr(" Total size of flushed methods: " SIZE_FORMAT "kB", _total_flushed_size/K); anoll@6207: }