/*
 * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "memory/gcLocker.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp"
#include "utilities/dtrace.hpp"

#ifndef USDT2
HS_DTRACE_PROBE_DECL(hs_private, cms__initmark__begin);
HS_DTRACE_PROBE_DECL(hs_private, cms__initmark__end);

HS_DTRACE_PROBE_DECL(hs_private, cms__remark__begin);
HS_DTRACE_PROBE_DECL(hs_private, cms__remark__end);
#endif /* !USDT2 */

//////////////////////////////////////////////////////////
// Methods in abstract class VM_CMS_Operation
//////////////////////////////////////////////////////////
void VM_CMS_Operation::acquire_pending_list_lock() {
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  ConcurrentMarkSweepThread::slt()->
    manipulatePLL(SurrogateLockerThread::acquirePLL);
}

void VM_CMS_Operation::release_and_notify_pending_list_lock() {
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  ConcurrentMarkSweepThread::slt()->
    manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
}

void VM_CMS_Operation::verify_before_gc() {
  if (VerifyBeforeGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm);
    HandleMark hm;
    FreelistLocker x(_collector);
    MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
    Universe::heap()->prepare_for_verify();
    Universe::verify();
  }
}

void VM_CMS_Operation::verify_after_gc() {
  if (VerifyAfterGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm);
    HandleMark hm;
    FreelistLocker x(_collector);
    MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
    Universe::verify();
  }
}

bool VM_CMS_Operation::lost_race() const {
  if (CMSCollector::abstract_state() == CMSCollector::Idling) {
    // We lost a race to a foreground collection
    // -- there's nothing to do
    return true;
  }
  assert(CMSCollector::abstract_state() == legal_state(),
         "Inconsistent collector state?");
  return false;
}

bool VM_CMS_Operation::doit_prologue() {
  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Possible deadlock");

  if (needs_pll()) {
    acquire_pending_list_lock();
  }
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();
  if (lost_race()) {
    assert(_prologue_succeeded == false, "Initialized in c'tor");
    Heap_lock->unlock();
    if (needs_pll()) {
      release_and_notify_pending_list_lock();
    }
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}

void VM_CMS_Operation::doit_epilogue() {
  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Possible deadlock");

  // Release the Heap_lock first.
  Heap_lock->unlock();
  if (needs_pll()) {
    release_and_notify_pending_list_lock();
  }
}

//////////////////////////////////////////////////////////
// Methods in class VM_CMS_Initial_Mark
//////////////////////////////////////////////////////////
void VM_CMS_Initial_Mark::doit() {
  if (lost_race()) {
    // Nothing to do.
    return;
  }
#ifndef USDT2
  HS_DTRACE_PROBE(hs_private, cms__initmark__begin);
#else /* USDT2 */
  HS_PRIVATE_CMS_INITMARK_BEGIN();
#endif /* USDT2 */

  _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);

  VM_CMS_Operation::verify_before_gc();

  IsGCActiveMark x; // stop-world GC active
  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause());

  VM_CMS_Operation::verify_after_gc();

  _collector->_gc_timer_cm->register_gc_pause_end();

#ifndef USDT2
  HS_DTRACE_PROBE(hs_private, cms__initmark__end);
#else /* USDT2 */
  HS_PRIVATE_CMS_INITMARK_END();
#endif /* USDT2 */
}

//////////////////////////////////////////////////////////
// Methods in class VM_CMS_Final_Remark_Operation
//////////////////////////////////////////////////////////
void VM_CMS_Final_Remark::doit() {
  if (lost_race()) {
    // Nothing to do.
    return;
  }
#ifndef USDT2
  HS_DTRACE_PROBE(hs_private, cms__remark__begin);
#else /* USDT2 */
  HS_PRIVATE_CMS_REMARK_BEGIN();
#endif /* USDT2 */

  _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, GCCause::_cms_final_remark);

  VM_CMS_Operation::verify_before_gc();

  IsGCActiveMark x; // stop-world GC active
  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause());

  VM_CMS_Operation::verify_after_gc();

  _collector->save_heap_summary();
  _collector->_gc_timer_cm->register_gc_pause_end();

#ifndef USDT2
  HS_DTRACE_PROBE(hs_private, cms__remark__end);
#else /* USDT2 */
  HS_PRIVATE_CMS_REMARK_END();
#endif /* USDT2 */
}

// VM operation to invoke a concurrent collection of a
// GenCollectedHeap heap.
void VM_GenCollectFullConcurrent::doit() {
  assert(Thread::current()->is_VM_thread(), "Should be VM thread");
  assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (_gc_count_before == gch->total_collections()) {
    // The "full" of the do_full_collection call below "forces"
    // a collection; the second arg, 0, below ensures that
    // only the young gen is collected. XXX In the future,
    // we'll probably need to have something in this interface
    // to say do this only if we are sure we will not bail
    // out to a full collection in this attempt, but that's
    // for the future.
    assert(SafepointSynchronize::is_at_safepoint(),
           "We can only be executing this arm of the if at a safepoint");
    GCCauseSetter gccs(gch, _gc_cause);
    gch->do_full_collection(gch->must_clear_all_soft_refs(),
                            0 /* collect only youngest gen */);
  } // Else no need for a foreground young gc
  assert((_gc_count_before < gch->total_collections()) ||
         (GC_locker::is_active() /* gc may have been skipped */
          && (_gc_count_before == gch->total_collections())),
         "total_collections() should be monotonically increasing");

  MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
  if (gch->total_full_collections() == _full_gc_count_before) {
    // Disable iCMS until the full collection is done, and
    // remember that we did so.
    CMSCollector::disable_icms();
    _disabled_icms = true;
    // In case the CMS thread was in icms_wait(), wake it up.
    CMSCollector::start_icms();
    // Nudge the CMS thread to start a concurrent collection.
    CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
  } else {
    assert(_full_gc_count_before < gch->total_full_collections(), "Error");
    FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
  }
}

bool VM_GenCollectFullConcurrent::evaluate_at_safepoint() const {
  Thread* thr = Thread::current();
  assert(thr != NULL, "Unexpected tid");
  if (!thr->is_Java_thread()) {
    assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    if (_gc_count_before != gch->total_collections()) {
      // No need to do a young gc; we'll just nudge the CMS thread
      // in the doit() method above, to be executed soon.
      assert(_gc_count_before < gch->total_collections(),
             "total_collections() should be monotonically increasing");
      return false;  // no need for foreground young gc
    }
  }
  return true;  // may still need foreground young gc
}

void VM_GenCollectFullConcurrent::doit_epilogue() {
  Thread* thr = Thread::current();
  assert(thr->is_Java_thread(), "just checking");
  JavaThread* jt = (JavaThread*)thr;
  // Release the Heap_lock first.
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();

  // It is fine to test whether the number of completed collections has
  // exceeded our request count without locking, because
  // the completion count is monotonically increasing;
  // this will break for very long-running apps when the
  // count overflows and wraps around. XXX fix me !!!
  // e.g. at the rate of 1 full gc per ms, this could
  // overflow in about 1000 years.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (_gc_cause != GCCause::_gc_locker &&
      gch->total_full_collections_completed() <= _full_gc_count_before) {
    // maybe we should change the condition to test _gc_cause ==
    // GCCause::_java_lang_system_gc, instead of
    // _gc_cause != GCCause::_gc_locker
    assert(_gc_cause == GCCause::_java_lang_system_gc,
           "the only way to get here is if this was a System.gc()-induced GC");
    assert(ExplicitGCInvokesConcurrent, "Error");
    // Now, wait for the witnessing concurrent gc cycle to complete,
    // but do so in native mode, because we want to lock the
    // FullGCCount_lock, which may be needed by the VM thread
    // or by the CMS thread, so we do not want to be suspended
    // while holding that lock.
    ThreadToNativeFromVM native(jt);
    MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
    // Either a concurrent or a stop-world full gc is sufficient
    // witness to our request.
    while (gch->total_full_collections_completed() <= _full_gc_count_before) {
      FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
  // Re-enable iCMS if we disabled it earlier.
  if (_disabled_icms) {
    CMSCollector::enable_icms();
  }
}
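
//////////////////////////////////////////////////////////
// Usage sketch
//////////////////////////////////////////////////////////
// A minimal, illustrative sketch of how these operations are typically
// scheduled; the real call sites live in the CMS collector and heap code,
// not in this file, and details may differ:
//
//   // From the CMS background thread, to trigger one of the pauses above:
//   //   VM_CMS_Initial_Mark op(_collector);   // collector passed to the ctor
//   //   VMThread::execute(&op);               // runs doit() at a safepoint
//
// VM_GenCollectFullConcurrent, by contrast, is requested on behalf of a Java
// thread (e.g. System.gc() with -XX:+ExplicitGCInvokesConcurrent): doit()
// executes in the VM thread, while doit_epilogue() runs in the requesting
// Java thread and waits on FullGCCount_lock until a full cycle completes.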