/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/vmThread.hpp"

// ======= Concurrent Mark Thread ========

// The CM thread is created when the G1 garbage collector is used

SurrogateLockerThread* ConcurrentMarkThread::_slt = NULL;

ConcurrentMarkThread::ConcurrentMarkThread(ConcurrentMark* cm) :
  ConcurrentGCThread(),
  _cm(cm),
  _started(false),
  _in_progress(false),
  _vtime_accum(0.0),
  _vtime_mark_accum(0.0) {
  create_and_start();
}

class CMCheckpointRootsFinalClosure: public VoidClosure {
  ConcurrentMark* _cm;
public:
  CMCheckpointRootsFinalClosure(ConcurrentMark* cm) :
    _cm(cm) {}

  void do_void() {
    _cm->checkpointRootsFinal(false); // !clear_all_soft_refs
  }
};

class CMCleanUp: public VoidClosure {
  ConcurrentMark* _cm;
public:
  CMCleanUp(ConcurrentMark* cm) :
    _cm(cm) {}

  void do_void() {
    _cm->cleanup();
  }
};


void ConcurrentMarkThread::run() {
  initialize_in_thread();
  _vtime_start = os::elapsedVTime();
  wait_for_universe_init();

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1_policy = g1h->g1_policy();
  G1MMUTracker* mmu_tracker = g1_policy->mmu_tracker();
  Thread* current_thread = Thread::current();

  while (!_should_terminate) {
    // wait until started is set.
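    // sleepBeforeNextCycle() below waits on CGC_lock until either _started
    // or _should_terminate is set; see its definition at the end of this file.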
    sleepBeforeNextCycle();
    if (_should_terminate) {
      break;
    }

    {
      ResourceMark rm;
      HandleMark   hm;
      double cycle_start = os::elapsedVTime();

      // We have to ensure that we finish scanning the root regions
      // before the next GC takes place. To ensure this we have to
      // make sure that we do not join the STS until the root regions
      // have been scanned. If we did then it's possible that a
      // subsequent GC could block us from joining the STS and proceed
      // without the root regions having been scanned, which would be a
      // correctness issue.

      double scan_start = os::elapsedTime();
      if (!cm()->has_aborted()) {
        if (G1Log::fine()) {
          gclog_or_tty->date_stamp(PrintGCDateStamps);
          gclog_or_tty->stamp(PrintGCTimeStamps);
          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
        }

        _cm->scanRootRegions();

        double scan_end = os::elapsedTime();
        if (G1Log::fine()) {
          gclog_or_tty->date_stamp(PrintGCDateStamps);
          gclog_or_tty->stamp(PrintGCTimeStamps);
          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]",
                                 scan_end - scan_start);
        }
      }

      double mark_start_sec = os::elapsedTime();
      if (G1Log::fine()) {
        gclog_or_tty->date_stamp(PrintGCDateStamps);
        gclog_or_tty->stamp(PrintGCTimeStamps);
        gclog_or_tty->print_cr("[GC concurrent-mark-start]");
      }

      int iter = 0;
      do {
        iter++;
        if (!cm()->has_aborted()) {
          _cm->markFromRoots();
        }

        double mark_end_time = os::elapsedVTime();
        double mark_end_sec = os::elapsedTime();
        _vtime_mark_accum += (mark_end_time - cycle_start);
        if (!cm()->has_aborted()) {
          if (g1_policy->adaptive_young_list_length()) {
            double now = os::elapsedTime();
            double remark_prediction_ms = g1_policy->predict_remark_time_ms();
            jlong sleep_time_ms = mmu_tracker->when_ms(now, remark_prediction_ms);
            os::sleep(current_thread, sleep_time_ms, false);
          }

          if (G1Log::fine()) {
            gclog_or_tty->date_stamp(PrintGCDateStamps);
            gclog_or_tty->stamp(PrintGCTimeStamps);
            gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf secs]",
                                   mark_end_sec - mark_start_sec);
          }

          CMCheckpointRootsFinalClosure final_cl(_cm);
          VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */);
          VMThread::execute(&op);
        }
        if (cm()->restart_for_overflow()) {
          if (G1TraceMarkStackOverflow) {
            gclog_or_tty->print_cr("Restarting conc marking because of MS overflow "
                                   "in remark (restart #%d).", iter);
          }
          if (G1Log::fine()) {
            gclog_or_tty->date_stamp(PrintGCDateStamps);
            gclog_or_tty->stamp(PrintGCTimeStamps);
            gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]");
          }
        }
      } while (cm()->restart_for_overflow());

      double end_time = os::elapsedVTime();
      // Update the total virtual time before doing this, since it will try
      // to measure it to get the vtime for this marking. We purposely
      // neglect the presumably-short "completeCleanup" phase here.
      _vtime_accum = (end_time - _vtime_start);

      if (!cm()->has_aborted()) {
        if (g1_policy->adaptive_young_list_length()) {
          double now = os::elapsedTime();
          double cleanup_prediction_ms = g1_policy->predict_cleanup_time_ms();
          jlong sleep_time_ms = mmu_tracker->when_ms(now, cleanup_prediction_ms);
          os::sleep(current_thread, sleep_time_ms, false);
        }

        CMCleanUp cl_cl(_cm);
        VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */);
        VMThread::execute(&op);
      } else {
        // We don't want to update the marking status if a GC pause
        // is already underway.
        _sts.join();
        g1h->set_marking_complete();
        _sts.leave();
      }

      // Check if cleanup set the free_regions_coming flag. If it
      // hasn't, we can just skip the next step.
      if (g1h->free_regions_coming()) {
        // The following will finish freeing up any regions that we
        // found to be empty during cleanup. We'll do this part
        // without joining the suspendible set. If an evacuation pause
        // takes place, then we would carry on freeing regions in
        // case they are needed by the pause. If a Full GC takes
        // place, it would wait for us to process the regions
        // reclaimed by cleanup.

        double cleanup_start_sec = os::elapsedTime();
        if (G1Log::fine()) {
          gclog_or_tty->date_stamp(PrintGCDateStamps);
          gclog_or_tty->stamp(PrintGCTimeStamps);
          gclog_or_tty->print_cr("[GC concurrent-cleanup-start]");
        }

        // Now do the concurrent cleanup operation.
        _cm->completeCleanup();

        // Notify anyone who's waiting that there are no more free
        // regions coming. We have to do this before we join the STS
        // (in fact, we should not attempt to join the STS in the
        // interval between finishing the cleanup pause and clearing
        // the free_regions_coming flag), otherwise we might deadlock:
        // a GC worker could be blocked waiting for the notification
        // whereas this thread will be blocked for the pause to finish
        // while it's trying to join the STS, which is conditional on
        // the GC workers finishing.
        g1h->reset_free_regions_coming();

        double cleanup_end_sec = os::elapsedTime();
        if (G1Log::fine()) {
          gclog_or_tty->date_stamp(PrintGCDateStamps);
          gclog_or_tty->stamp(PrintGCTimeStamps);
          gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf secs]",
                                 cleanup_end_sec - cleanup_start_sec);
        }
      }
      guarantee(cm()->cleanup_list_is_empty(),
                "at this point there should be no regions on the cleanup list");

      // There is a tricky race between recording that the concurrent
      // cleanup has completed and a potential Full GC starting around
      // the same time. We want to make sure that the Full GC calls
      // abort() on concurrent mark after
      // record_concurrent_mark_cleanup_completed(), since abort() is
      // the method that will reset the concurrent mark state. If we
      // end up calling record_concurrent_mark_cleanup_completed()
      // after abort() then we might incorrectly undo some of the work
      // abort() did. Checking the has_aborted() flag after joining
      // the STS allows the correct ordering of the two methods. There
      // are two scenarios:
      //
      // a) If we reach here before the Full GC, the fact that we have
      // joined the STS means that the Full GC cannot start until we
      // leave the STS, so record_concurrent_mark_cleanup_completed()
      // will complete before abort() is called.
      //
      // b) If we reach here during the Full GC, we'll be held up from
      // joining the STS until the Full GC is done, which means that
      // abort() will have completed and has_aborted() will return
      // true to prevent us from calling
      // record_concurrent_mark_cleanup_completed() (and, in fact, it's
      // not needed any more as the concurrent mark state has been
      // already reset).
      _sts.join();
      if (!cm()->has_aborted()) {
        g1_policy->record_concurrent_mark_cleanup_completed();
      }
      _sts.leave();

      if (cm()->has_aborted()) {
        if (G1Log::fine()) {
          gclog_or_tty->date_stamp(PrintGCDateStamps);
          gclog_or_tty->stamp(PrintGCTimeStamps);
          gclog_or_tty->print_cr("[GC concurrent-mark-abort]");
        }
      }

      // We now want to allow clearing of the marking bitmap to be
      // suspended by a collection pause.
      _sts.join();
      _cm->clearNextBitmap();
      _sts.leave();
    }

    // Update the number of full collections that have been
    // completed. This will also notify the FullGCCount_lock in case a
    // Java thread is waiting for a full GC to happen (e.g., it
    // called System.gc() with +ExplicitGCInvokesConcurrent).
    _sts.join();
    g1h->increment_old_marking_cycles_completed(true /* concurrent */);
    g1h->register_concurrent_cycle_end();
    _sts.leave();
  }
  assert(_should_terminate, "just checking");

  terminate();
}


void ConcurrentMarkThread::yield() {
  _sts.yield("Concurrent Mark");
}

void ConcurrentMarkThread::stop() {
  {
    MutexLockerEx ml(Terminator_lock);
    _should_terminate = true;
  }

  {
    MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
    CGC_lock->notify_all();
  }

  {
    MutexLockerEx ml(Terminator_lock);
    while (!_has_terminated) {
      Terminator_lock->wait();
    }
  }
}

void ConcurrentMarkThread::print() const {
  print_on(tty);
}

void ConcurrentMarkThread::print_on(outputStream* st) const {
  st->print("\"G1 Main Concurrent Mark GC Thread\" ");
  Thread::print_on(st);
  st->cr();
}

void ConcurrentMarkThread::sleepBeforeNextCycle() {
  // We join here because we don't want to do the "shouldConcurrentMark()"
  // below while the world is otherwise stopped.
  assert(!in_progress(), "should have been cleared");

  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  while (!started() && !_should_terminate) {
    CGC_lock->wait(Mutex::_no_safepoint_check_flag);
  }

  if (started()) {
    set_in_progress();
    clear_started();
  }
}

// Note: As is the case with CMS - this method, although exported
// by the ConcurrentMarkThread, which is a non-JavaThread, can only
// be called by a JavaThread. Currently this is done at vm creation
// time (post-vm-init) by the main/Primordial (Java)Thread.
// XXX Consider changing this in the future to allow the CM thread
// itself to create this thread?
void ConcurrentMarkThread::makeSurrogateLockerThread(TRAPS) {
  assert(UseG1GC, "SLT thread needed only for concurrent GC");
  assert(THREAD->is_Java_thread(), "must be a Java thread");
  assert(_slt == NULL, "SLT already created");
  _slt = SurrogateLockerThread::make(THREAD);
}