/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

elapsedTimer        PSMarkSweep::_accumulated_time;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
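  // A note on the counters created below (a sketch; the ordinal convention
  // is an assumption drawn from how jstat presents ParallelScavenge data):
  // CollectorCounters publishes this collector's invocation count and
  // accumulated time in the hsperfdata shared memory, where ordinal 1 is
  // the full collector and ordinal 0 the scavenger; jstat -gcutil surfaces
  // them as the FGC/FGCT columns.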
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap-specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. Any policy-driven bailout,
// scavenge before full gc, or other specialized behavior needs to be
// added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity.  For example when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  GCCause::Cause gc_cause = heap->gc_cause();

  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
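  // (A sketch of the relationship, stated as an assumption since it lives in
  // other files: _total_invocations is the shared MarkSweep counter that
  // PSMarkSweepDecorator consults, together with the MarkSweepAlwaysCompactCount
  // setting made in invoke() above, to decide how aggressively each space is
  // compacted on this pass.)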
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump(_gc_timer);

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::used_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
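    // (For example, LRUMaxHeapPolicy in referencePolicy.hpp scales the
    // soft-reference clearing interval by the free heap recorded here,
    // roughly interval_ms = free_heap_in_MB * SoftRefLRUPolicyMSPerMB.)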
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      if (young_gen_empty) {
        modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
      }
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                               heap->total_collections());
        if (Verbose) {
          // Use SIZE_FORMAT for the size_t arguments ("%d" would truncate
          // them on LP64 platforms).
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                              " young_gen_capacity: " SIZE_FORMAT,
                              old_gen->capacity_in_bytes(),
                              young_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
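      // The block below feeds the measured live sizes and current capacities
      // into the adaptive size policy, which computes new desired free-space
      // amounts; only the old gen is actually resized here (see the comment
      // further down for why the young gen is left alone at full GCs).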
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection.  A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation.  Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
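        // The used-change lines below take roughly this shape (illustrative
        // sizes only, not produced by this comment):
        //   [PSYoungGen: 2048K->0K(9216K)] [PSOldGen: 500K->2421K(10240K)]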
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      if (PrintGCDetails) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

  heap->post_full_gc_dump(_gc_timer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
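  // (Worked example, assuming a 64-bit VM where a PreservedMark is an oop
  // plus a markOop, i.e. 16 bytes: 4M of free to-space leaves room for
  // roughly 256K preserved marks.)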
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}


void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
    Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer, _gc_tracer->gc_id());
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
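  // A string-table entry survives only if is_alive_closure() reports its
  // String oop as still marked; all other entries are removed here.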
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}


void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::cld_do(adjust_cld_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
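  // Adjustment rewrites each remaining root slot in place: phase 2 stored a
  // forwarding pointer in each live object's mark word, and
  // adjust_pointer_closure() redirects the slot to that new address.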
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}