/*
 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"


HeapWord*              PSScavenge::_to_space_top_before_gc = NULL;
int                    PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*    PSScavenge::_ref_processor = NULL;
CardTableExtension*    PSScavenge::_card_table = NULL;
bool                   PSScavenge::_survivor_overflow = false;
uint                   PSScavenge::_tenuring_threshold = 0;
HeapWord*              PSScavenge::_young_generation_boundary = NULL;
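// (The preserved mark/oop stacks below record object headers that were
// overwritten by forwarding pointers; they are restored if a promotion
// failure forces the scavenge to unwind. See clean_up_failed_promotion().)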
elapsedTimer           PSScavenge::_accumulated_time;
Stack<markOop, mtGC>   PSScavenge::_preserved_mark_stack;
Stack<oop, mtGC>       PSScavenge::_preserved_oop_stack;
CollectorCounters*     PSScavenge::_counters = NULL;
bool                   PSScavenge::_promotion_failed = false;

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint          _work_id;
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint         _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for(uint i=0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(manager->active_workers(),
                 (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && manager->active_workers() > 1) {
    for (uint j = 0; j < manager->active_workers(); j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  manager->execute_and_wait(q);
}


void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for(uint i=0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  manager->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
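//
// In outline: run a scavenge; if it did not complete, or if the size policy
// predicts the old gen can no longer absorb the promoted bytes, follow up
// with a full collection (PSParallelCompact when UseParallelOldGC is set,
// PSMarkSweep otherwise). The return value reports whether a full GC ran.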
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
  const bool need_full_gc = !scavenge_done ||
    policy->should_full_GC(heap->old_gen()->free_in_bytes());
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }

  return full_gc_done;
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

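  // Explicit System.gc() calls do not feed the adaptive size policy unless
  // UseAdaptiveSizePolicyWithSystemGC is set; the same guard is applied to
  // the resizing decisions further down.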
  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  heap->print_heap_before_gc();

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

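    // The scavenge proper: set up the GC task threads and per-thread
    // promotion managers, scan old->young roots and strong roots in
    // parallel, then process the references discovered along the way.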
    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();
    heap->set_par_threads(active_workers);

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      // TraceTime("Roots");
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for(uint i=0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
      } else {
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL);
      }
    }

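    // Referents kept alive by the chosen policy were copied by keep_alive
    // above, and everything transitively reachable from them was drained by
    // evac_followers before process_discovered_references returned.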
    // Enqueue reference objects discovered during scavenge.
    if (reference_processor()->processing_is_mt()) {
      PSRefProcTaskExecutor task_executor;
      reference_processor()->enqueue_discovered_references(&task_executor);
    } else {
      reference_processor()->enqueue_discovered_references(NULL);
    }

    // Unlink any dead interned Strings
    StringTable::unlink(&_is_alive_closure);
    // Process the remaining live ones
    PSScavengeRootsClosure root_closure(promotion_manager);
    StringTable::oops_do(&root_closure);

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    PSPromotionManager::post_scavenge();

    promotion_failure_occurred = promotion_failed();
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done.  Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

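      // Everything below feeds the adaptive size policy: the GC overhead
      // limit, survivor sizing, the tenuring threshold, and eden sizing.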
      // A successful scavenge should restart the GC time limit count, which
      // is for full GCs.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                                 heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                                " young_gen_capacity: " SIZE_FORMAT,
                                old_gen->capacity_in_bytes(),
                                young_gen->capacity_in_bytes());
          }
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT
                                 " bytes, new threshold %u (max %u)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

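        // The tenuring threshold just computed is the age (number of
        // scavenges survived) at which a live object is promoted to the old
        // gen rather than copied to the survivor space; it adapts between 0
        // and MaxTenuringThreshold as survivor-space pressure changes.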
        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveSizePolicy &&
            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = young_gen->max_size() -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                                 heap->total_collections());
        }
      }

      // Update the structure of the eden.  With NUMA-eden, CPU hotplugging
      // or offlining can change the heap layout; make sure eden is reshaped
      // if that's the case.  Also, update() will cause adaptive NUMA chunk
      // resizing.
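      // (With -XX:+UseNUMA, eden is a MutableNUMASpace carved into per-node
      // chunks; its update() rebalances those chunks, hence "reshaped" above.)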
      assert(young_gen->eden_space()->is_empty(),
             "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    CodeCache::prune_scavenge_root_nmethods();

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  heap->print_heap_after_gc();

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  assert(promotion_failed(), "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
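    // (PSPromotionFailedClosure resets the header of every forwarded object
    // to the default mark; headers that carried real state, such as a hash
    // code or lock bits, were saved on the preserved stacks at failure time
    // and are restored below.)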
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring " SIZE_FORMAT " marks",
                             _preserved_oop_stack.size());
    }

    // Restore any saved marks.
    while (!_preserved_oop_stack.is_empty()) {
      oop obj      = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
    _promotion_failed = false;
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  _promotion_failed = true;
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    // Should use per-worker private stacks here rather than
    // locking a common pair of stacks.
    ThreadCritical tc;
    _preserved_oop_stack.push(obj);
    _preserved_mark_stack.push(obj_mark);
  }
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC().  If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ?
" do scavenge: " : " skip scavenge: "); duke@435: gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT duke@435: " padded_average_promoted " SIZE_FORMAT duke@435: " free in old gen " SIZE_FORMAT, duke@435: (size_t) policy->average_promoted_in_bytes(), duke@435: (size_t) policy->padded_average_promoted_in_bytes(), duke@435: old_gen->free_in_bytes()); duke@435: if (young_gen->used_in_bytes() < duke@435: (size_t) policy->padded_average_promoted_in_bytes()) { duke@435: gclog_or_tty->print_cr(" padded_promoted_average is greater" duke@435: " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes()); duke@435: } duke@435: } duke@435: duke@435: if (result) { duke@435: _consecutive_skipped_scavenges = 0; duke@435: } else { duke@435: _consecutive_skipped_scavenges++; duke@435: if (UsePerfData) { duke@435: counters->update_scavenge_skipped(promoted_too_large); duke@435: } duke@435: } duke@435: return result; duke@435: } duke@435: duke@435: // Used to add tasks duke@435: GCTaskManager* const PSScavenge::gc_task_manager() { duke@435: assert(ParallelScavengeHeap::gc_task_manager() != NULL, duke@435: "shouldn't return NULL"); duke@435: return ParallelScavengeHeap::gc_task_manager(); duke@435: } duke@435: duke@435: void PSScavenge::initialize() { duke@435: // Arguments must have been parsed duke@435: duke@435: if (AlwaysTenure) { duke@435: _tenuring_threshold = 0; duke@435: } else if (NeverTenure) { duke@435: _tenuring_threshold = markOopDesc::max_age + 1; duke@435: } else { duke@435: // We want to smooth out our startup times for the AdaptiveSizePolicy duke@435: _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold : duke@435: MaxTenuringThreshold; duke@435: } duke@435: duke@435: ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); duke@435: assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); duke@435: duke@435: PSYoungGen* young_gen = heap->young_gen(); duke@435: PSOldGen* old_gen = heap->old_gen(); duke@435: duke@435: // Set boundary between young_gen and old_gen duke@435: assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(), duke@435: "old above young"); duke@435: _young_generation_boundary = young_gen->eden_space()->bottom(); duke@435: duke@435: // Initialize ref handling object for scavenging. duke@435: MemRegion mr = young_gen->reserved(); jmasa@3294: ysr@2651: _ref_processor = ysr@2651: new ReferenceProcessor(mr, // span ysr@2651: ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing ysr@2651: (int) ParallelGCThreads, // mt processing degree ysr@2651: true, // mt discovery ysr@2651: (int) ParallelGCThreads, // mt discovery degree ysr@2651: true, // atomic_discovery ysr@2651: NULL, // header provides liveness info ysr@2651: false); // next field updates do not need write barrier duke@435: duke@435: // Cache the cardtable duke@435: BarrierSet* bs = Universe::heap()->barrier_set(); duke@435: assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind"); duke@435: _card_table = (CardTableExtension*)bs; duke@435: duke@435: _counters = new CollectorCounters("PSScavenge", 0); duke@435: }