src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp

author       johnc
date         Thu, 22 Sep 2011 10:57:37 -0700
changeset    3175  4dfb2df418f2
parent       2888  78542e2b5e35
child        3294  bca17e38de00
permissions  -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp

/*
 * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
int                        PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
CardTableExtension*        PSScavenge::_card_table = NULL;
bool                       PSScavenge::_survivor_overflow = false;
int                        PSScavenge::_tenuring_threshold = 0;
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
elapsedTimer               PSScavenge::_accumulated_time;
Stack<markOop>             PSScavenge::_preserved_mark_stack;
Stack<oop>                 PSScavenge::_preserved_oop_stack;
CollectorCounters*         PSScavenge::_counters = NULL;
bool                       PSScavenge::_promotion_failed = false;

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
  void do_object(oop p) {
    assert(false, "Do not call.");
  }
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;
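
// Passed to the reference processor as its keep-alive closure: each
// referent it is applied to is copied to to-space (or promoted) via the
// promotion manager, just like an ordinary scavenge root.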
class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};
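
// Drains the promotion manager's stacks, evacuating everything that is
// transitively reachable from the objects kept alive so far.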
class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};
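
// GCTask wrapper that lets a reference-processing work unit run on one of
// the parallel GC task threads.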
class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint          _work_id;
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint         _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};
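
// Schedules the proxy tasks above on the GC task manager's workers; when a
// processing task marks oops alive, work-stealing tasks are added so idle
// workers can help drain the promotion stacks.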
class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<ParallelGCThreads; i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(
                 ParallelScavengeHeap::gc_task_manager()->workers(),
                 (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && ParallelGCThreads > 1) {
    for (uint j=0; j<ParallelGCThreads; j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
}

void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<ParallelGCThreads; i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or perform any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  bool scavenge_was_done = PSScavenge::invoke_no_policy();

  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
  if (UsePerfData)
    counters->update_full_follows_scavenge(0);
  if (!scavenge_was_done ||
      policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
    if (UsePerfData)
      counters->update_full_follows_scavenge(full_follows_scavenge);
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    COMPILER2_PRESENT(DerivedPointerTable::clear());
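
    // Enable discovery so that Reference objects encountered while
    // scavenging are recorded by the reference processor rather than having
    // their referents followed immediately; they are processed after the
    // transitive closure below.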
    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      // TraceTime("Roots");
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      for(uint i=0; i<ParallelGCThreads; i++) {
        q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
      }

      q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));
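
      // Workers that finish their own root tasks steal unprocessed oops
      // from other workers' depth-first stacks; the terminator detects
      // when every stack is empty so the phase can complete.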
      ParallelTaskTerminator terminator(
                  gc_task_manager()->workers(),
                  (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (ParallelGCThreads > 1) {
        for (uint j=0; j<ParallelGCThreads; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      reference_processor()->setup_policy(false); // not always_clear
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
      } else {
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL);
      }
    }

    // Enqueue reference objects discovered during scavenge.
    if (reference_processor()->processing_is_mt()) {
      PSRefProcTaskExecutor task_executor;
      reference_processor()->enqueue_discovered_references(&task_executor);
    } else {
      reference_processor()->enqueue_discovered_references(NULL);
    }

    if (!JavaObjectsInPerm) {
      // Unlink any dead interned Strings
      StringTable::unlink(&_is_alive_closure);
      // Process the remaining live ones
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::oops_do(&root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    PSPromotionManager::post_scavenge();

    promotion_failure_occurred = promotion_failed();
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done.  Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.

      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                         heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
              " perm_gen_capacity: %d ",
              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
              perm_gen->capacity_in_bytes());
          }
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveSizePolicy &&
            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");
          size_t max_eden_size = young_gen->max_size() -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();
          size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                   young_gen->eden_space()->used_in_bytes(),
                                   old_gen->used_in_bytes(),
                                   perm_gen->used_in_bytes(),
                                   young_gen->eden_space()->capacity_in_bytes(),
                                   old_gen->max_gen_size(),
                                   max_eden_size,
                                   false  /* full gc*/,
                                   gc_cause,
                                   heap->collector_policy());

        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                        size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                         heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout. Make sure eden is reshaped if
      // that's the case. update() also triggers adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  assert(promotion_failed(), "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring %d marks", _preserved_oop_stack.size());
    }

    // Restore any saved marks.
    while (!_preserved_oop_stack.is_empty()) {
      oop obj      = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
    _promotion_failed = false;
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  _promotion_failed = true;
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    // Should use per-worker private stacks here rather than
    // locking a common pair of stacks.
    ThreadCritical tc;
    _preserved_oop_stack.push(obj);
    _preserved_mark_stack.push(obj_mark);
  }
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC().  If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? "  do scavenge: " : "  skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
      " padded_average_promoted " SIZE_FORMAT
      " free in old gen " SIZE_FORMAT,
      (size_t) policy->average_promoted_in_bytes(),
      (size_t) policy->padded_average_promoted_in_bytes(),
      old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
        " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
   "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure) {
    _tenuring_threshold = 0;
  } else if (NeverTenure) {
    _tenuring_threshold = markOopDesc::max_age + 1;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Set boundary between young_gen and old_gen
  assert(perm_gen->reserved().end() <= old_gen->object_space()->bottom(),
         "perm above old");
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  _young_generation_boundary = young_gen->eden_space()->bottom();

  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();
  _ref_processor =
    new ReferenceProcessor(mr,                         // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           (int) ParallelGCThreads,    // mt processing degree
                           true,                       // mt discovery
                           (int) ParallelGCThreads,    // mt discovery degree
                           true,                       // atomic_discovery
                           NULL,                       // header provides liveness info
                           false);                     // next field updates do not need write barrier

  // Cache the cardtable
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  _card_table = (CardTableExtension*)bs;

  _counters = new CollectorCounters("PSScavenge", 0);
}
