src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp

author       ysr
date         Thu, 20 Nov 2008 16:56:09 -0800
changeset    888:c96030fff130
parent       704:850fdf70db2b
child        892:27a80744a83b
permissions  -rw-r--r--

6684579: SoftReference processing can be made more efficient
Summary: For current soft-ref clearing policies, we can decide at marking time if a soft-reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This can be especially beneficial in the case of concurrent collectors where the marking is usually concurrent but reference processing is usually not.
Reviewed-by: jmasa
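
The summary splits the clearing decision in two: at marking time, a soft reference whose referent the current clearing policy will definitely keep can be treated like an ordinary strong edge right away, and only the references that might still be cleared need to be deferred to the final reference-processing phase. Below is a minimal sketch of that split, in illustrative C++ only; SoftRefPolicy, Reference, might_clear, and the timestamp bookkeeping are invented for this note and are not HotSpot APIs.

  #include <cstdint>
  #include <vector>

  struct Reference {
    void*    referent;
    uint64_t last_access_ms;   // maintained by the runtime on each access
  };

  // A clearing policy in the spirit of the LRU-based soft-ref policies:
  // a referent is eligible for clearing only if it has not been touched
  // within an interval derived from the amount of free heap.
  struct SoftRefPolicy {
    uint64_t now_ms;           // snapshotted once per collection
    uint64_t keep_interval_ms;

    // Marking-time query: if this returns false, the referent will
    // definitely not be cleared in this cycle, so the marker may treat
    // it as strongly reachable immediately instead of deferring it.
    bool might_clear(const Reference& r) const {
      return now_ms - r.last_access_ms >= keep_interval_ms;
    }
  };

  // Marking-time use of the policy: only references that might be
  // cleared go on the discovered list for the (typically non-concurrent)
  // reference-processing phase; the rest are followed like strong edges
  // during the (possibly concurrent) marking itself.
  void discover_soft_refs(const std::vector<Reference*>& soft_refs,
                          const SoftRefPolicy& policy,
                          std::vector<Reference*>& discovered,
                          std::vector<void*>& mark_stack) {
    for (Reference* r : soft_refs) {
      if (policy.might_clear(*r)) {
        discovered.push_back(r);           // clear-or-keep decided later
      } else {
        mark_stack.push_back(r->referent); // keep alive right away
      }
    }
  }

In the file below, the snap_policy(false) calls appear to snapshot the policy once per collection so that the discovery-time and processing-time decisions are made against the same policy state.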

     1 /*
     2  * Copyright 2002-2008 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    26 # include "incls/_precompiled.incl"
    27 # include "incls/_psScavenge.cpp.incl"
    29 HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
    30 int                        PSScavenge::_consecutive_skipped_scavenges = 0;
    31 ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
    32 CardTableExtension*        PSScavenge::_card_table = NULL;
    33 bool                       PSScavenge::_survivor_overflow = false;
    34 int                        PSScavenge::_tenuring_threshold = 0;
    35 HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
    36 elapsedTimer               PSScavenge::_accumulated_time;
    37 GrowableArray<markOop>*    PSScavenge::_preserved_mark_stack = NULL;
    38 GrowableArray<oop>*        PSScavenge::_preserved_oop_stack = NULL;
    39 CollectorCounters*         PSScavenge::_counters = NULL;
    41 // Define before use
    42 class PSIsAliveClosure: public BoolObjectClosure {
    43 public:
    44   void do_object(oop p) {
    45     assert(false, "Do not call.");
    46   }
    47   bool do_object_b(oop p) {
    48     return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
    49   }
    50 };
    52 PSIsAliveClosure PSScavenge::_is_alive_closure;
    54 class PSKeepAliveClosure: public OopClosure {
    55 protected:
    56   MutableSpace* _to_space;
    57   PSPromotionManager* _promotion_manager;
    59 public:
    60   PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    61     ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    62     assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    63     _to_space = heap->young_gen()->to_space();
    65     assert(_promotion_manager != NULL, "Sanity");
    66   }
    68   template <class T> void do_oop_work(T* p) {
    69     assert (!oopDesc::is_null(*p), "expected non-null ref");
    70     assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
    71             "expected an oop while scanning weak refs");
    73     // Weak refs may be visited more than once.
    74     if (PSScavenge::should_scavenge(p, _to_space)) {
    75       PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
    76     }
    77   }
    78   virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
    79   virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
    80 };
    82 class PSEvacuateFollowersClosure: public VoidClosure {
    83  private:
    84   PSPromotionManager* _promotion_manager;
    85  public:
    86   PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}
    88   virtual void do_void() {
    89     assert(_promotion_manager != NULL, "Sanity");
    90     _promotion_manager->drain_stacks(true);
    91     guarantee(_promotion_manager->stacks_empty(),
    92               "stacks should be empty at this point");
    93   }
    94 };
    96 class PSPromotionFailedClosure : public ObjectClosure {
    97   virtual void do_object(oop obj) {
    98     if (obj->is_forwarded()) {
    99       obj->init_mark();
   100     }
   101   }
   102 };
   104 class PSRefProcTaskProxy: public GCTask {
   105   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
   106   ProcessTask & _rp_task;
   107   uint          _work_id;
   108 public:
   109   PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
   110     : _rp_task(rp_task),
   111       _work_id(work_id)
   112   { }
   114 private:
   115   virtual char* name() { return (char *)"Process referents by policy in parallel"; }
   116   virtual void do_it(GCTaskManager* manager, uint which);
   117 };
   119 void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
   120 {
   121   PSPromotionManager* promotion_manager =
   122     PSPromotionManager::gc_thread_promotion_manager(which);
   123   assert(promotion_manager != NULL, "sanity check");
   124   PSKeepAliveClosure keep_alive(promotion_manager);
   125   PSEvacuateFollowersClosure evac_followers(promotion_manager);
   126   PSIsAliveClosure is_alive;
   127   _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
   128 }
   130 class PSRefEnqueueTaskProxy: public GCTask {
   131   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
   132   EnqueueTask& _enq_task;
   133   uint         _work_id;
   135 public:
   136   PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
   137     : _enq_task(enq_task),
   138       _work_id(work_id)
   139   { }
   141   virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
   142   virtual void do_it(GCTaskManager* manager, uint which)
   143   {
   144     _enq_task.work(_work_id);
   145   }
   146 };
   148 class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
   149   virtual void execute(ProcessTask& task);
   150   virtual void execute(EnqueueTask& task);
   151 };
   153 void PSRefProcTaskExecutor::execute(ProcessTask& task)
   154 {
   155   GCTaskQueue* q = GCTaskQueue::create();
   156   for(uint i=0; i<ParallelGCThreads; i++) {
   157     q->enqueue(new PSRefProcTaskProxy(task, i));
   158   }
   159   ParallelTaskTerminator terminator(
   160     ParallelScavengeHeap::gc_task_manager()->workers(),
   161     UseDepthFirstScavengeOrder ?
   162         (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth()
   163       : (TaskQueueSetSuper*) PSPromotionManager::stack_array_breadth());
   164   if (task.marks_oops_alive() && ParallelGCThreads > 1) {
   165     for (uint j=0; j<ParallelGCThreads; j++) {
   166       q->enqueue(new StealTask(&terminator));
   167     }
   168   }
   169   ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
   170 }
   173 void PSRefProcTaskExecutor::execute(EnqueueTask& task)
   174 {
   175   GCTaskQueue* q = GCTaskQueue::create();
   176   for(uint i=0; i<ParallelGCThreads; i++) {
   177     q->enqueue(new PSRefEnqueueTaskProxy(task, i));
   178   }
   179   ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
   180 }
   182 // This method contains all heap specific policy for invoking scavenge.
   183 // PSScavenge::invoke_no_policy() will do nothing but attempt to
   184 // scavenge. It will not clean up after failed promotions, bail out if
   185 // we've exceeded policy time limits, or any other special behavior.
   186 // All such policy should be placed here.
   187 //
   188 // Note that this method should only be called from the vm_thread while
   189 // at a safepoint!
   190 void PSScavenge::invoke()
   191 {
   192   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   193   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
   194   assert(!Universe::heap()->is_gc_active(), "not reentrant");
   196   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   197   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   199   PSAdaptiveSizePolicy* policy = heap->size_policy();
   201   // Before each allocation/collection attempt, find out from the
   202   // policy object if GCs are, on the whole, taking too long. If so,
   203   // bail out without attempting a collection.
   204   if (!policy->gc_time_limit_exceeded()) {
   205     IsGCActiveMark mark;
   207     bool scavenge_was_done = PSScavenge::invoke_no_policy();
   209     PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
   210     if (UsePerfData)
   211       counters->update_full_follows_scavenge(0);
   212     if (!scavenge_was_done ||
   213         policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
   214       if (UsePerfData)
   215         counters->update_full_follows_scavenge(full_follows_scavenge);
   217       GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
   218       if (UseParallelOldGC) {
   219         PSParallelCompact::invoke_no_policy(false);
   220       } else {
   221         PSMarkSweep::invoke_no_policy(false);
   222       }
   223     }
   224   }
   225 }
   227 // This method contains no policy. You should probably
   228 // be calling invoke() instead.
   229 bool PSScavenge::invoke_no_policy() {
   230   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   231   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
   233   TimeStamp scavenge_entry;
   234   TimeStamp scavenge_midpoint;
   235   TimeStamp scavenge_exit;
   237   scavenge_entry.update();
   239   if (GC_locker::check_active_before_gc()) {
   240     return false;
   241   }
   243   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   244   GCCause::Cause gc_cause = heap->gc_cause();
   245   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   247   // Check for potential problems.
   248   if (!should_attempt_scavenge()) {
   249     return false;
   250   }
   252   bool promotion_failure_occurred = false;
   254   PSYoungGen* young_gen = heap->young_gen();
   255   PSOldGen* old_gen = heap->old_gen();
   256   PSPermGen* perm_gen = heap->perm_gen();
   257   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
   258   heap->increment_total_collections();
   260   AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
   262   if ((gc_cause != GCCause::_java_lang_system_gc) ||
   263        UseAdaptiveSizePolicyWithSystemGC) {
   264     // Gather the feedback data for eden occupancy.
   265     young_gen->eden_space()->accumulate_statistics();
   266   }
   268   if (ZapUnusedHeapArea) {
   269     // Save information needed to minimize mangling
   270     heap->record_gen_tops_before_GC();
   271   }
   273   if (PrintHeapAtGC) {
   274     Universe::print_heap_before_gc();
   275   }
   277   assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
   278   assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
   280   size_t prev_used = heap->used();
   281   assert(promotion_failed() == false, "Sanity");
   283   // Fill in TLABs
   284   heap->accumulate_statistics_all_tlabs();
   285   heap->ensure_parsability(true);  // retire TLABs
   287   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
   288     HandleMark hm;  // Discard invalid handles created during verification
   289     gclog_or_tty->print(" VerifyBeforeGC:");
   290     Universe::verify(true);
   291   }
   293   {
   294     ResourceMark rm;
   295     HandleMark hm;
   297     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
   298     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
   299     TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
   300     TraceCollectorStats tcs(counters());
   301     TraceMemoryManagerStats tms(false /* not full GC */);
   303     if (TraceGen0Time) accumulated_time()->start();
   305     // Let the size policy know we're starting
   306     size_policy->minor_collection_begin();
   308     // Verify the object start arrays.
   309     if (VerifyObjectStartArray &&
   310         VerifyBeforeGC) {
   311       old_gen->verify_object_start_array();
   312       perm_gen->verify_object_start_array();
   313     }
   315     // Verify no unmarked old->young roots
   316     if (VerifyRememberedSets) {
   317       CardTableExtension::verify_all_young_refs_imprecise();
   318     }
   320     if (!ScavengeWithObjectsInToSpace) {
   321       assert(young_gen->to_space()->is_empty(),
   322              "Attempt to scavenge with live objects in to_space");
   323       young_gen->to_space()->clear(SpaceDecorator::Mangle);
   324     } else if (ZapUnusedHeapArea) {
   325       young_gen->to_space()->mangle_unused_area();
   326     }
   327     save_to_space_top_before_gc();
   329     NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
   330     COMPILER2_PRESENT(DerivedPointerTable::clear());
   332     reference_processor()->enable_discovery();
   333     reference_processor()->snap_policy(false);
   335     // We track how much was promoted to the next generation for
   336     // the AdaptiveSizePolicy.
   337     size_t old_gen_used_before = old_gen->used_in_bytes();
   339     // For PrintGCDetails
   340     size_t young_gen_used_before = young_gen->used_in_bytes();
   342     // Reset our survivor overflow.
   343     set_survivor_overflow(false);
   345     // We need to save the old/perm top values before
   346     // creating the promotion_manager. We pass the top
   347     // values to the card_table, to prevent it from
   348     // straying into the promotion labs.
   349     HeapWord* old_top = old_gen->object_space()->top();
   350     HeapWord* perm_top = perm_gen->object_space()->top();
   352     // Release all previously held resources
   353     gc_task_manager()->release_all_resources();
   355     PSPromotionManager::pre_scavenge();
   357     // We'll use the promotion manager again later.
   358     PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
   359     {
   360       // TraceTime("Roots");
   362       GCTaskQueue* q = GCTaskQueue::create();
   364       for(uint i=0; i<ParallelGCThreads; i++) {
   365         q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
   366       }
   368       q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));
   370       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
   371       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
   372       // We scan the thread roots in parallel
   373       Threads::create_thread_roots_tasks(q);
   374       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
   375       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
   376       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
   377       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
   378       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
   380       ParallelTaskTerminator terminator(
   381         gc_task_manager()->workers(),
   382         promotion_manager->depth_first() ?
   383             (TaskQueueSetSuper*) promotion_manager->stack_array_depth()
   384           : (TaskQueueSetSuper*) promotion_manager->stack_array_breadth());
   385       if (ParallelGCThreads>1) {
   386         for (uint j=0; j<ParallelGCThreads; j++) {
   387           q->enqueue(new StealTask(&terminator));
   388         }
   389       }
   391       gc_task_manager()->execute_and_wait(q);
   392     }
   394     scavenge_midpoint.update();
   396     // Process reference objects discovered during scavenge
   397     {
   398       reference_processor()->snap_policy(false); // not always_clear
   399       PSKeepAliveClosure keep_alive(promotion_manager);
   400       PSEvacuateFollowersClosure evac_followers(promotion_manager);
   401       if (reference_processor()->processing_is_mt()) {
   402         PSRefProcTaskExecutor task_executor;
   403         reference_processor()->process_discovered_references(
   404           &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
   405       } else {
   406         reference_processor()->process_discovered_references(
   407           &_is_alive_closure, &keep_alive, &evac_followers, NULL);
   408       }
   409     }
   411     // Enqueue reference objects discovered during scavenge.
   412     if (reference_processor()->processing_is_mt()) {
   413       PSRefProcTaskExecutor task_executor;
   414       reference_processor()->enqueue_discovered_references(&task_executor);
   415     } else {
   416       reference_processor()->enqueue_discovered_references(NULL);
   417     }
   419     // Finally, flush the promotion_manager's labs, and deallocate its stacks.
   420     assert(promotion_manager->claimed_stack_empty(), "Sanity");
   421     PSPromotionManager::post_scavenge();
   423     promotion_failure_occurred = promotion_failed();
   424     if (promotion_failure_occurred) {
   425       clean_up_failed_promotion();
   426       if (PrintGC) {
   427         gclog_or_tty->print("--");
   428       }
   429     }
   431     // Let the size policy know we're done.  Note that we count promotion
   432     // failure cleanup time as part of the collection (otherwise, we're
   433     // implicitly saying it's mutator time).
   434     size_policy->minor_collection_end(gc_cause);
   436     if (!promotion_failure_occurred) {
   437       // Swap the survivor spaces.
   440       young_gen->eden_space()->clear(SpaceDecorator::Mangle);
   441       young_gen->from_space()->clear(SpaceDecorator::Mangle);
   442       young_gen->swap_spaces();
   444       size_t survived = young_gen->from_space()->used_in_bytes();
   445       size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
   446       size_policy->update_averages(_survivor_overflow, survived, promoted);
   448       if (UseAdaptiveSizePolicy) {
   449         // Calculate the new survivor size and tenuring threshold
   451         if (PrintAdaptiveSizePolicy) {
   452           gclog_or_tty->print("AdaptiveSizeStart: ");
   453           gclog_or_tty->stamp();
   454           gclog_or_tty->print_cr(" collection: %d ",
   455                          heap->total_collections());
   457           if (Verbose) {
   458             gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
   459               " perm_gen_capacity: %d ",
   460               old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
   461               perm_gen->capacity_in_bytes());
   462           }
   463         }
   466         if (UsePerfData) {
   467           PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
   468           counters->update_old_eden_size(
   469             size_policy->calculated_eden_size_in_bytes());
   470           counters->update_old_promo_size(
   471             size_policy->calculated_promo_size_in_bytes());
   472           counters->update_old_capacity(old_gen->capacity_in_bytes());
   473           counters->update_young_capacity(young_gen->capacity_in_bytes());
   474           counters->update_survived(survived);
   475           counters->update_promoted(promoted);
   476           counters->update_survivor_overflowed(_survivor_overflow);
   477         }
   479         size_t survivor_limit =
   480           size_policy->max_survivor_size(young_gen->max_size());
   481         _tenuring_threshold =
   482           size_policy->compute_survivor_space_size_and_threshold(
   483                                                            _survivor_overflow,
   484                                                            _tenuring_threshold,
   485                                                            survivor_limit);
   487        if (PrintTenuringDistribution) {
   488          gclog_or_tty->cr();
   489          gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
   490                                 size_policy->calculated_survivor_size_in_bytes(),
   491                                 _tenuring_threshold, MaxTenuringThreshold);
   492        }
   494         if (UsePerfData) {
   495           PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
   496           counters->update_tenuring_threshold(_tenuring_threshold);
   497           counters->update_survivor_size_counters();
   498         }
   500         // Do call at minor collections?
   501         // Don't check if the size_policy is ready at this
   502         // level.  Let the size_policy check that internally.
   503         if (UseAdaptiveSizePolicy &&
   504             UseAdaptiveGenerationSizePolicyAtMinorCollection &&
   505             ((gc_cause != GCCause::_java_lang_system_gc) ||
   506               UseAdaptiveSizePolicyWithSystemGC)) {
    508           // Calculate optimal free space amounts
   509           assert(young_gen->max_size() >
   510             young_gen->from_space()->capacity_in_bytes() +
   511             young_gen->to_space()->capacity_in_bytes(),
   512             "Sizes of space in young gen are out-of-bounds");
   513           size_t max_eden_size = young_gen->max_size() -
   514             young_gen->from_space()->capacity_in_bytes() -
   515             young_gen->to_space()->capacity_in_bytes();
   516           size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
   517                                    young_gen->eden_space()->used_in_bytes(),
   518                                    old_gen->used_in_bytes(),
   519                                    perm_gen->used_in_bytes(),
   520                                    young_gen->eden_space()->capacity_in_bytes(),
   521                                    old_gen->max_gen_size(),
   522                                    max_eden_size,
   523                                    false  /* full gc*/,
   524                                    gc_cause);
   526         }
   527         // Resize the young generation at every collection
   528         // even if new sizes have not been calculated.  This is
   529         // to allow resizes that may have been inhibited by the
   530         // relative location of the "to" and "from" spaces.
   532         // Resizing the old gen at minor collects can cause increases
   533         // that don't feed back to the generation sizing policy until
   534         // a major collection.  Don't resize the old gen here.
   536         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
   537                         size_policy->calculated_survivor_size_in_bytes());
   539         if (PrintAdaptiveSizePolicy) {
   540           gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
   541                          heap->total_collections());
   542         }
   543       }
    545       // Update the structure of the eden. With NUMA-eden, CPU hotplugging or offlining can
    546       // change the heap layout. Make sure eden is reshaped if that is the case.
    547       // update() will also cause adaptive NUMA chunk resizing.
   548       assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
   549       young_gen->eden_space()->update();
   551       heap->gc_policy_counters()->update_counters();
   553       heap->resize_all_tlabs();
   555       assert(young_gen->to_space()->is_empty(), "to space should be empty now");
   556     }
   558     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
   560     NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
   562     // Re-verify object start arrays
   563     if (VerifyObjectStartArray &&
   564         VerifyAfterGC) {
   565       old_gen->verify_object_start_array();
   566       perm_gen->verify_object_start_array();
   567     }
   569     // Verify all old -> young cards are now precise
   570     if (VerifyRememberedSets) {
   571       // Precise verification will give false positives. Until this is fixed,
   572       // use imprecise verification.
   573       // CardTableExtension::verify_all_young_refs_precise();
   574       CardTableExtension::verify_all_young_refs_imprecise();
   575     }
   577     if (TraceGen0Time) accumulated_time()->stop();
   579     if (PrintGC) {
   580       if (PrintGCDetails) {
   581         // Don't print a GC timestamp here.  This is after the GC so
   582         // would be confusing.
   583         young_gen->print_used_change(young_gen_used_before);
   584       }
   585       heap->print_heap_change(prev_used);
   586     }
   588     // Track memory usage and detect low memory
   589     MemoryService::track_memory_usage();
   590     heap->update_counters();
   591   }
   593   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
   594     HandleMark hm;  // Discard invalid handles created during verification
   595     gclog_or_tty->print(" VerifyAfterGC:");
   596     Universe::verify(false);
   597   }
   599   if (PrintHeapAtGC) {
   600     Universe::print_heap_after_gc();
   601   }
   603   if (ZapUnusedHeapArea) {
   604     young_gen->eden_space()->check_mangled_unused_area_complete();
   605     young_gen->from_space()->check_mangled_unused_area_complete();
   606     young_gen->to_space()->check_mangled_unused_area_complete();
   607   }
   609   scavenge_exit.update();
   611   if (PrintGCTaskTimeStamps) {
   612     tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
   613                   scavenge_entry.ticks(), scavenge_midpoint.ticks(),
   614                   scavenge_exit.ticks());
   615     gc_task_manager()->print_task_time_stamps();
   616   }
   618   return !promotion_failure_occurred;
   619 }
   621 // This method iterates over all objects in the young generation,
   622 // unforwarding markOops. It then restores any preserved mark oops,
   623 // and clears the _preserved_mark_stack.
   624 void PSScavenge::clean_up_failed_promotion() {
   625   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   626   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   627   assert(promotion_failed(), "Sanity");
   629   PSYoungGen* young_gen = heap->young_gen();
   631   {
   632     ResourceMark rm;
   634     // Unforward all pointers in the young gen.
   635     PSPromotionFailedClosure unforward_closure;
   636     young_gen->object_iterate(&unforward_closure);
   638     if (PrintGC && Verbose) {
   639       gclog_or_tty->print_cr("Restoring %d marks",
   640                               _preserved_oop_stack->length());
   641     }
   643     // Restore any saved marks.
   644     for (int i=0; i < _preserved_oop_stack->length(); i++) {
   645       oop obj       = _preserved_oop_stack->at(i);
   646       markOop mark  = _preserved_mark_stack->at(i);
   647       obj->set_mark(mark);
   648     }
   650     // Deallocate the preserved mark and oop stacks.
   651     // The stacks were allocated as CHeap objects, so
   652     // we must call delete to prevent mem leaks.
   653     delete _preserved_mark_stack;
   654     _preserved_mark_stack = NULL;
   655     delete _preserved_oop_stack;
   656     _preserved_oop_stack = NULL;
   657   }
   659   // Reset the PromotionFailureALot counters.
   660   NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
   661 }
   663 // This method is called whenever an attempt to promote an object
   664 // fails. Some markOops will need preserving, some will not. Note
   665 // that the entire eden is traversed after a failed promotion, with
   666 // all forwarded headers replaced by the default markOop. This means
    667 // it is not necessary to preserve most markOops.
   668 void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
   669   if (_preserved_mark_stack == NULL) {
   670     ThreadCritical tc; // Lock and retest
   671     if (_preserved_mark_stack == NULL) {
   672       assert(_preserved_oop_stack == NULL, "Sanity");
   673       _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
   674       _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
   675     }
   676   }
   678   // Because we must hold the ThreadCritical lock before using
   679   // the stacks, we should be safe from observing partial allocations,
   680   // which are also guarded by the ThreadCritical lock.
   681   if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
   682     ThreadCritical tc;
   683     _preserved_oop_stack->push(obj);
   684     _preserved_mark_stack->push(obj_mark);
   685   }
   686 }
   688 bool PSScavenge::should_attempt_scavenge() {
   689   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   690   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   691   PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
   693   if (UsePerfData) {
   694     counters->update_scavenge_skipped(not_skipped);
   695   }
   697   PSYoungGen* young_gen = heap->young_gen();
   698   PSOldGen* old_gen = heap->old_gen();
   700   if (!ScavengeWithObjectsInToSpace) {
   701     // Do not attempt to promote unless to_space is empty
   702     if (!young_gen->to_space()->is_empty()) {
   703       _consecutive_skipped_scavenges++;
   704       if (UsePerfData) {
   705         counters->update_scavenge_skipped(to_space_not_empty);
   706       }
   707       return false;
   708     }
   709   }
   711   // Test to see if the scavenge will likely fail.
   712   PSAdaptiveSizePolicy* policy = heap->size_policy();
   714   // A similar test is done in the policy's should_full_GC().  If this is
   715   // changed, decide if that test should also be changed.
   716   size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
   717   size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
   718   bool result = promotion_estimate < old_gen->free_in_bytes();
   720   if (PrintGCDetails && Verbose) {
   721     gclog_or_tty->print(result ? "  do scavenge: " : "  skip scavenge: ");
   722     gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
   723       " padded_average_promoted " SIZE_FORMAT
   724       " free in old gen " SIZE_FORMAT,
   725       (size_t) policy->average_promoted_in_bytes(),
   726       (size_t) policy->padded_average_promoted_in_bytes(),
   727       old_gen->free_in_bytes());
   728     if (young_gen->used_in_bytes() <
   729         (size_t) policy->padded_average_promoted_in_bytes()) {
   730       gclog_or_tty->print_cr(" padded_promoted_average is greater"
   731         " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
   732     }
   733   }
   735   if (result) {
   736     _consecutive_skipped_scavenges = 0;
   737   } else {
   738     _consecutive_skipped_scavenges++;
   739     if (UsePerfData) {
   740       counters->update_scavenge_skipped(promoted_too_large);
   741     }
   742   }
   743   return result;
   744 }
   746   // Used to add tasks
   747 GCTaskManager* const PSScavenge::gc_task_manager() {
   748   assert(ParallelScavengeHeap::gc_task_manager() != NULL,
   749    "shouldn't return NULL");
   750   return ParallelScavengeHeap::gc_task_manager();
   751 }
   753 void PSScavenge::initialize() {
   754   // Arguments must have been parsed
   756   if (AlwaysTenure) {
   757     _tenuring_threshold = 0;
   758   } else if (NeverTenure) {
   759     _tenuring_threshold = markOopDesc::max_age + 1;
   760   } else {
   761     // We want to smooth out our startup times for the AdaptiveSizePolicy
   762     _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
   763                                                     MaxTenuringThreshold;
   764   }
   766   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   767   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   769   PSYoungGen* young_gen = heap->young_gen();
   770   PSOldGen* old_gen = heap->old_gen();
   771   PSPermGen* perm_gen = heap->perm_gen();
   773   // Set boundary between young_gen and old_gen
   774   assert(perm_gen->reserved().end() <= old_gen->object_space()->bottom(),
   775          "perm above old");
   776   assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
   777          "old above young");
   778   _young_generation_boundary = young_gen->eden_space()->bottom();
   780   // Initialize ref handling object for scavenging.
   781   MemRegion mr = young_gen->reserved();
   782   _ref_processor = ReferenceProcessor::create_ref_processor(
   783     mr,                         // span
   784     true,                       // atomic_discovery
   785     true,                       // mt_discovery
   786     NULL,                       // is_alive_non_header
   787     ParallelGCThreads,
   788     ParallelRefProcEnabled);
   790   // Cache the cardtable
   791   BarrierSet* bs = Universe::heap()->barrier_set();
   792   assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
   793   _card_table = (CardTableExtension*)bs;
   795   _counters = new CollectorCounters("PSScavenge", 0);
   796 }
