src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp

author:      aoqi
date:        Fri, 29 Apr 2016 00:06:10 +0800
changeset:   1:2d8a650513c2
parent:      0:f90c822e73f8
child:       25:873fd82b133d
permissions: -rw-r--r--

Added MIPS 64-bit port.

/*
 * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "gc_implementation/parallelScavenge/psTasks.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/mutableNUMASpace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;
int                        PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
CardTableExtension*        PSScavenge::_card_table = NULL;
bool                       PSScavenge::_survivor_overflow = false;
uint                       PSScavenge::_tenuring_threshold = 0;
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
uintptr_t                  PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer               PSScavenge::_accumulated_time;
STWGCTimer                 PSScavenge::_gc_timer;
ParallelScavengeTracer     PSScavenge::_gc_tracer;
Stack<markOop, mtGC>       PSScavenge::_preserved_mark_stack;
Stack<oop, mtGC>           PSScavenge::_preserved_oop_stack;
CollectorCounters*         PSScavenge::_counters = NULL;

// Liveness test used during reference processing: an object outside the
// young gen is treated as alive; a young-gen object is alive iff the
// scavenge has already forwarded it.
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

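// Keep-alive closure applied to the referents that reference processing has
// decided to keep: if the referent still lives in the young gen and has not
// been forwarded yet, it is copied (or promoted) just like an object found
// through a strong root.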
class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
    assert (!oopDesc::is_null(*p), "expected non-null ref");
    assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
            "expected an oop while scanning weak refs");

    // Weak refs may be visited more than once.
    if (PSScavenge::should_scavenge(p, _to_space)) {
      PSScavenge::copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(_promotion_manager, p);
    }
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};

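// Transitively evacuates everything reachable from already-copied objects:
// do_void() drains the promotion manager's task stacks until no work remains.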
class PSEvacuateFollowersClosure: public VoidClosure {
 private:
  PSPromotionManager* _promotion_manager;
 public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}

  virtual void do_void() {
    assert(_promotion_manager != NULL, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");
  }
};

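// Used after a promotion failure: any object whose header was forwarded gets
// its mark word reset to the default, undoing the forwarding.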
class PSPromotionFailedClosure : public ObjectClosure {
  virtual void do_object(oop obj) {
    if (obj->is_forwarded()) {
      obj->init_mark();
    }
  }
};

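// GCTask wrapper that lets the reference processor's ProcessTask run on a GC
// worker thread; each proxy instance handles one worker id.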
class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;
  uint          _work_id;
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which);
};

void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  PSPromotionManager* promotion_manager =
    PSPromotionManager::gc_thread_promotion_manager(which);
  assert(promotion_manager != NULL, "sanity check");
  PSKeepAliveClosure keep_alive(promotion_manager);
  PSEvacuateFollowersClosure evac_followers(promotion_manager);
  PSIsAliveClosure is_alive;
  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
}

class PSRefEnqueueTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;
  uint         _work_id;

public:
  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
    : _enq_task(enq_task),
      _work_id(work_id)
  { }

  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
  virtual void do_it(GCTaskManager* manager, uint which)
  {
    _enq_task.work(_work_id);
  }
};

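// Executor handed to the reference processor for multi-threaded reference
// processing and enqueueing: it fans each task out to all active GC workers
// via the GCTaskManager.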
class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for (uint i = 0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  ParallelTaskTerminator terminator(manager->active_workers(),
                 (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && manager->active_workers() > 1) {
    for (uint j = 0; j < manager->active_workers(); j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  manager->execute_and_wait(q);
}

void PSRefProcTaskExecutor::execute(EnqueueTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
  for (uint i = 0; i < manager->active_workers(); i++) {
    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
  }
  manager->execute_and_wait(q);
}

// This method contains all heap specific policy for invoking scavenge.
// PSScavenge::invoke_no_policy() will do nothing but attempt to
// scavenge. It will not clean up after failed promotions, bail out if
// we've exceeded policy time limits, or perform any other special behavior.
// All such policy should be placed here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
bool PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  const bool scavenge_done = PSScavenge::invoke_no_policy();
  bool need_full_gc;
  if (UseOldNUMA) {
    need_full_gc = !scavenge_done ||
      policy->should_full_GC(heap->old_gen()->free_in_bytes_numa());
  } else {
    need_full_gc = !scavenge_done ||
      policy->should_full_GC(heap->old_gen()->free_in_bytes());
  }
  bool full_gc_done = false;

  if (UsePerfData) {
    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
    counters->update_full_follows_scavenge(ffs_val);
  }

  if (need_full_gc) {
    GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
    CollectorPolicy* cp = heap->collector_policy();
    const bool clear_all_softrefs = cp->should_clear_all_soft_refs();

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
    }
  }

  return full_gc_done;
}

/* 2014/7/7 Liao: Variables added to collect detailed statistics during GC. */
/* Used for object copy statistics. */
float each_gc_copy_time[16];
int   each_gc_copy_fre[16];

/* Used for GC detail statistics. */
float total_gc_time = 0;
int   total_gc_fre  = 0;

/* Used to measure the ThreadRoots optimization. */
int task_tag[16];
// Per-CPU statistics
int each_total_num[16];
int each_eden_total_num[3][16];
int each_eden_aligned_num[3][16];
// Per-GC statistics
int every_total_num;
int every_eden_total_num[3];
int every_eden_aligned_num[3];
// Cumulative statistics across all GCs
int all_total_num;
int all_eden_total_num[3];
int all_eden_aligned_num[3];

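// Judging from the report code at the end of invoke_no_policy(), index 0 of
// the [3]-sized arrays counts ThreadRootsTask objects, index 1
// OldToYoungRootsTask objects, and index 2 StealTask objects.
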
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  _gc_timer.register_gc_start();

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  if (UseStasticCopy) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      each_gc_copy_time[i] = 0;
      each_gc_copy_fre[i] = 0;
    }
  }

  if (UseStasticScavenge) {
    for (int j = 0; j < 3; j++) {
      for (uint i = 0; i < ParallelGCThreads; i++) {
        task_tag[i] = 0;

        each_total_num[i] = 0;
        each_eden_total_num[j][i] = 0;
        each_eden_aligned_num[j][i] = 0;

        every_total_num = 0;
        every_eden_total_num[j]  = 0;
        every_eden_aligned_num[j] = 0;
      }
    }
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();
    heap->set_par_threads(active_workers);

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime tm("Scavenge", false, false, &_gc_timer);
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

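      // With UseOldNUMA the old gen object space is a MutableNUMASpace with
      // one chunk per locality group, so the pre-GC top must be recorded per
      // lgrp space before the striped old-to-young scanning tasks are queued.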
      if (UseOldNUMA) {
        MutableSpace* sp;
        MutableNUMASpace::LGRPSpace* ls;
        MutableNUMASpace* s = (MutableNUMASpace*) old_gen->object_space();
        int i, j;
        i = s->lgrp_spaces()->length();
        // One saved top per lgrp space; note sizeof(HeapWord*), since
        // gen_top is an array of pointers.
        HeapWord** gen_top = (HeapWord**) malloc(i * sizeof(HeapWord*));
        for (j = 0; j < i; j++) {
          ls = s->lgrp_spaces()->at(j);
          sp = ls->space();
          *(gen_top + j) = sp->top();
        }

        if (!old_gen->object_space()->is_empty()) {
          uint stripe_total = active_workers;
          for (uint i = 0; i < stripe_total; i++) {
            q->enqueue(new OldToYoungRootsTask_OldNUMA(old_gen, gen_top, i, stripe_total));
          }
        }
      } else {
        if (!old_gen->object_space()->is_empty()) {
          // There are only old-to-young pointers if there are objects
          // in the old gen.
          uint stripe_total = active_workers;
          for (uint i = 0; i < stripe_total; i++) {
            q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
          }
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

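      // When more than one worker is active, pair the root-scanning tasks
      // with StealTask instances: a worker whose own queue runs dry steals
      // work from peers, and the ParallelTaskTerminator detects completion.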
      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime tm("References", false, false, &_gc_timer);

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &_gc_timer);
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
      }

      _gc_tracer.report_gc_reference_stats(stats);

      // Enqueue reference objects discovered during scavenge.
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->enqueue_discovered_references(&task_executor);
      } else {
        reference_processor()->enqueue_discovered_references(NULL);
      }
    }

    {
      GCTraceTime tm("StringTable", false, false, &_gc_timer);
      // Unlink any dead interned Strings and process the remaining live ones.
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done.  Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

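    // On a successful scavenge, eden and from-space no longer hold live
    // objects (survivors were copied to to-space or promoted), so clearing
    // them and swapping from/to leaves the survivors in the new from-space.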
    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                         heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d",
              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
          }
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying that the
        // old generation size may have been limited because of them) we
        // should then limit our young generation size using NewRatio to have
        // it follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                        size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                         heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout. Make sure eden is reshaped if
      // that is the case. update() will also cause adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    {
      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);

      CodeCache::prune_scavenge_root_nmethods();
    }

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

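  // Loongson statistics: accumulate per-scavenge wall time. Dividing the
  // ticks delta by 1e9 presumably assumes a nanosecond-resolution tick source.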
  if (PrintGCDetails) {
    float young_gc_time;
    total_gc_fre++;
    young_gc_time = ((float)(scavenge_exit.ticks() - scavenge_entry.ticks())) / 1e9;
    total_gc_time = total_gc_time + ((float)(scavenge_exit.ticks() - scavenge_entry.ticks())) / 1e9;
    tty->print_cr("total_gc_fre = %d, young_gc_time = %f, total_gc_time = %f", total_gc_fre, young_gc_time, total_gc_time);
  }

  if (UseStasticCopy) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      tty->print_cr("each_gc_copy_time[%d] = %f", i, each_gc_copy_time[i]/each_gc_copy_fre[i]);
    }
    tty->print_cr("");
    for (uint i = 0; i < ParallelGCThreads; i++) {
      tty->print_cr("each_gc_copy_fre[%d] = %d", i, each_gc_copy_fre[i]);
    }
  }

  if (UseStasticScavenge) {
    for (int i = 0; i < 3; i++) {
      for (uint j = 0; j < ParallelGCThreads; j++) {
        every_eden_total_num[i] += each_eden_total_num[i][j];
        every_eden_aligned_num[i] += each_eden_aligned_num[i][j];
      }
    }

    for (uint i = 0; i < ParallelGCThreads; i++) {
      every_total_num += each_total_num[i];
    }

    all_total_num += every_total_num;

    for (int i = 0; i < 3; i++) {
      all_eden_total_num[i] += every_eden_total_num[i];
      all_eden_aligned_num[i] += every_eden_aligned_num[i];
    }

    tty->print_cr("============= Every GCDetails: =============");
    tty->print_cr("ThreadRootTask: prop of all = %f, prop of aligned = %f", (float)every_eden_total_num[0]/(float)every_total_num, (float)every_eden_aligned_num[0]/(float)every_eden_total_num[0]);
    tty->print_cr("OldToYoungRootTask: prop of all = %f, prop of aligned = %f", (float)every_eden_total_num[1]/(float)every_total_num, (float)every_eden_aligned_num[1]/(float)every_eden_total_num[1]);
    tty->print_cr("StealTask: prop of all = %f, prop of aligned = %f", (float)every_eden_total_num[2]/(float)every_total_num, (float)every_eden_aligned_num[2]/(float)every_eden_total_num[2]);
    tty->print_cr("");

    tty->print_cr("============= Total GCDetails: =============");
    tty->print_cr("ThreadRootTask: prop of all = %f, prop of aligned = %f", (float)all_eden_total_num[0]/(float)all_total_num, (float)all_eden_aligned_num[0]/(float)all_eden_total_num[0]);
    tty->print_cr("OldToYoungRootTask: prop of all = %f, prop of aligned = %f", (float)all_eden_total_num[1]/(float)all_total_num, (float)all_eden_aligned_num[1]/(float)all_eden_total_num[1]);
    tty->print_cr("StealTask: prop of all = %f, prop of aligned = %f", (float)all_eden_total_num[2]/(float)all_total_num, (float)all_eden_aligned_num[2]/(float)all_eden_total_num[2]);
    tty->print_cr("");
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}

// This method iterates over all objects in the young generation,
// unforwarding markOops. It then restores any preserved mark oops,
// and clears the _preserved_mark_stack.
void PSScavenge::clean_up_failed_promotion() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  {
    ResourceMark rm;

    // Unforward all pointers in the young gen.
    PSPromotionFailedClosure unforward_closure;
    young_gen->object_iterate(&unforward_closure);

    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Restoring %d marks", _preserved_oop_stack.size());
    }

    // Restore any saved marks.
    while (!_preserved_oop_stack.is_empty()) {
      oop obj      = _preserved_oop_stack.pop();
      markOop mark = _preserved_mark_stack.pop();
      obj->set_mark(mark);
    }

    // Clear the preserved mark and oop stack caches.
    _preserved_mark_stack.clear(true);
    _preserved_oop_stack.clear(true);
  }

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    // Should use per-worker private stacks here rather than
    // locking a common pair of stacks.
    ThreadCritical tc;
    _preserved_oop_stack.push(obj);
    _preserved_mark_stack.push(obj_mark);
  }
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();

  if (UsePerfData) {
    counters->update_scavenge_skipped(not_skipped);
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!ScavengeWithObjectsInToSpace) {
    // Do not attempt to promote unless to_space is empty
    if (!young_gen->to_space()->is_empty()) {
      _consecutive_skipped_scavenges++;
      if (UsePerfData) {
        counters->update_scavenge_skipped(to_space_not_empty);
      }
      return false;
    }
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // A similar test is done in the policy's should_full_GC().  If this is
  // changed, decide if that test should also be changed.
  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  bool result = promotion_estimate < old_gen->free_in_bytes();

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(result ? "  do scavenge: " : "  skip scavenge: ");
    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
      " padded_average_promoted " SIZE_FORMAT
      " free in old gen " SIZE_FORMAT,
      (size_t) policy->average_promoted_in_bytes(),
      (size_t) policy->padded_average_promoted_in_bytes(),
      old_gen->free_in_bytes());
    if (young_gen->used_in_bytes() <
        (size_t) policy->padded_average_promoted_in_bytes()) {
      gclog_or_tty->print_cr(" padded_promoted_average is greater"
        " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
    }
  }

  if (result) {
    _consecutive_skipped_scavenges = 0;
  } else {
    _consecutive_skipped_scavenges++;
    if (UsePerfData) {
      counters->update_scavenge_skipped(promoted_too_large);
    }
  }
  return result;
}

// Used to add tasks
GCTaskManager* const PSScavenge::gc_task_manager() {
  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
   "shouldn't return NULL");
  return ParallelScavengeHeap::gc_task_manager();
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure) {
    _tenuring_threshold = 0;
  } else if (NeverTenure) {
    _tenuring_threshold = markOopDesc::max_age + 1;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  set_young_generation_boundary(young_gen->eden_space()->bottom());

  // Initialize ref handling object for scavenging.
  MemRegion mr = young_gen->reserved();

  _ref_processor =
    new ReferenceProcessor(mr,                         // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           (int) ParallelGCThreads,    // mt processing degree
                           true,                       // mt discovery
                           (int) ParallelGCThreads,    // mt discovery degree
                           true,                       // atomic_discovery
                           NULL);                      // header provides liveness info

  // Cache the cardtable
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  _card_table = (CardTableExtension*)bs;

  _counters = new CollectorCounters("PSScavenge", 0);
}
