src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp

/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;
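
// First-phase heap setup: reserve the address space for the Java heap,
// create the card-table barrier set and the adjoining generations, and
// set up the adaptive size policy and the GC task manager.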
jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Initialize collector policy
  _collector_policy = new GenerationSizer();
  _collector_policy->initialize_all();

  const size_t heap_size = _collector_policy->max_heap_byte_size();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());
  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);

  os::trace_page_sizes("ps main", _collector_policy->min_heap_byte_size(),
                       heap_size, generation_alignment(),
                       heap_rs.base(),
                       heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  if (barrier_set == NULL) {
    // Check for allocation failure before the barrier set is used;
    // checking afterwards would dereference a NULL pointer first.
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }
  barrier_set->initialize();
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);

  // Make up the generations
  // Calculate the maximum size that a generation can grow.  This
  // includes growth into the other generation.  Note that the
  // parameter _max_gen_size is kept as the maximum
  // size of the generation as the boundaries currently stand.
  // _max_gen_size is still used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             _collector_policy->gen_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(!UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // Initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}
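
// Complete the setup that must wait until after initialize(): the scavenger,
// the full-collection implementation selected by UseParallelOldGC, and the
// per-thread promotion managers.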
void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}
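
// Refresh the performance counters (young gen, old gen, metaspace) that are
// exposed to external monitoring tools such as jstat.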
void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}
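
// Estimate the maximum usable capacity: the full reservation less the space
// that one survivor space always occupies, but never less than the current
// capacity.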
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}
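
// Only objects in the young gen are moved by a scavenge, so only they are
// scavengable.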
bool ParallelScavengeHeap::is_scavengable(const void* addr) {
  return is_in_young((oop)addr);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
  assert(is_in_reserved(p) || p == NULL,
    "Does not work if address is non-null and outside of the heap");
  // The order of the generations is old (low addr), young (high addr)
  return p >= old_gen()->reserved().end();
}
#endif

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so set it
  // to false here and reset it to true only if the gc time limit is being
  // exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until timeout on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
          "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // error will be thrown (return NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses).  Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
              "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen.  This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;    // death march has started
    }
  }
}
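
// Allocate directly in the old gen when the request is too large for eden,
// when GC is locked out, or (a bounded number of times) while a death march
// is in progress.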
HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}
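
// Run a full collection with whichever full-gc implementation is configured:
// parallel compaction when UseParallelOldGC is set, serial mark-sweep otherwise.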
void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements allocation-flow
// policy, NOT collection policy. We therefore do not check here whether
// gc time has exceeded its limit; that is the responsibility of the
// heap-specific collection methods. This method decides where to attempt
// allocations and when to attempt collections, but contains no
// collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}
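
// TLAB support. TLABs for this heap are carved exclusively out of eden,
// so all sizing and accounting queries delegate to the eden space.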
size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  if (GC_locker::should_discard(cause, gc_count)) {
    return;
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::oop_iterate(ExtendedOopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress())  return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}
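
// addr must be the start of an allocated object; the returned size is in
// HeapWords.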
size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}
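
// Capture a snapshot of the reserved, committed, and used state of every
// space for the GC tracer.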
PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceAux::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}

void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
    size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif
