src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp

author:      drchase
date:        Thu, 22 May 2014 15:52:41 -0400
changeset:   6680:78bbf4d43a14
parent:      6420:9fdaa79b0c27
child:       6876:710a3c8b516e
child:       7051:1f1d373cd044
permissions: -rw-r--r--

8037816: Fix for 8036122 breaks build with Xcode5/clang
8043029: Change 8037816 breaks HS build with older GCC versions which don't support diagnostic pragmas
8043164: Format warning in traceStream.hpp
Summary: Backport of main fix + two corrections, enables clang compilation, turns on format attributes, corrects/mutes warnings
Reviewed-by: kvn, coleenp, iveresov, twisti
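All three bug IDs concern compiler diagnostics: Xcode 5's clang rejects constructs that older compilers accepted (8037816), GCC versions without diagnostic-pragma support break on the pragmas the fix introduced (8043029), and turning on format attributes exposed a format warning in traceStream.hpp (8043164). As background, here is a minimal, illustrative sketch of the two mechanisms involved; the function name my_log is hypothetical and not part of HotSpot:

    #include <cstdio>

    // A printf-style format attribute lets GCC and clang type-check the
    // variadic arguments against the format string at compile time.
    void my_log(const char* fmt, ...) __attribute__((format(printf, 1, 2)));

    void example(const char* fmt) {
      // Diagnostic pragmas mute a specific warning for a specific region.
      // The push/pop pair is not understood by old GCC releases, which is
      // the kind of incompatibility behind 8043029.
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wformat-nonliteral"
      printf(fmt, 42);   // non-literal format string would otherwise warn
    #pragma GCC diagnostic pop
    }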

/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Initialize collector policy
  _collector_policy = new GenerationSizer();
  _collector_policy->initialize_all();

  const size_t heap_size = _collector_policy->max_heap_byte_size();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());
  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);

  os::trace_page_sizes("ps main", _collector_policy->min_heap_byte_size(),
                       heap_size, generation_alignment(),
                       heap_rs.base(),
                       heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Make up the generations
  // Calculate the maximum size that a generation can grow.  This
  // includes growth into the other generation.  Note that the
  // parameter _max_gen_size is kept as the maximum
  // size of the generation as the boundaries currently stand.
  // _max_gen_size is still used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             _collector_policy->gen_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(!UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
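
  // Reserved heap layout (low addresses to high):
  //
  //   +-----------------+--------------------------------+
  //   |     old gen     |  young gen (eden + survivors)  |
  //   +-----------------+--------------------------------+
  //
  // With -XX:+UseAdaptiveGCBoundary the boundary between the generations
  // can move at run time (see resize_young_gen() and resize_old_gen()
  // below); otherwise it stays where initialization put it.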
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}
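
// max_capacity() reports the reserved size minus room for one survivor
// space: only eden and one survivor can hold objects at any given time in
// a copying collector, so the to-space is not usable capacity. Under
// adaptive sizing the survivor size still varies, so it is estimated via
// the size policy.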
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_scavengable(const void* addr) {
  return is_in_young((oop)addr);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
  assert(is_in_reserved(p) || p == NULL,
    "Does not work if address is non-null and outside of the heap");
  // The order of the generations is old (low addr), young (high addr)
  return p >= old_gen()->reserved().end();
}
#endif

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.
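//
// In this file the basic allocation policy is implemented by mem_allocate()
// and mem_allocate_old_gen(), which run in the requesting (mutator) thread;
// the failed allocation policy is implemented by failed_mem_allocate(),
// which the VM thread runs at a safepoint on behalf of a
// VM_ParallelGCFailedAllocation operation.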

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so set
  // it to false here and reset it to true only if the gc time limit is
  // exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  int gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
          "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // will be thrown (return NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses).  Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
              "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen.  This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;    // death march has started
    }
  }
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}
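
// A death march episode thus spans both allocation paths:
// failed_mem_allocate() starts it via death_march_check(),
// mem_allocate_old_gen() then satisfies up to 64 eden-sized requests
// directly from the old gen, and once the count resets the next failing
// request falls through to a GC again.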

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method has policy for allocation
// flow, and NOT collection policy. So we do not check for gc time over
// limit here; that is the responsibility of the heap-specific collection
// methods. This method decides where to attempt allocations, and when to
// attempt collections, but contains no collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}
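
// TLABs are always carved out of eden, which is why the TLAB queries
// above all delegate to the eden space and why allocating a new TLAB is
// just a regular young gen allocation.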
HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  unsigned int gc_count      = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::oop_iterate(ExtendedOopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}


HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress())  return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceAux::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}


void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify();
  }
}
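
// Prints a transition of the form " <prev_used>-><used>(<capacity>)".
// With -XX:+PrintGCDetails -XX:+Verbose the numbers are raw byte counts;
// otherwise they are scaled to kilobytes, e.g. " 1234K->567K(2048K)".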
void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
    size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif
