src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp

author: trims
date: Thu, 27 May 2010 19:08:38 -0700
changeset: 1907:c18cbe5936b8

6941466: Oracle rebranding changes for Hotspot repositories
Summary: Change all the Sun copyrights to Oracle copyright
Reviewed-by: ohair

/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_parallelScavengeHeap.cpp.incl"

PSYoungGen*  ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*    ParallelScavengeHeap::_old_gen = NULL;
PSPermGen*   ParallelScavengeHeap::_perm_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

static void trace_gen_sizes(const char* const str,
                            size_t pg_min, size_t pg_max,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s:  " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str, pg_min / K, pg_max / K,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (pg_max + og_max + yg_max) / K);
  }
}
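
// Example (hypothetical numbers, added for illustration): with
// -XX:+TracePageSizes the line above prints, in KB, a min,max pair per
// generation followed by the combined maximum, e.g.
//   ps heap raw:  12288,65536 49152,196608 16384,65536 327680
// for a 12-64M perm gen, 48-192M old gen and 16-64M young gen.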

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Cannot be initialized until after the flags are parsed
  // GenerationSizer flag_parser;
  _collector_policy = new GenerationSizer();

  size_t yg_min_size = _collector_policy->min_young_gen_size();
  size_t yg_max_size = _collector_policy->max_young_gen_size();
  size_t og_min_size = _collector_policy->min_old_gen_size();
  size_t og_max_size = _collector_policy->max_old_gen_size();
  // Why isn't there a min_perm_gen_size()?
  size_t pg_min_size = _collector_policy->perm_gen_size();
  size_t pg_max_size = _collector_policy->max_perm_gen_size();

  trace_gen_sizes("ps heap raw",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The ReservedSpace ctor used below requires that the page size for the perm
  // gen is <= the page size for the rest of the heap (young + old gens).
  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);
  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
                                                          pg_max_size, 16),
                                 og_page_sz);
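
  // Note (added commentary): os::page_size_for_region() roughly returns the
  // largest supported page size that still allows the region to hold at
  // least the requested minimum number of pages (8 for the main heap, 16 for
  // the perm gen), falling back to the default page size for small regions.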

  const size_t pg_align = set_alignment(_perm_gen_alignment,  pg_page_sz);
  const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP.  The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy.  Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size =
    align_size_up(_collector_policy->young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  og_max_size = align_size_up(og_max_size, og_align);
  size_t og_cur_size =
    align_size_up(_collector_policy->old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  pg_min_size = align_size_up(pg_min_size, pg_align);
  pg_max_size = align_size_up(pg_max_size, pg_align);
  size_t pg_cur_size = pg_min_size;

  trace_gen_sizes("ps heap rnd",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

  // The main part of the heap (old gen + young gen) can often use a larger page
  // size than is needed or wanted for the perm gen.  Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size for
  // all gens.

  ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                            og_align, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !heap_rs.is_reserved()) {
      // Failed to reserve at specified address - the requested memory
      // region is taken already, for example, by 'java' launcher.
      // Try again to reserve the heap higher.
      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
      ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
                                 og_align, addr);
      if (addr != NULL && !heap_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");
        ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
                                   og_align, addr);
        heap_rs = heap_rs1;
      } else {
        heap_rs = heap_rs0;
      }
    }
  }
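
  // In short (added summary): with compressed oops the reservation is
  // attempted at progressively less favorable bases -- first one compatible
  // with unscaled narrow oops, then zero-based narrow oops, and finally
  // heap-based narrow oops, for which preferred_heap_base() returns NULL and
  // the OS picks the address.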

  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base() + pg_max_size,
                       heap_rs.size() - pg_max_size);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Initial young gen size is 4 MB
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);
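
  // Added commentary: the expression above starts the young gen at about 4M,
  // capped at yg_max_size, but never shrinks it below the policy-derived
  // yg_cur_size computed earlier.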

  // Split the reserved space into perm gen and the main heap (everything else).
  // The main heap uses a different alignment.
  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);
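
  // Added commentary: the resulting layout places the perm gen at the lowest
  // addresses, followed by the old gen and then the young gen; the assert
  // below checks that the old/young boundary is shared when
  // UseAdaptiveGCBoundary is in use.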

  // Make up the generations.
  //
  // Calculate the maximum size that a generation can grow.  This includes
  // growth into the other generation.  Note that _max_gen_size remains the
  // maximum size of the generation as the boundaries currently stand, and is
  // still used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(main_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  _perm_gen = new PSPermGen(perm_rs,
                            pg_align,
                            pg_cur_size,
                            pg_cur_size,
                            pg_max_size,
                            "perm", 2);

  assert(!UseAdaptiveGCBoundary ||
    (old_gen()->virtual_space()->high_boundary() ==
     young_gen()->virtual_space()->low_boundary()),
    "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  perm_gen()->update_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

size_t ParallelScavengeHeap::permanent_capacity() const {
  return perm_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::permanent_used() const {
  return perm_gen()->used_in_bytes();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  estimated -= perm_gen()->reserved().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}
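
// Added commentary: the estimate above excludes the perm gen and one
// survivor space from the total reservation, since only one survivor space
// holds objects at any given time; MAX2 keeps the result from falling below
// the current capacity().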

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  if (perm_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.
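//
// A caller-side sketch (hypothetical, not part of this file) of the
// mem_allocate() contract described above:
//
//   bool limit_exceeded = false;
//   HeapWord* obj = heap->mem_allocate(word_size,
//                                      false,  // is_noref
//                                      false,  // is_tlab
//                                      &limit_exceeded);
//   if (obj == NULL) {
//     // The heap is exhausted, or the GC overhead limit was exceeded if
//     // limit_exceeded is set; the caller throws OutOfMemoryError.
//   }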

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool is_noref,
                                     bool is_tlab,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size, is_tlab);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size, is_tlab);

      // (1) If the requested object is too large to easily fit in the
      //     young_gen, or
      // (2) If GC is locked out via GCLocker, the young gen is full and
      //     the need for a GC has already been signalled to GCLocker (done
      //     at a safepoint),
      // ... then, rather than force a safepoint and (a potentially futile)
      // collection (attempt) for each allocation, try allocation directly
      // in old_gen. For case (2) above, we may in the future allow
      // TLAB allocation directly in the old gen.
      if (result != NULL) {
        return result;
      }
      if (!is_tlab &&
          size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
        result = old_gen()->allocate(size, is_tlab);
        if (result != NULL) {
          return result;
        }
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // GC is locked out. If this is a TLAB allocation,
        // return NULL; the requestor will retry allocation
        // of an individual object at a time.
        if (is_tlab) {
          return NULL;
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until timing out on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
          "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // will be thrown (return a NULL ignoring the contents of
        // op.result()),
        // but clear gc_overhead_limit_exceeded so that the next collection
        // starts with a clean slate (i.e., forgets about previous overhead
        // excesses).  Fill op.result() with a filler object so that the
        // heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
        assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
              "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
    }
  }

  return result;
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements policy for the
// allocation flow, NOT collection policy. So we do not check here whether
// the gc time limit has been exceeded; that is the responsibility of the
// heap-specific collection methods. This method decides where to attempt
// allocations, and when to attempt collections, but contains no collection
// specific policy.
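//
// The fallback ladder below is, in order: scavenge, then allocate in the
// young gen; full GC (unless the scavenge already invoked one), then young
// gen; old gen; maximal full GC, then young gen; and finally the old gen
// again. (Summary added here; see the level comments in the body.)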
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  size_t mark_sweep_invocation_count = total_invocations();

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size, is_tlab);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL) {
    // There is some chance the scavenge method decided to invoke mark_sweep.
    // Don't mark sweep twice if so.
    if (mark_sweep_invocation_count == total_invocations()) {
      invoke_full_gc(false);
      result = young_gen()->allocate(size, is_tlab);
    }
  }

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    invoke_full_gc(true);
    result = young_gen()->allocate(size, is_tlab);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  return result;
}

//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();

      result = perm_gen()->allocate_permanent(size);

      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit.  Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_overhead_limit_exceeded so that the next collection
      // can succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      if (limit_exceeded) {
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
            " return NULL because gc_overhead_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until timing out on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
          "result not in heap");
        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now.  Clear the gc_overhead_limit_exceeded
        // flag to avoid the following situation:
        //      gc_overhead_limit_exceeded is set during a collection;
        //      the collection fails to return enough space and an OOM is thrown;
        //      a subsequent GC prematurely throws an out-of-memory because
        //        the gc_overhead_limit_exceeded counts did not start
        //        again from 0.
        if (op.result() == NULL) {
          size_policy()->reset_gc_overhead_limit_count();
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=%d", loop_count, size);
    }
  } while (result == NULL);

  return result;
}

//
// This is the policy code for permanent allocations which have failed
// and require a collection. Note that just as in failed_mem_allocate,
// we do not set collection policy, only where & when to allocate and
// collect.
HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  assert(size > perm_gen()->free_in_words(), "Allocation should fail");

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure.  Mark-sweep and allocate in perm gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  invoke_full_gc(false);
  HeapWord* result = perm_gen()->allocate_permanent(size);

  // Second level allocation failure. We're running out of memory.
  if (result == NULL) {
    invoke_full_gc(true);
    result = perm_gen()->allocate_permanent(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size, true);
}
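
// Added commentary: TLABs are always carved out of eden ('true' above is the
// is_tlab flag), which is why tlab_capacity() and unsafe_max_tlab_alloc()
// delegate to the eden space.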

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
    "this thread should not own the Heap_lock");

  unsigned int gc_count      = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      invoke_full_gc(false);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere();
  }
}

void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}

void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
  perm_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    if (Debugging)  return NULL;  // called from find() in debug.cpp
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}
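
// Added commentary: an address is the start of an object exactly when
// block_start() maps it to itself; for old and perm gen addresses this
// consults the generation's start array (see block_start() above).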

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

void ParallelScavengeHeap::print() const { print_on(tty); }

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}

void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
    size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
    perm_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
    perm_gen()->object_space()->mangle_unused_area();
  }
}
#endif
