src/share/vm/memory/collectorPolicy.cpp

changeset:   4457:59a58e20dc60
author:      jmasa
date:        Thu, 17 Jan 2013 19:04:48 -0800
parent:      4387:ca0a78017dc7
child:       4542:db9981fd3124
child:       4554:95ccff9eee8e
permissions: -rw-r--r--

8006537: Assert when dumping archive with default methods
Reviewed-by: coleenp

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generationSpec.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#ifndef SERIALGC
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#endif

// CollectorPolicy methods.

void CollectorPolicy::initialize_flags() {
  if (MetaspaceSize > MaxMetaspaceSize) {
    MaxMetaspaceSize = MetaspaceSize;
  }
  MetaspaceSize = MAX2(min_alignment(), align_size_down_(MetaspaceSize, min_alignment()));
  // Don't increase Metaspace size limit above specified.
  MaxMetaspaceSize = align_size_down(MaxMetaspaceSize, max_alignment());
  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MinMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MinMetaspaceExpansion, min_alignment()));
  MaxMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MaxMetaspaceExpansion, min_alignment()));

  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());

  assert(MetaspaceSize    % min_alignment() == 0, "metaspace alignment");
  assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment");
  if (MetaspaceSize < 256*K) {
    vm_exit_during_initialization("Too small initial Metaspace size");
  }
}
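
// Illustrative walk-through of the clamping above (hypothetical values;
// the actual alignments are platform dependent): assume min_alignment()
// is 64K, max_alignment() is 4M, -XX:MetaspaceSize=21M and
// -XX:MaxMetaspaceSize=18M.  The first check raises MaxMetaspaceSize to
// 21M; aligning down leaves MetaspaceSize at 21M (a multiple of 64K)
// but reduces MaxMetaspaceSize to 20M, so the final check lowers
// MetaspaceSize to 20M as well.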

void CollectorPolicy::initialize_size_info() {
  // User inputs from -Xmx and -Xms are aligned
  set_initial_heap_byte_size(InitialHeapSize);
  if (initial_heap_byte_size() == 0) {
    set_initial_heap_byte_size(NewSize + OldSize);
  }
  set_initial_heap_byte_size(align_size_up(_initial_heap_byte_size,
                                           min_alignment()));

  set_min_heap_byte_size(Arguments::min_heap_size());
  if (min_heap_byte_size() == 0) {
    set_min_heap_byte_size(NewSize + OldSize);
  }
  set_min_heap_byte_size(align_size_up(_min_heap_byte_size,
                                       min_alignment()));

  set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));

  // Check heap parameter properties
  if (initial_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  // Check heap parameter properties
  if (min_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }
  if (initial_heap_byte_size() <= NewSize) {
    // Make sure there is at least some room in the old space.
    vm_exit_during_initialization("Too small initial heap for new size specified");
  }
  if (max_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
  }
  if (initial_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  if (max_heap_byte_size() < initial_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT "  Initial heap "
      SIZE_FORMAT "  Maximum heap " SIZE_FORMAT,
      min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
  }
}

bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
  bool result = _should_clear_all_soft_refs;
  set_should_clear_all_soft_refs(false);
  return result;
}

GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                           int max_covered_regions) {
  switch (rem_set_name()) {
  case GenRemSet::CardTable: {
    CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
    return res;
  }
  default:
    guarantee(false, "unrecognized GenRemSet::Name");
    return NULL;
  }
}

void CollectorPolicy::cleared_all_soft_refs() {
  // If near the gc overhead limit, continue to clear SoftRefs.  SoftRefs may
  // have been cleared in the last collection but if the gc overhead
  // limit continues to be near, SoftRefs should still be cleared.
  if (size_policy() != NULL) {
    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
  }
  _all_soft_refs_clear = true;
}

// GenCollectorPolicy methods.

size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  size_t x = base_size / (NewRatio+1);
  size_t new_gen_size = x > min_alignment() ?
                     align_size_down(x, min_alignment()) :
                     min_alignment();
  return new_gen_size;
}
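
// Worked example for scale_by_NewRatio_aligned (hypothetical numbers):
// with NewRatio=2 and base_size=96M, x = 96M / 3 = 32M.  32M exceeds
// min_alignment(), so it is aligned down (a no-op when 32M is already a
// multiple of the alignment) and 32M is returned as the young gen size.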

size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                 size_t maximum_size) {
  size_t alignment = min_alignment();
  size_t max_minus = maximum_size - alignment;
  return desired_size < max_minus ? desired_size : max_minus;
}
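
// Worked example for bound_minus_alignment (hypothetical numbers): with
// maximum_size=64M and an assumed min_alignment() of 64K, max_minus is
// 64M - 64K; any desired_size below that passes through unchanged, while
// larger requests are capped at 64M - 64K, leaving one alignment unit of
// headroom below the bound.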

void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
  const double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_minor_pause_sec,
                                        GCTimeRatio);
}

size_t GenCollectorPolicy::compute_max_alignment() {
  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well. Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
  // byte entry and the os page size is 4096, the maximum heap size should
  // be 512*4096 = 2MB aligned.
  size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());

  // Parallel GC does its own alignment of the generations to avoid requiring a
  // large page (256M on some platforms) for the permanent generation.  The
  // other collectors should also be updated to do their own alignment and then
  // this use of lcm() should be removed.
  if (UseLargePages && !UseParallelGC) {
      // In the presence of large pages we have to make sure that our
      // alignment is large page aware.
      alignment = lcm(os::large_page_size(), alignment);
  }

  return alignment;
}
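
// Illustrative example of the lcm() step (hypothetical numbers): if the
// remembered-set constraint yields 2M (512-byte cards on 4K os pages, as
// in the comment above) and os::large_page_size() is 256M, then
// lcm(256M, 2M) = 256M, so the maximum heap size is aligned to 256M
// when large pages are in use.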

void GenCollectorPolicy::initialize_flags() {
  // All sizes must be multiples of the generation granularity.
  set_min_alignment((uintx) Generation::GenGrain);
  set_max_alignment(compute_max_alignment());
  assert(max_alignment() >= min_alignment() &&
         max_alignment() % min_alignment() == 0,
         "invalid alignment constraints");

  CollectorPolicy::initialize_flags();

  // All generational heaps have a youngest gen; handle those flags here.

  // Adjust max size parameters
  if (NewSize > MaxNewSize) {
    MaxNewSize = NewSize;
  }
  NewSize = align_size_down(NewSize, min_alignment());
  MaxNewSize = align_size_down(MaxNewSize, min_alignment());

  // Check validity of heap flags
  assert(NewSize     % min_alignment() == 0, "eden space alignment");
  assert(MaxNewSize  % min_alignment() == 0, "survivor space alignment");

  if (NewSize < 3*min_alignment()) {
    // Make sure there is room for eden and two survivor spaces.
    vm_exit_during_initialization("Too small new size specified");
  }
  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid heap ratio specified");
  }
}

void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  OldSize = align_size_down(OldSize, min_alignment());
  if (NewSize + OldSize > MaxHeapSize) {
    MaxHeapSize = NewSize + OldSize;
  }
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  always_do_update_barrier = UseConcMarkSweepGC;

  // Check validity of heap flags
  assert(OldSize     % min_alignment() == 0, "old space alignment");
  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
}

// Values set on the command line win over any ergonomically
// set command line parameters.
// Ergonomic choices of parameters are made before this
// method is called.  Values for command line parameters such as NewSize
// and MaxNewSize feed those ergonomic choices into this method.
// This method makes the final generation sizings consistent with
// themselves and with overall heap sizings.
// In the absence of explicitly set command line flags, policies
// such as the use of NewRatio are used to size the generations.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // min_alignment() is used for alignment within a generation.
  // There is additional alignment done downstream for some
  // collectors that sometimes causes unwanted rounding up of
  // generation sizes.

  // Determine the maximum size of gen0.

  size_t max_new_size = 0;
  if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
    if (MaxNewSize < min_alignment()) {
      max_new_size = min_alignment();
    }
    if (MaxNewSize >= max_heap_byte_size()) {
      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
                                     min_alignment());
      warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
        "greater than the entire heap (" SIZE_FORMAT "k).  A "
        "new generation size of " SIZE_FORMAT "k will be used.",
        MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
    } else {
      max_new_size = align_size_down(MaxNewSize, min_alignment());
    }

  // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
  // specially at this point to just use an ergonomically set
  // MaxNewSize to set max_new_size.  For cases with small
  // heaps such a policy often did not work because the MaxNewSize
  // was larger than the entire heap.  The interpretation given
  // to ergonomically set flags is that the flags are set
  // by different collectors for their own special needs but
  // are not allowed to badly shape the heap.  This allows the
  // different collectors to decide what's best for themselves
  // without having to factor in the overall heap shape.  It
  // can be the case in the future that the collectors would
  // only make "wise" ergonomics choices and this policy could
  // just accept those choices.  The choices currently made are
  // not always "wise".
  } else {
    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum gen0 sizes.

  if (max_heap_byte_size() == min_heap_byte_size()) {
    // The maximum and minimum heap sizes are the same so
    // the generation's minimum and initial sizes must be the
    // same as its maximum.
    set_min_gen0_size(max_new_size);
    set_initial_gen0_size(max_new_size);
    set_max_gen0_size(max_new_size);
  } else {
    size_t desired_new_size = 0;
    if (!FLAG_IS_DEFAULT(NewSize)) {
      // If NewSize is set ergonomically (for example by cms), it
      // would make sense to use it.  If it is used, also use it
      // to set the initial size.  Although there is no reason
      // the minimum size and the initial size have to be the same,
      // the current implementation gets into trouble during the calculation
      // of the tenured generation sizes if they are different.
      // Note that this makes the initial size and the minimum size
      // generally small compared to the NewRatio calculation.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values.  If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
                          NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
             NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    set_initial_gen0_size(desired_new_size);
    set_max_gen0_size(max_new_size);

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    set_min_gen0_size(
      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
    set_initial_gen0_size(
      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
    set_max_gen0_size(
      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.

    // Final check min <= initial <= max
    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
    set_initial_gen0_size(
      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
      SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
      min_gen0_size(), initial_gen0_size(), max_gen0_size());
  }
}
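
// Illustrative trace of the final "min <= initial <= max" check above
// (hypothetical values): starting from min=40M, initial=30M, max=35M,
// the three steps give min = MIN2(40M, 35M) = 35M, then
// initial = MAX2(MIN2(30M, 35M), 35M) = 35M, and finally
// min = MIN2(35M, 35M) = 35M, restoring a consistent ordering.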

// Call this method during the sizing of gen1 to make
// adjustments to gen0 because of gen1 sizing policy.  gen0 initially has
// the most freedom in sizing because it is done before the
// policy for gen1 is applied.  Once gen1 policies have been applied,
// there may be conflicts in the shape of the heap and this method
// is used to make the needed adjustments.  The application of the
// policies could be more sophisticated (iterative for example) but
// keeping it simple also seems a worthwhile goal.
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     size_t heap_size,
                                                     size_t min_gen0_size) {
  bool result = false;
  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
    if (((*gen0_size_ptr + OldSize) > heap_size) &&
       (heap_size - min_gen0_size) >= min_alignment()) {
      // Adjust gen0 down to accommodate OldSize
      *gen0_size_ptr = heap_size - min_gen0_size;
      *gen0_size_ptr =
        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
             min_alignment());
      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
      result = true;
    } else {
      *gen1_size_ptr = heap_size - *gen0_size_ptr;
      *gen1_size_ptr =
        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
                       min_alignment());
    }
  }
  return result;
}
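
// Worked example (hypothetical values; note that the call sites below
// pass OldSize as the min_gen0_size argument): with heap_size=96M,
// *gen0_size_ptr=64M, *gen1_size_ptr=48M and min_gen0_size=48M, the sum
// 64M + 48M exceeds the 96M heap and gen0 + OldSize also exceeds it, so
// gen0 is shrunk to 96M - 48M = 48M (aligned down) and true is returned.
// Had gen0 + OldSize still fit, gen1 would have been shrunk instead.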

// Minimum sizes of the generations may be different from
// the initial sizes.  An inconsistency is permitted here
// in the total size that can be specified explicitly by
// command line specification of OldSize and NewSize and
// also a command line specification of -Xms.  Issue a warning
// but allow the values to pass.

void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flag exists
  // for setting the gen1 maximum.
  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
  _max_gen1_size =
    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
         min_alignment());
  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
    // The user has not specified any value or ergonomics
    // has chosen a value (which may or may not be consistent
    // with the overall heap size).  In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    assert(min_heap_byte_size() > _min_gen0_size,
      "gen0 has an unexpected minimum size");
    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
    set_min_gen1_size(
      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
           min_alignment()));
    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
    set_initial_gen1_size(
      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
           min_alignment()));

  } else {
    // It's been explicitly set on the command line.  Use the
    // OldSize and then determine the consequences.
    set_min_gen1_size(OldSize);
    set_initial_gen1_size(OldSize);

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one heap alignment.
    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
           min_heap_byte_size()) {
      warning("Inconsistency between minimum heap size and minimum "
          "generation sizes: using minimum heap = " SIZE_FORMAT,
          min_heap_byte_size());
    }
    if (OldSize > _max_gen1_size) {
      warning("Inconsistency between maximum heap size and maximum "
          "generation sizes: using maximum heap = " SIZE_FORMAT
          " -XX:OldSize flag is being ignored",
          max_heap_byte_size());
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
                          min_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
              SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
              min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                         initial_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
  }
  // Enforce the maximum gen1 size.
  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));

  // Check that min gen1 <= initial gen1 <= max gen1
  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT "  Initial gen1 "
      SIZE_FORMAT "  Maximum gen1 " SIZE_FORMAT,
      min_gen1_size(), initial_gen1_size(), max_gen1_size());
  }
}
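
// Overview of the allocation slow path implemented by mem_allocate_work
// below: (1) try a lock-free allocation in gen0; (2) under the Heap_lock,
// attempt an allocation, letting only large objects try the older
// generations; (3) if the GC locker is active, expand the heap, stall
// until the JNI critical section clears, or give up; (4) otherwise
// submit a VM_GenCollectForAllocation operation and retry, returning
// NULL once the GC overhead limit has been exceeded so an out-of-memory
// error can be thrown.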

HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                        bool is_tlab,
                                        bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                      " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
         assert(result == NULL, "must be NULL if gc_locked() is true");
         continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory error
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the overhead exceeded does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();
      assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
          warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
                  " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_tlab)) {
      result = gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}

HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // could be null if we are out of space
  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    IntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
                                                 ClassLoaderData* loader_data,
                                                 size_t word_size,
                                                 Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = NULL;
    if (GC_locker::is_active_and_needs_gc()) {
      // If the GC_locker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result =
        loader_data->metaspace_non_null()->expand_and_allocate(word_size,
                                                               mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GC_locker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need lock to get self-consistent gc counts
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again.  Check
    // before checking success because the prologue
    // could have succeeded and the GC still have
    // been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
  return    (word_size > heap_word_size(gen0_capacity))
         || GC_locker::is_active_and_needs_gc()
         || gch->incremental_collection_failed();
}

//
// MarkSweepPolicy methods
//

MarkSweepPolicy::MarkSweepPolicy() {
  initialize_all();
}

void MarkSweepPolicy::initialize_generations() {
  _generations = new GenerationSpecPtr[number_of_generations()];
  if (_generations == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");

  if (UseParNewGC) {
    _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
  } else {
    _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size);
  }
  _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);

  if (_generations[0] == NULL || _generations[1] == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");
}

void MarkSweepPolicy::initialize_gc_policy_counters() {
  // Initialize the policy counters - 2 collectors, 3 generations.
  if (UseParNewGC) {
    _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
  } else {
    _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
  }
}
