src/share/vm/memory/collectorPolicy.cpp

author       jwilhelm
date         Thu, 03 Oct 2013 13:19:19 +0200
changeset    5818   ab68fc0101ce
parent       5814   9ecd6d3782b1
child        5855   9b4d0569f2f4
permissions  -rw-r--r--

8025855: Simplify GenRemSet code slightly
Summary: Remove a few redundant switch-statements
Reviewed-by: jcoomes, tschatzl

/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generationSpec.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
#endif // INCLUDE_ALL_GCS

// CollectorPolicy methods.

// Align down. If aligning the size down results in 0, return 'alignment'.
static size_t restricted_align_down(size_t size, size_t alignment) {
  return MAX2(alignment, align_size_down_(size, alignment));
}
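
// Illustrative examples (not part of the original source), assuming a
// power-of-two alignment as align_size_down_ expects:
//   restricted_align_down(1000, 256) == MAX2(256, 768) == 768
//   restricted_align_down(100, 256)  would align down to 0, so the floor
//   value of 256 (the alignment itself) is returned instead.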

void CollectorPolicy::initialize_flags() {
  assert(max_alignment() >= min_alignment(),
      err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
          max_alignment(), min_alignment()));
  assert(max_alignment() % min_alignment() == 0,
      err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
          max_alignment(), min_alignment()));

  if (MaxHeapSize < InitialHeapSize) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
  // override whether MaxMetaspaceSize was set on the command line or not.
  // This information is needed later to conform to the specification of the
  // java.lang.management.MemoryUsage API.
  //
  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
  // globals.hpp to the aligned value, but this is not possible, since the
  // alignment depends on other flags being parsed.
  MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, max_alignment());

  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = restricted_align_down(MetaspaceSize, min_alignment());

  assert(MetaspaceSize <= MaxMetaspaceSize, "Must be");

  MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment());
  MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment());

  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());

  assert(MetaspaceSize    % min_alignment() == 0, "metaspace alignment");
  assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment");
  if (MetaspaceSize < 256*K) {
    vm_exit_during_initialization("Too small initial Metaspace size");
  }
}

void CollectorPolicy::initialize_size_info() {
  // User inputs from -Xmx and -Xms must be aligned
  set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment()));
  set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment()));
  set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));

  // Check heap parameter properties
  if (initial_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  if (min_heap_byte_size() < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }
  if (initial_heap_byte_size() <= NewSize) {
    // Make sure there is at least some room in the old space.
    vm_exit_during_initialization("Too small initial heap for new size specified");
  }
  if (max_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
  }
  if (initial_heap_byte_size() < min_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  if (max_heap_byte_size() < initial_heap_byte_size()) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT "  Initial heap "
      SIZE_FORMAT "  Maximum heap " SIZE_FORMAT,
      min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
  }
}
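
// Returns the current value of _should_clear_all_soft_refs and resets the
// flag to false, so each decision to clear SoftReferences is consumed at
// most once. Note that the 'v' argument is currently unused.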
bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) {
  bool result = _should_clear_all_soft_refs;
  set_should_clear_all_soft_refs(false);
  return result;
}

GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                           int max_covered_regions) {
  assert(rem_set_name() == GenRemSet::CardTable, "unrecognized GenRemSet::Name");
  return new CardTableRS(whole_heap, max_covered_regions);
}

void CollectorPolicy::cleared_all_soft_refs() {
  // If near the gc overhead limit, continue to clear SoftRefs. SoftRefs may
  // have been cleared in the last collection but if the gc overhead
  // limit continues to be near, SoftRefs should still be cleared.
  if (size_policy() != NULL) {
    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
  }
  _all_soft_refs_clear = true;
}

size_t CollectorPolicy::compute_max_alignment() {
  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well. Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes of heap correspond to
  // 1 byte of card table entry and the os page size is 4096, the maximum
  // heap size should be aligned to 512*4096 = 2MB.

  // There is only the GenRemSet in Hotspot and only the GenRemSet::CardTable
  // is supported.
  // Requirements of any new remembered set implementations must be added here.
  size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);

  // Parallel GC does its own alignment of the generations to avoid requiring a
  // large page (256M on some platforms) for the permanent generation.  The
  // other collectors should also be updated to do their own alignment and then
  // this use of lcm() should be removed.
  if (UseLargePages && !UseParallelGC) {
      // In the presence of large pages we have to make sure that our
      // alignment is large page aware.
      alignment = lcm(os::large_page_size(), alignment);
  }

  return alignment;
}
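
// Worked example (illustrative values, not from the source): with the 2MB
// card-table constraint described above and a 256M large page size,
// lcm(256M, 2M) evaluates to 256M, so the maximum heap size becomes
// 256M-aligned.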

// GenCollectorPolicy methods.

size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  size_t x = base_size / (NewRatio+1);
  size_t new_gen_size = x > min_alignment() ?
                     align_size_down(x, min_alignment()) :
                     min_alignment();
  return new_gen_size;
}
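
// Worked example (illustrative, assuming min_alignment() == 64K): with
// NewRatio = 2 and base_size = 96M, x = 96M / 3 = 32M, which is already
// 64K-aligned, so the scaled young gen size is 32M.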

size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                 size_t maximum_size) {
  size_t alignment = min_alignment();
  size_t max_minus = maximum_size - alignment;
  return desired_size < max_minus ? desired_size : max_minus;
}
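
// Worked example (illustrative, assuming min_alignment() == 64K): bounding
// a desired size of 100M by a maximum of 96M returns 96M - 64K, always
// leaving one alignment unit of headroom below the maximum.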

void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
  const double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_pause_sec,
                                        GCTimeRatio);
}

void GenCollectorPolicy::initialize_flags() {
  // All sizes must be multiples of the generation granularity.
  set_min_alignment((uintx) Generation::GenGrain);
  set_max_alignment(compute_max_alignment());

  CollectorPolicy::initialize_flags();

  // All generational heaps have a youngest gen; handle those flags here.

  // Adjust max size parameters
  if (NewSize > MaxNewSize) {
    MaxNewSize = NewSize;
  }
  NewSize = align_size_down(NewSize, min_alignment());
  MaxNewSize = align_size_down(MaxNewSize, min_alignment());

  // Check validity of heap flags
  assert(NewSize     % min_alignment() == 0, "eden space alignment");
  assert(MaxNewSize  % min_alignment() == 0, "survivor space alignment");

  if (NewSize < 3*min_alignment()) {
    // Make sure there is room for eden and two survivor spaces.
    vm_exit_during_initialization("Too small new size specified");
  }
  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid heap ratio specified");
  }
}
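
// Illustrative flag interaction (hypothetical command line): with
// -XX:NewSize=48m -XX:MaxNewSize=32m, the adjustment above raises
// MaxNewSize to 48m, so NewSize <= MaxNewSize always holds afterwards.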

void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  OldSize = align_size_down(OldSize, min_alignment());

  if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
    // NewRatio will be used later to set the young generation size so we use
    // it to calculate how big the heap should be based on the requested OldSize
    // and NewRatio.
    assert(NewRatio > 0, "NewRatio should have been set up earlier");
    size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);

    calculated_heapsize = align_size_up(calculated_heapsize, max_alignment());
    MaxHeapSize = calculated_heapsize;
    InitialHeapSize = calculated_heapsize;
  }
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  // adjust max heap size if necessary
  if (NewSize + OldSize > MaxHeapSize) {
    if (FLAG_IS_CMDLINE(MaxHeapSize)) {
      // somebody set a maximum heap size with the intention that we should not
      // exceed it. Adjust New/OldSize as necessary.
      uintx calculated_size = NewSize + OldSize;
      double shrink_factor = (double) MaxHeapSize / calculated_size;
      // align
      NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
      // OldSize is already aligned because above we aligned MaxHeapSize to
      // max_alignment(), and we just made sure that NewSize is aligned to
      // min_alignment(). In initialize_flags() we verified that max_alignment()
      // is a multiple of min_alignment().
      OldSize = MaxHeapSize - NewSize;
    } else {
      MaxHeapSize = NewSize + OldSize;
    }
  }
  // need to do this again
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  always_do_update_barrier = UseConcMarkSweepGC;

  // Check validity of heap flags
  assert(OldSize     % min_alignment() == 0, "old space alignment");
  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
}
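
// Worked example of the shrinking branch above (illustrative flag values):
// with -XX:NewSize=512m -XX:OldSize=1536m -Xmx1g on the command line,
// calculated_size = 2048m and shrink_factor = 1024/2048 = 0.5, so NewSize
// is scaled down to 256m (already aligned) and OldSize becomes
// 1024m - 256m = 768m.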

// Values set on the command line win over any ergonomically
// set command line parameters.
// Ergonomic choices of parameters are made before this
// method is called.  Values for command line parameters such as NewSize
// and MaxNewSize feed those ergonomic choices into this method.
// This method makes the final generation sizings consistent with
// themselves and with overall heap sizings.
// In the absence of explicitly set command line flags, policies
// such as the use of NewRatio are used to size the generation.
void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // min_alignment() is used for alignment within a generation.
  // There is additional alignment done downstream for some
  // collectors that sometimes causes unwanted rounding up of
  // generation sizes.

  // Determine maximum size of gen0

  size_t max_new_size = 0;
  if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
    if (MaxNewSize < min_alignment()) {
      max_new_size = min_alignment();
    }
    if (MaxNewSize >= max_heap_byte_size()) {
      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
                                     min_alignment());
      warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
        "greater than the entire heap (" SIZE_FORMAT "k).  A "
        "new generation size of " SIZE_FORMAT "k will be used.",
        MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
    } else {
      max_new_size = align_size_down(MaxNewSize, min_alignment());
    }

  // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
  // specially at this point to just use an ergonomically set
  // MaxNewSize to set max_new_size.  For cases with small
  // heaps such a policy often did not work because the MaxNewSize
  // was larger than the entire heap.  The interpretation given
  // to ergonomically set flags is that the flags are set
  // by different collectors for their own special needs but
  // are not allowed to badly shape the heap.  This allows the
  // different collectors to decide what's best for themselves
  // without having to factor in the overall heap shape.  It
  // can be the case in the future that the collectors would
  // only make "wise" ergonomics choices and this policy could
  // just accept those choices.  The choices currently made are
  // not always "wise".
  } else {
    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
    // Bound the maximum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // Ergonomics plays here by previously calculating the desired
    // NewSize and MaxNewSize.
    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
  }
  assert(max_new_size > 0, "All paths should set max_new_size");

  // Given the maximum gen0 size, determine the initial and
  // minimum gen0 sizes.

  if (max_heap_byte_size() == min_heap_byte_size()) {
    // The maximum and minimum heap sizes are the same so
    // the generation's minimum and initial sizes must be the
    // same as its maximum.
    set_min_gen0_size(max_new_size);
    set_initial_gen0_size(max_new_size);
    set_max_gen0_size(max_new_size);
  } else {
    size_t desired_new_size = 0;
    if (!FLAG_IS_DEFAULT(NewSize)) {
      // If NewSize is set ergonomically (for example by cms), it
      // would make sense to use it.  If it is used, also use it
      // to set the initial size.  Although there is no reason
      // the minimum size and the initial size have to be the same,
      // the current implementation gets into trouble during the calculation
      // of the tenured generation sizes if they are different.
      // Note that this makes the initial size and the minimum size
      // generally small compared to the NewRatio calculation.
      _min_gen0_size = NewSize;
      desired_new_size = NewSize;
      max_new_size = MAX2(max_new_size, NewSize);
    } else {
      // For the case where NewSize is the default, use NewRatio
      // to size the minimum and initial generation sizes.
      // Use the default NewSize as the floor for these values.  If
      // NewRatio is overly large, the resulting sizes can be too
      // small.
      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
                          NewSize);
      desired_new_size =
        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
             NewSize);
    }

    assert(_min_gen0_size > 0, "Sanity check");
    set_initial_gen0_size(desired_new_size);
    set_max_gen0_size(max_new_size);

    // At this point the desirable initial and minimum sizes have been
    // determined without regard to the maximum sizes.

    // Bound the sizes by the corresponding overall heap sizes.
    set_min_gen0_size(
      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
    set_initial_gen0_size(
      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
    set_max_gen0_size(
      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));

    // At this point all three sizes have been checked against the
    // maximum sizes but have not been checked for consistency
    // among the three.

    // Final check min <= initial <= max
    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
    set_initial_gen0_size(
      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
  }

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
      SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
      min_gen0_size(), initial_gen0_size(), max_gen0_size());
  }
}
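
// Illustrative walk-through of the final min <= initial <= max clamp above
// (hypothetical sizes): starting from min = 40M, initial = 30M, max = 35M,
// min becomes MIN2(40M, 35M) = 35M, initial becomes
// MAX2(MIN2(30M, 35M), 35M) = 35M, and the last step keeps min at 35M,
// yielding the consistent triple (35M, 35M, 35M).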

// Call this method during the sizing of gen1 to make
// adjustments to gen0 because of gen1 sizing policy.  gen0 initially has
// the most freedom in sizing because it is done before the
// policy for gen1 is applied.  Once gen1 policies have been applied,
// there may be conflicts in the shape of the heap and this method
// is used to make the needed adjustments.  The application of the
// policies could be more sophisticated (iterative for example) but
// keeping it simple also seems a worthwhile goal.
bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr,
                                                     size_t* gen1_size_ptr,
                                                     const size_t heap_size,
                                                     const size_t min_gen1_size) {
  bool result = false;

  if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
    if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
        (heap_size >= min_gen1_size + min_alignment())) {
      // Adjust gen0 down to accommodate min_gen1_size
      *gen0_size_ptr = heap_size - min_gen1_size;
      *gen0_size_ptr =
        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
             min_alignment());
      assert(*gen0_size_ptr > 0, "Min gen0 is too large");
      result = true;
    } else {
      *gen1_size_ptr = heap_size - *gen0_size_ptr;
      *gen1_size_ptr =
        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
             min_alignment());
    }
  }
  return result;
}
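
// Worked example (hypothetical sizes): with heap_size = 64M,
// *gen0_size_ptr = 48M, *gen1_size_ptr = 32M and min_gen1_size = 32M, the
// sum 48M + 32M exceeds the heap, 64M < 48M + 32M, and
// 64M >= 32M + min_alignment(), so gen0 is shrunk to 64M - 32M = 32M
// (aligned) and the function returns true.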

// Minimum sizes of the generations may be different than
// the initial sizes.  An inconsistency is permitted here
// in the total size that can be specified explicitly by
// command line specification of OldSize and NewSize and
// also a command line specification of -Xms.  Issue a warning
// but allow the values to pass.

void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // At this point the minimum, initial and maximum sizes
  // of the overall heap and of gen0 have been determined.
  // The maximum gen1 size can be determined from the maximum gen0
  // and maximum heap size since no explicit flags exist
  // for setting the gen1 maximum.
  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
  _max_gen1_size =
    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
         min_alignment());
  // If no explicit command line flag has been set for the
  // gen1 size, use what is left for gen1.
  if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
    // The user has not specified any value or ergonomics
    // has chosen a value (which may or may not be consistent
    // with the overall heap size).  In either case make
    // the minimum, maximum and initial sizes consistent
    // with the gen0 sizes and the overall heap sizes.
    assert(min_heap_byte_size() > _min_gen0_size,
      "gen0 has an unexpected minimum size");
    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
    set_min_gen1_size(
      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
           min_alignment()));
    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
    set_initial_gen1_size(
      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
           min_alignment()));

  } else {
    // It's been explicitly set on the command line.  Use the
    // OldSize and then determine the consequences.
    set_min_gen1_size(OldSize);
    set_initial_gen1_size(OldSize);

    // If the user has explicitly set an OldSize that is inconsistent
    // with other command line flags, issue a warning.
    // The generation minimums and the overall heap minimum should
    // be within one heap alignment.
    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
           min_heap_byte_size()) {
      warning("Inconsistency between minimum heap size and minimum "
          "generation sizes: using minimum heap = " SIZE_FORMAT,
          min_heap_byte_size());
    }
    if (OldSize > _max_gen1_size) {
      warning("Inconsistency between maximum heap size and maximum "
          "generation sizes: using maximum heap = " SIZE_FORMAT
          " -XX:OldSize flag is being ignored",
          max_heap_byte_size());
    }
    // If there is an inconsistency between the OldSize and the minimum and/or
    // initial size of gen0, since OldSize was explicitly set, OldSize wins.
    if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
                          min_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
              SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
              min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
    // Initial size
    if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                         initial_heap_byte_size(), OldSize)) {
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
          SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
          min_gen0_size(), initial_gen0_size(), max_gen0_size());
      }
    }
  }
  // Enforce the maximum gen1 size.
  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));

  // Check that min gen1 <= initial gen1 <= max gen1
  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT "  Initial gen1 "
      SIZE_FORMAT "  Maximum gen1 " SIZE_FORMAT,
      min_gen1_size(), initial_gen1_size(), max_gen1_size());
  }
}
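
// Illustrative default-OldSize path above (hypothetical sizes): with a
// minimum heap of 64M and a min_gen0_size of 16M, min_gen1 starts as
// 64M - 16M = 48M, which is already aligned, so gen1's minimum ends up at
// 48M before the final max-gen1 clamp.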

HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                        bool is_tlab,
                                        bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
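  // Each iteration tries, in order: a lock-free allocation in gen0, a
  // locked allocation across the generations, heap expansion or a stall
  // while the GC locker is held, and finally a VM GC operation.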
  for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("GenCollectorPolicy::mem_allocate_work:"
                      " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // we didn't get to do a GC and we didn't get any memory
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
         assert(result == NULL, "must be NULL if gc_locked() is true");
         continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory error
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the overhead-exceeded state does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
          warning("GenCollectorPolicy::mem_allocate_work retries %d times \n\t"
                  " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}
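
// Tries heap expansion as a last-resort allocation path, walking the
// generations from oldest to youngest so that large requests land in the
// old generation when its should_allocate() policy permits.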
HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_tlab)) {
      result = gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}

HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // could be null if we are out of space
  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
    }
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
                                                 ClassLoaderData* loader_data,
                                                 size_t word_size,
                                                 Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = NULL;
    if (GC_locker::is_active_and_needs_gc()) {
      // If the GC_locker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result =
        loader_data->metaspace_non_null()->expand_and_allocate(word_size,
                                                               mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GC_locker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection and a full collection
        // is (currently) needed for unloading classes so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need lock to get self-consistent gc counts
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again.  Check
    // before checking success because the prologue
    // could have succeeded and the GC still have
    // been locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
  return    (word_size > heap_word_size(gen0_capacity))
         || GC_locker::is_active_and_needs_gc()
         || gch->incremental_collection_failed();
}

//
// MarkSweepPolicy methods
//

MarkSweepPolicy::MarkSweepPolicy() {
  initialize_all();
}

void MarkSweepPolicy::initialize_generations() {
  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL);
  if (_generations == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");

  if (UseParNewGC) {
    _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
  } else {
    _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size);
  }
  _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);

  if (_generations[0] == NULL || _generations[1] == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");
}

void MarkSweepPolicy::initialize_gc_policy_counters() {
  // initialize the policy counters - 2 collectors, 3 generations
  if (UseParNewGC) {
    _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
  } else {
    _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
  }
}
