src/share/vm/memory/collectorPolicy.cpp

author:      duke
date:        Sat, 01 Dec 2007 00:00:00 +0000
changeset:   435:a61af66fc99e
child:       448:183f41cf8bfe
permissions: -rw-r--r--

Initial load

/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_collectorPolicy.cpp.incl"

// CollectorPolicy methods.

void CollectorPolicy::initialize_flags() {
  if (PermSize > MaxPermSize) {
    MaxPermSize = PermSize;
  }
  PermSize = align_size_down(PermSize, min_alignment());
  MaxPermSize = align_size_up(MaxPermSize, max_alignment());

  MinPermHeapExpansion = align_size_down(MinPermHeapExpansion, min_alignment());
  MaxPermHeapExpansion = align_size_down(MaxPermHeapExpansion, min_alignment());

  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());

  SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment());
  SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment());
  SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment());

  assert(PermSize    % min_alignment() == 0, "permanent space alignment");
  assert(MaxPermSize % max_alignment() == 0, "maximum permanent space alignment");
  assert(SharedReadOnlySize % max_alignment() == 0, "read-only space alignment");
  assert(SharedReadWriteSize % max_alignment() == 0, "read-write space alignment");
  assert(SharedMiscDataSize % max_alignment() == 0, "misc-data space alignment");
  if (PermSize < M) {
    vm_exit_during_initialization("Too small initial permanent heap");
  }
}
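
// A worked example of the rounding above (alignment values are assumed,
// not taken from any particular platform): with min_alignment() == 64K and
// max_alignment() == 4M, -XX:PermSize=20m is left at 20m (already a 64K
// multiple), while -XX:MaxPermSize=65m is rounded up to 68m, the next 4M
// boundary. align_size_down can only shrink a value and align_size_up can
// only grow it, which is why the initial size uses the former and the
// maximum size the latter.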

void CollectorPolicy::initialize_size_info() {
  // User inputs from -Xmx and -Xms are aligned
  _initial_heap_byte_size = align_size_up(Arguments::initial_heap_size(),
                                          min_alignment());
  _min_heap_byte_size = align_size_up(Arguments::min_heap_size(),
                                          min_alignment());
  _max_heap_byte_size = align_size_up(MaxHeapSize, max_alignment());

  // Check validity of heap parameters from launcher
  if (_initial_heap_byte_size == 0) {
    _initial_heap_byte_size = NewSize + OldSize;
  } else {
    Universe::check_alignment(_initial_heap_byte_size, min_alignment(),
                            "initial heap");
  }
  if (_min_heap_byte_size == 0) {
    _min_heap_byte_size = NewSize + OldSize;
  } else {
    Universe::check_alignment(_min_heap_byte_size, min_alignment(),
                            "minimum heap");
  }

  // Check heap parameter properties
  if (_initial_heap_byte_size < M) {
    vm_exit_during_initialization("Too small initial heap");
  }
  if (_min_heap_byte_size < M) {
    vm_exit_during_initialization("Too small minimum heap");
  }
  if (_initial_heap_byte_size <= NewSize) {
    // Make sure there is at least some room in the old space.
    vm_exit_during_initialization("Too small initial heap for new size specified");
  }
  if (_max_heap_byte_size < _min_heap_byte_size) {
    vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
  }
  if (_initial_heap_byte_size < _min_heap_byte_size) {
    vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
  }
  if (_max_heap_byte_size < _initial_heap_byte_size) {
    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
  }
}
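
// Illustrative failure modes of the checks above (flag values are examples
// only; the exact outcome also depends on launcher defaults):
//   -Xms512k                 -> "Too small initial heap"
//   -Xms64m -Xmx32m          -> "Incompatible minimum and maximum heap sizes specified"
//   -Xms16m -XX:NewSize=16m  -> "Too small initial heap for new size specified"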

void CollectorPolicy::initialize_perm_generation(PermGen::Name pgnm) {
  _permanent_generation =
    new PermanentGenerationSpec(pgnm, PermSize, MaxPermSize,
                                SharedReadOnlySize,
                                SharedReadWriteSize,
                                SharedMiscDataSize,
                                SharedMiscCodeSize);
  if (_permanent_generation == NULL) {
    vm_exit_during_initialization("Unable to allocate gen spec");
  }
}

GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                           int max_covered_regions) {
  switch (rem_set_name()) {
  case GenRemSet::CardTable: {
    if (barrier_set_name() != BarrierSet::CardTableModRef)
      vm_exit_during_initialization("Mismatch between RS and BS.");
    CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
    return res;
  }
  default:
    guarantee(false, "unrecognized GenRemSet::Name");
    return NULL;
  }
}

// GenCollectorPolicy methods.

void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_minor_pause_sec,
                                        GCTimeRatio);
}

size_t GenCollectorPolicy::compute_max_alignment() {
  // The card marking array and the offset arrays for old generations are
  // committed in os pages as well. Make sure they are entirely full (to
  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
  // byte entry and the os page size is 4096, the maximum heap size should
  // be 512*4096 = 2MB aligned.
  size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());

  // Parallel GC does its own alignment of the generations to avoid requiring a
  // large page (256M on some platforms) for the permanent generation.  The
  // other collectors should also be updated to do their own alignment and then
  // this use of lcm() should be removed.
  if (UseLargePages && !UseParallelGC) {
      // In the presence of large pages we have to make sure that our
      // alignment is large-page aware.
      alignment = lcm(os::large_page_size(), alignment);
  }

  return alignment;
}
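
// Worked example of the constraint described above: with a 512-byte card
// size and a 4096-byte os page, one fully committed page of the card table
// covers 512 * 4096 = 2M of heap, hence the 2M alignment requirement. If
// 4M large pages are in use as well (an assumed size; it varies by
// platform) and the collector is not ParallelGC, lcm(4M, 2M) = 4M becomes
// the effective maximum alignment.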

void GenCollectorPolicy::initialize_flags() {
  // All sizes must be multiples of the generation granularity.
  set_min_alignment((uintx) Generation::GenGrain);
  set_max_alignment(compute_max_alignment());
  assert(max_alignment() >= min_alignment() &&
         max_alignment() % min_alignment() == 0,
         "invalid alignment constraints");

  CollectorPolicy::initialize_flags();

  // All generational heaps have a youngest gen; handle those flags here.

  // Adjust max size parameters
  if (NewSize > MaxNewSize) {
    MaxNewSize = NewSize;
  }
  NewSize = align_size_down(NewSize, min_alignment());
  MaxNewSize = align_size_down(MaxNewSize, min_alignment());

  // Check validity of heap flags
  assert(NewSize     % min_alignment() == 0, "eden space alignment");
  assert(MaxNewSize  % min_alignment() == 0, "survivor space alignment");

  if (NewSize < 3*min_alignment()) {
    // Make sure there is room for eden and two survivor spaces.
    vm_exit_during_initialization("Too small new size specified");
  }
  if (SurvivorRatio < 1 || NewRatio < 1) {
    vm_exit_during_initialization("Invalid heap ratio specified");
  }
}

void TwoGenerationCollectorPolicy::initialize_flags() {
  GenCollectorPolicy::initialize_flags();

  OldSize = align_size_down(OldSize, min_alignment());
  if (NewSize + OldSize > MaxHeapSize) {
    MaxHeapSize = NewSize + OldSize;
  }
  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());

  always_do_update_barrier = UseConcMarkSweepGC;
  BlockOffsetArrayUseUnallocatedBlock =
      BlockOffsetArrayUseUnallocatedBlock || ParallelGCThreads > 0;

  // Check validity of heap flags
  assert(OldSize     % min_alignment() == 0, "old space alignment");
  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
}

void GenCollectorPolicy::initialize_size_info() {
  CollectorPolicy::initialize_size_info();

  // Minimum sizes of the generations may be different from
  // the initial sizes.
  if (!FLAG_IS_DEFAULT(NewSize)) {
    _min_gen0_size = NewSize;
  } else {
    _min_gen0_size = align_size_down(_min_heap_byte_size / (NewRatio+1),
                                     min_alignment());
    // We bound the minimum size by NewSize below (since it historically
    // would have been NewSize and because the NewRatio calculation could
    // yield a size that is too small) and bound it by MaxNewSize above.
    // This is not always best.  The NewSize calculated by CMS (which has
    // a fixed minimum of 16m) can sometimes be "too" large.  Consider
    // the case of -Xmx32m: the CMS-calculated NewSize would be about
    // half the entire heap, which seems too large.  But the counter
    // example is seen when the client defaults for NewRatio are used.
    // An initial young generation size of 640k was observed
    // with -Xmx128m -XX:MaxNewSize=32m when NewSize was not used
    // as a lower bound, as with
    //   _min_gen0_size = MIN2(_min_gen0_size, MaxNewSize);
    // and 640k seemed too small a young generation.
    _min_gen0_size = MIN2(MAX2(_min_gen0_size, NewSize), MaxNewSize);
  }

  // Parameters are valid, compute area sizes.
  size_t max_new_size = align_size_down(_max_heap_byte_size / (NewRatio+1),
                                        min_alignment());
  max_new_size = MIN2(MAX2(max_new_size, _min_gen0_size), MaxNewSize);

  // desired_new_size is used to set the initial size.  The
  // initial size must be greater than the minimum size.
  size_t desired_new_size =
    align_size_down(_initial_heap_byte_size / (NewRatio+1),
                  min_alignment());

  size_t new_size = MIN2(MAX2(desired_new_size, _min_gen0_size), max_new_size);

  _initial_gen0_size = new_size;
  _max_gen0_size = max_new_size;
}
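
// A worked example of the sizing above (flag values are illustrative):
// with -Xmx120m, NewRatio=2 and an assumed 64K min_alignment(),
//   max_new_size = align_size_down(120m / 3, 64K) = 40m
// and desired_new_size is the corresponding 1/(NewRatio+1) share of the
// initial heap size. Both pass through the MIN2(MAX2(...)) clamp, so gen0
// can never exceed MaxNewSize nor fall below the computed (or user
// specified) minimum.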

void TwoGenerationCollectorPolicy::initialize_size_info() {
  GenCollectorPolicy::initialize_size_info();

  // Minimum sizes of the generations may be different from
  // the initial sizes.  An inconsistency is permitted here
  // in the total size that can be specified explicitly by
  // command line specification of OldSize and NewSize and
  // also a command line specification of -Xms.  Issue a warning
  // but allow the values to pass.
  if (!FLAG_IS_DEFAULT(OldSize)) {
    _min_gen1_size = OldSize;
    // The generation minimums and the overall heap minimum should
    // be within one heap alignment.
    if ((_min_gen1_size + _min_gen0_size + max_alignment()) <
         _min_heap_byte_size) {
      warning("Inconsistency between minimum heap size and minimum "
        "generation sizes: using min heap = " SIZE_FORMAT,
        _min_heap_byte_size);
    }
  } else {
    _min_gen1_size = _min_heap_byte_size - _min_gen0_size;
  }

  _initial_gen1_size = _initial_heap_byte_size - _initial_gen0_size;
  _max_gen1_size = _max_heap_byte_size - _max_gen0_size;
}

HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                        bool is_tlab,
                                        bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                      " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      // NULL is returned for several different circumstances below.
      // In general gc_overhead_limit_was_exceeded should be false, so
      // set it to false here and reset it to true only if the gc time
      // limit is being exceeded, as checked below.
      *gc_overhead_limit_was_exceeded = false;

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try to expand the heap to satisfy the request.
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }
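
    // The collection count is sampled while Heap_lock is still held so
    // that the VM operation below can detect whether another thread's GC
    // completed between this point and the safepoint. If the counts no
    // longer match, the operation's prologue fails and control falls
    // through to retry the allocation rather than force a redundant
    // collection.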

    // Allocation has failed and a collection is about
    // to be done.  If the gc time limit was exceeded the
    // last time a collection was done, return NULL so
    // that an out-of-memory will be thrown.  Clear
    // gc_time_limit_exceeded so that subsequent attempts
    // at a collection will be made.
    if (size_policy()->gc_time_limit_exceeded()) {
      *gc_overhead_limit_was_exceeded = true;
      size_policy()->set_gc_time_limit_exceeded(false);
      return NULL;
    }

    VM_GenCollectForAllocation op(size,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
         assert(result == NULL, "must be NULL if gc_locked() is true");
         continue;  // retry and/or stall as necessary
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
          warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
                  " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_tlab)) {
      result = gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}

HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                        bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GC_locker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!gch->is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // could be null if we are out of space
  } else if (!gch->incremental_collection_will_fail()) {
    // The gc_prologues have not executed yet.  The value
    // for incremental_collection_will_fail() is the remnant
    // of the last collection.
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  } else {
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    gch->do_collection(true             /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    IntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */);
  }

  result = gch->attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in_reserved(result), "result not in heap");
    return result;
  }

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}
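
// The fallback ladder in satisfy_failed_allocation, in order:
//   1. GC locker active       -> try heap expansion only; no GC is possible
//   2. incremental GC viable  -> minor collection, then retry allocation
//   3. otherwise              -> full collection, then retry allocation
//   4. still unsatisfied      -> try heap expansion
//   5. last resort            -> full collection that clears soft references
//                                and forces maximal compaction
// A NULL return from here results in an OOM exception in the caller.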

size_t GenCollectorPolicy::large_typearray_limit() {
  return FastAllocateSizeLimit;
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
  return    (word_size > heap_word_size(gen0_capacity))
         || (GC_locker::is_active_and_needs_gc())
         || (   gch->last_incremental_collection_failed()
             && gch->incremental_collection_will_fail());
}
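
// Example (sizes are illustrative): with a young gen capacity of 8M, a
// request for a 10M array exceeds heap_word_size(gen0_capacity), so this
// predicate returns true and mem_allocate_work passes first_only == false
// to attempt_allocation, letting the older generations be probed directly.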

//
// MarkSweepPolicy methods
//

MarkSweepPolicy::MarkSweepPolicy() {
  initialize_all();
}

void MarkSweepPolicy::initialize_generations() {
  initialize_perm_generation(PermGen::MarkSweepCompact);
  _generations = new GenerationSpecPtr[number_of_generations()];
  if (_generations == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");

  if (UseParNewGC && ParallelGCThreads > 0) {
    _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size);
  } else {
    _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size);
  }
  _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size);

  if (_generations[0] == NULL || _generations[1] == NULL)
    vm_exit_during_initialization("Unable to allocate gen spec");
}

void MarkSweepPolicy::initialize_gc_policy_counters() {
  // Initialize the policy counters - 2 collectors, 3 generations.
  if (UseParNewGC && ParallelGCThreads > 0) {
    _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
  } else {
    _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
  }
}
