src/share/vm/memory/generation.cpp

author:      chegar
date:        Thu, 23 May 2013 12:44:18 +0100
changeset:   5249:ce9ecec70f99
parent:      4900:8617e38bb4cb
child:       5237:f2110083203d
permissions: -rw-r--r--
description: Merge

     1 /*
     2  * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/shared/spaceDecorator.hpp"
    27 #include "gc_interface/collectedHeap.inline.hpp"
    28 #include "memory/allocation.inline.hpp"
    29 #include "memory/blockOffsetTable.inline.hpp"
    30 #include "memory/cardTableRS.hpp"
    31 #include "memory/gcLocker.inline.hpp"
    32 #include "memory/genCollectedHeap.hpp"
    33 #include "memory/genMarkSweep.hpp"
    34 #include "memory/genOopClosures.hpp"
    35 #include "memory/genOopClosures.inline.hpp"
    36 #include "memory/generation.hpp"
    37 #include "memory/generation.inline.hpp"
    38 #include "memory/space.inline.hpp"
    39 #include "oops/oop.inline.hpp"
    40 #include "runtime/java.hpp"
    41 #include "utilities/copy.hpp"
    42 #include "utilities/events.hpp"
    44 Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
    45   _level(level),
    46   _ref_processor(NULL) {
    47   if (!_virtual_space.initialize(rs, initial_size)) {
    48     vm_exit_during_initialization("Could not reserve enough space for "
    49                     "object heap");
    50   }
     51   // Mangle all of the initial generation.
    52   if (ZapUnusedHeapArea) {
    53     MemRegion mangle_region((HeapWord*)_virtual_space.low(),
    54       (HeapWord*)_virtual_space.high());
    55     SpaceMangler::mangle_region(mangle_region);
    56   }
    57   _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
    58           (HeapWord*)_virtual_space.high_boundary());
    59 }
    61 GenerationSpec* Generation::spec() {
    62   GenCollectedHeap* gch = GenCollectedHeap::heap();
    63   assert(0 <= level() && level() < gch->_n_gens, "Bad gen level");
    64   return gch->_gen_specs[level()];
    65 }
    67 size_t Generation::max_capacity() const {
    68   return reserved().byte_size();
    69 }
    71 void Generation::print_heap_change(size_t prev_used) const {
    72   if (PrintGCDetails && Verbose) {
    73     gclog_or_tty->print(" "  SIZE_FORMAT
    74                         "->" SIZE_FORMAT
    75                         "("  SIZE_FORMAT ")",
    76                         prev_used, used(), capacity());
    77   } else {
    78     gclog_or_tty->print(" "  SIZE_FORMAT "K"
    79                         "->" SIZE_FORMAT "K"
    80                         "("  SIZE_FORMAT "K)",
    81                         prev_used / K, used() / K, capacity() / K);
    82   }
    83 }
    85 // By default we get a single-threaded default reference processor;
    86 // generations that need multi-threaded reference processing or discovery override this method.
    87 void Generation::ref_processor_init() {
    88   assert(_ref_processor == NULL, "a reference processor already exists");
    89   assert(!_reserved.is_empty(), "empty generation?");
    90   _ref_processor = new ReferenceProcessor(_reserved);    // a vanilla reference processor
    91   if (_ref_processor == NULL) {
    92     vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
    93   }
    94 }
    96 void Generation::print() const { print_on(tty); }
    98 void Generation::print_on(outputStream* st)  const {
    99   st->print(" %-20s", name());
   100   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
   101              capacity()/K, used()/K);
   102   st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
   103               _virtual_space.low_boundary(),
   104               _virtual_space.high(),
   105               _virtual_space.high_boundary());
   106 }
   108 void Generation::print_summary_info() { print_summary_info_on(tty); }
   110 void Generation::print_summary_info_on(outputStream* st) {
   111   StatRecord* sr = stat_record();
   112   double time = sr->accumulated_time.seconds();
   113   st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
   114                "%d GC's, avg GC time %3.7f]",
   115                level(), time, sr->invocations,
   116                sr->invocations > 0 ? time / sr->invocations : 0.0);
   117 }
   119 // Utility iterator classes
   121 class GenerationIsInReservedClosure : public SpaceClosure {
   122  public:
   123   const void* _p;
   124   Space* sp;
   125   virtual void do_space(Space* s) {
   126     if (sp == NULL) {
   127       if (s->is_in_reserved(_p)) sp = s;
   128     }
   129   }
   130   GenerationIsInReservedClosure(const void* p) : _p(p), sp(NULL) {}
   131 };
   133 class GenerationIsInClosure : public SpaceClosure {
   134  public:
   135   const void* _p;
   136   Space* sp;
   137   virtual void do_space(Space* s) {
   138     if (sp == NULL) {
   139       if (s->is_in(_p)) sp = s;
   140     }
   141   }
   142   GenerationIsInClosure(const void* p) : _p(p), sp(NULL) {}
   143 };
   145 bool Generation::is_in(const void* p) const {
   146   GenerationIsInClosure blk(p);
   147   ((Generation*)this)->space_iterate(&blk);
   148   return blk.sp != NULL;
   149 }
   151 DefNewGeneration* Generation::as_DefNewGeneration() {
   152   assert((kind() == Generation::DefNew) ||
   153          (kind() == Generation::ParNew) ||
   154          (kind() == Generation::ASParNew),
   155     "Wrong youngest generation type");
   156   return (DefNewGeneration*) this;
   157 }
   159 Generation* Generation::next_gen() const {
   160   GenCollectedHeap* gch = GenCollectedHeap::heap();
   161   int next = level() + 1;
   162   if (next < gch->_n_gens) {
   163     return gch->_gens[next];
   164   } else {
   165     return NULL;
   166   }
   167 }
   169 size_t Generation::max_contiguous_available() const {
   170   // The largest number of contiguous free words in this or any higher generation.
   171   size_t max = 0;
   172   for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) {
   173     size_t avail = gen->contiguous_available();
   174     if (avail > max) {
   175       max = avail;
   176     }
   177   }
   178   return max;
   179 }
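        // A conservative check, typically consulted by a younger generation before
        // it collects: the attempt is considered safe only if the largest block of
        // contiguous free space in this or any higher generation could absorb a
        // worst-case promotion of max_promotion_in_bytes.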
   181 bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
   182   size_t available = max_contiguous_available();
   183   bool   res = (available >= max_promotion_in_bytes);
   184   if (PrintGC && Verbose) {
   185     gclog_or_tty->print_cr(
   186       "Generation: promo attempt is%s safe: available("SIZE_FORMAT") %s max_promo("SIZE_FORMAT")",
   187       res? "":" not", available, res? ">=":"<",
   188       max_promotion_in_bytes);
   189   }
   190   return res;
   191 }
    193 // Attempts to allocate space for "obj" in this generation and copy it there; on failure, delegates to the heap's failed-promotion handling.
   194 oop Generation::promote(oop obj, size_t obj_size) {
   195   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
   197 #ifndef PRODUCT
   198   if (Universe::heap()->promotion_should_fail()) {
   199     return NULL;
   200   }
   201 #endif  // #ifndef PRODUCT
   203   HeapWord* result = allocate(obj_size, false);
   204   if (result != NULL) {
   205     Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
   206     return oop(result);
   207   } else {
   208     GenCollectedHeap* gch = GenCollectedHeap::heap();
   209     return gch->handle_failed_promotion(this, obj, obj_size);
   210   }
   211 }
   213 oop Generation::par_promote(int thread_num,
   214                             oop obj, markOop m, size_t word_sz) {
   215   // Could do a bad general impl here that gets a lock.  But no.
   216   ShouldNotCallThis();
   217   return NULL;
   218 }
   220 void Generation::par_promote_alloc_undo(int thread_num,
   221                                         HeapWord* obj, size_t word_sz) {
   222   // Could do a bad general impl here that gets a lock.  But no.
   223   guarantee(false, "No good general implementation.");
   224 }
   226 Space* Generation::space_containing(const void* p) const {
   227   GenerationIsInReservedClosure blk(p);
   228   // Cast away const
   229   ((Generation*)this)->space_iterate(&blk);
   230   return blk.sp;
   231 }
   233 // Some of these are mediocre general implementations.  Should be
   234 // overridden to get better performance.
   236 class GenerationBlockStartClosure : public SpaceClosure {
   237  public:
   238   const void* _p;
   239   HeapWord* _start;
   240   virtual void do_space(Space* s) {
   241     if (_start == NULL && s->is_in_reserved(_p)) {
   242       _start = s->block_start(_p);
   243     }
   244   }
   245   GenerationBlockStartClosure(const void* p) { _p = p; _start = NULL; }
   246 };
   248 HeapWord* Generation::block_start(const void* p) const {
   249   GenerationBlockStartClosure blk(p);
   250   // Cast away const
   251   ((Generation*)this)->space_iterate(&blk);
   252   return blk._start;
   253 }
   255 class GenerationBlockSizeClosure : public SpaceClosure {
   256  public:
   257   const HeapWord* _p;
   258   size_t size;
   259   virtual void do_space(Space* s) {
   260     if (size == 0 && s->is_in_reserved(_p)) {
   261       size = s->block_size(_p);
   262     }
   263   }
   264   GenerationBlockSizeClosure(const HeapWord* p) { _p = p; size = 0; }
   265 };
   267 size_t Generation::block_size(const HeapWord* p) const {
   268   GenerationBlockSizeClosure blk(p);
   269   // Cast away const
   270   ((Generation*)this)->space_iterate(&blk);
   271   assert(blk.size > 0, "seems reasonable");
   272   return blk.size;
   273 }
   275 class GenerationBlockIsObjClosure : public SpaceClosure {
   276  public:
   277   const HeapWord* _p;
   278   bool is_obj;
   279   virtual void do_space(Space* s) {
   280     if (!is_obj && s->is_in_reserved(_p)) {
   281       is_obj |= s->block_is_obj(_p);
   282     }
   283   }
   284   GenerationBlockIsObjClosure(const HeapWord* p) { _p = p; is_obj = false; }
   285 };
   287 bool Generation::block_is_obj(const HeapWord* p) const {
   288   GenerationBlockIsObjClosure blk(p);
   289   // Cast away const
   290   ((Generation*)this)->space_iterate(&blk);
   291   return blk.is_obj;
   292 }
   294 class GenerationOopIterateClosure : public SpaceClosure {
   295  public:
   296   ExtendedOopClosure* cl;
   297   MemRegion mr;
   298   virtual void do_space(Space* s) {
   299     s->oop_iterate(mr, cl);
   300   }
   301   GenerationOopIterateClosure(ExtendedOopClosure* _cl, MemRegion _mr) :
   302     cl(_cl), mr(_mr) {}
   303 };
   305 void Generation::oop_iterate(ExtendedOopClosure* cl) {
   306   GenerationOopIterateClosure blk(cl, _reserved);
   307   space_iterate(&blk);
   308 }
   310 void Generation::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
   311   GenerationOopIterateClosure blk(cl, mr);
   312   space_iterate(&blk);
   313 }
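        // Delegates to the heap's remembered set (normally a card table) so that
        // "cl" is applied to the locations in "sp" whose cards may contain a
        // pointer into a younger generation.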
   315 void Generation::younger_refs_in_space_iterate(Space* sp,
   316                                                OopsInGenClosure* cl) {
   317   GenRemSet* rs = SharedHeap::heap()->rem_set();
   318   rs->younger_refs_in_space_iterate(sp, cl);
   319 }
   321 class GenerationObjIterateClosure : public SpaceClosure {
   322  private:
   323   ObjectClosure* _cl;
   324  public:
   325   virtual void do_space(Space* s) {
   326     s->object_iterate(_cl);
   327   }
   328   GenerationObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
   329 };
   331 void Generation::object_iterate(ObjectClosure* cl) {
   332   GenerationObjIterateClosure blk(cl);
   333   space_iterate(&blk);
   334 }
   336 class GenerationSafeObjIterateClosure : public SpaceClosure {
   337  private:
   338   ObjectClosure* _cl;
   339  public:
   340   virtual void do_space(Space* s) {
   341     s->safe_object_iterate(_cl);
   342   }
   343   GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
   344 };
   346 void Generation::safe_object_iterate(ObjectClosure* cl) {
   347   GenerationSafeObjIterateClosure blk(cl);
   348   space_iterate(&blk);
   349 }
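        // The next three methods implement this generation's share of mark-compact:
        // prepare_for_compaction() computes forwarding addresses into the compact
        // point, adjust_pointers() updates references to the forwarded locations,
        // and compact() actually moves the objects.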
   351 void Generation::prepare_for_compaction(CompactPoint* cp) {
   352   // Generic implementation, can be specialized
   353   CompactibleSpace* space = first_compaction_space();
   354   while (space != NULL) {
   355     space->prepare_for_compaction(cp);
   356     space = space->next_compaction_space();
   357   }
   358 }
   360 class AdjustPointersClosure: public SpaceClosure {
   361  public:
   362   void do_space(Space* sp) {
   363     sp->adjust_pointers();
   364   }
   365 };
   367 void Generation::adjust_pointers() {
   368   // Note that this is done over all spaces, not just the compactible
   369   // ones.
   370   AdjustPointersClosure blk;
   371   space_iterate(&blk, true);
   372 }
   374 void Generation::compact() {
   375   CompactibleSpace* sp = first_compaction_space();
   376   while (sp != NULL) {
   377     sp->compact();
   378     sp = sp->next_compaction_space();
   379   }
   380 }
   382 CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
   383                                int level,
   384                                GenRemSet* remset) :
   385   Generation(rs, initial_byte_size, level), _rs(remset),
   386   _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
   387   _used_at_prologue()
   388 {
   389   HeapWord* start = (HeapWord*)rs.base();
   390   size_t reserved_byte_size = rs.size();
   391   assert((uintptr_t(start) & 3) == 0, "bad alignment");
   392   assert((reserved_byte_size & 3) == 0, "bad alignment");
   393   MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
   394   _bts = new BlockOffsetSharedArray(reserved_mr,
   395                                     heap_word_size(initial_byte_size));
   396   MemRegion committed_mr(start, heap_word_size(initial_byte_size));
   397   _rs->resize_covered_region(committed_mr);
   398   if (_bts == NULL)
   399     vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
    401   // Verify that the start and end of this generation are each the start of a card.
    402   // If this were not true, a single card could span more than one generation,
   403   // which would cause problems when we commit/uncommit memory, and when we
   404   // clear and dirty cards.
   405   guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
   406   if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
   407     // Don't check at the very end of the heap as we'll assert that we're probing off
   408     // the end if we try.
   409     guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
   410   }
   411   _min_heap_delta_bytes = MinHeapDeltaBytes;
   412   _capacity_at_prologue = initial_byte_size;
   413   _used_at_prologue = 0;
   414 }
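        // Expansion strategy: first try the page-aligned padded request
        // ("expand_bytes") when it exceeds the aligned minimum, then the aligned
        // minimum itself, and finally fall back to committing whatever remains of
        // the reserved space. Returns true if any attempt grew the generation.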
   416 bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
   417   assert_locked_or_safepoint(Heap_lock);
   418   if (bytes == 0) {
   419     return true;  // That's what grow_by(0) would return
   420   }
   421   size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
   422   if (aligned_bytes == 0){
   423     // The alignment caused the number of bytes to wrap.  An expand_by(0) will
   424     // return true with the implication that an expansion was done when it
   425     // was not.  A call to expand implies a best effort to expand by "bytes"
   426     // but not a guarantee.  Align down to give a best effort.  This is likely
   427     // the most that the generation can expand since it has some capacity to
   428     // start with.
   429     aligned_bytes = ReservedSpace::page_align_size_down(bytes);
   430   }
   431   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   432   bool success = false;
   433   if (aligned_expand_bytes > aligned_bytes) {
   434     success = grow_by(aligned_expand_bytes);
   435   }
   436   if (!success) {
   437     success = grow_by(aligned_bytes);
   438   }
   439   if (!success) {
   440     success = grow_to_reserved();
   441   }
   442   if (PrintGC && Verbose) {
   443     if (success && GC_locker::is_active_and_needs_gc()) {
   444       gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
   445     }
   446   }
   448   return success;
   449 }
   452 // No young generation references, clear this generation's cards.
   453 void CardGeneration::clear_remembered_set() {
   454   _rs->clear(reserved());
   455 }
   458 // Objects in this generation may have moved, invalidate this
   459 // generation's cards.
   460 void CardGeneration::invalidate_remembered_set() {
   461   _rs->invalidate(used_region());
   462 }
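        // Resize policy after a collection: grow enough to keep at least
        // MinHeapFreeRatio of the capacity free, shrink (damped by _shrink_factor)
        // toward at most MaxHeapFreeRatio free, never drop below the generation's
        // initial size, and only act on changes of at least _min_heap_delta_bytes.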
   465 void CardGeneration::compute_new_size() {
   466   assert(_shrink_factor <= 100, "invalid shrink factor");
   467   size_t current_shrink_factor = _shrink_factor;
   468   _shrink_factor = 0;
   470   // We don't have floating point command-line arguments
   471   // Note:  argument processing ensures that MinHeapFreeRatio < 100.
   472   const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
   473   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
   475   // Compute some numbers about the state of the heap.
   476   const size_t used_after_gc = used();
   477   const size_t capacity_after_gc = capacity();
   479   const double min_tmp = used_after_gc / maximum_used_percentage;
   480   size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
   481   // Don't shrink less than the initial generation size
   482   minimum_desired_capacity = MAX2(minimum_desired_capacity,
   483                                   spec()->init_size());
   484   assert(used_after_gc <= minimum_desired_capacity, "sanity check");
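          // For example, with MinHeapFreeRatio of 40 and 60M used after the
          // collection, minimum_desired_capacity works out to 60M / 0.6 = 100M
          // (unless the generation's initial size is larger).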
   486   if (PrintGC && Verbose) {
   487     const size_t free_after_gc = free();
   488     const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
    489     gclog_or_tty->print_cr("CardGeneration::compute_new_size: ");
   490     gclog_or_tty->print_cr("  "
   491                   "  minimum_free_percentage: %6.2f"
   492                   "  maximum_used_percentage: %6.2f",
   493                   minimum_free_percentage,
   494                   maximum_used_percentage);
   495     gclog_or_tty->print_cr("  "
   496                   "   free_after_gc   : %6.1fK"
   497                   "   used_after_gc   : %6.1fK"
   498                   "   capacity_after_gc   : %6.1fK",
   499                   free_after_gc / (double) K,
   500                   used_after_gc / (double) K,
   501                   capacity_after_gc / (double) K);
   502     gclog_or_tty->print_cr("  "
   503                   "   free_percentage: %6.2f",
   504                   free_percentage);
   505   }
   507   if (capacity_after_gc < minimum_desired_capacity) {
   508     // If we have less free space than we want then expand
   509     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
   510     // Don't expand unless it's significant
   511     if (expand_bytes >= _min_heap_delta_bytes) {
   512       expand(expand_bytes, 0); // safe if expansion fails
   513     }
   514     if (PrintGC && Verbose) {
   515       gclog_or_tty->print_cr("    expanding:"
   516                     "  minimum_desired_capacity: %6.1fK"
   517                     "  expand_bytes: %6.1fK"
   518                     "  _min_heap_delta_bytes: %6.1fK",
   519                     minimum_desired_capacity / (double) K,
   520                     expand_bytes / (double) K,
   521                     _min_heap_delta_bytes / (double) K);
   522     }
   523     return;
   524   }
   526   // No expansion, now see if we want to shrink
   527   size_t shrink_bytes = 0;
   528   // We would never want to shrink more than this
   529   size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
   531   if (MaxHeapFreeRatio < 100) {
   532     const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
   533     const double minimum_used_percentage = 1.0 - maximum_free_percentage;
   534     const double max_tmp = used_after_gc / minimum_used_percentage;
   535     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
   536     maximum_desired_capacity = MAX2(maximum_desired_capacity,
   537                                     spec()->init_size());
   538     if (PrintGC && Verbose) {
   539       gclog_or_tty->print_cr("  "
   540                              "  maximum_free_percentage: %6.2f"
   541                              "  minimum_used_percentage: %6.2f",
   542                              maximum_free_percentage,
   543                              minimum_used_percentage);
   544       gclog_or_tty->print_cr("  "
   545                              "  _capacity_at_prologue: %6.1fK"
   546                              "  minimum_desired_capacity: %6.1fK"
   547                              "  maximum_desired_capacity: %6.1fK",
   548                              _capacity_at_prologue / (double) K,
   549                              minimum_desired_capacity / (double) K,
   550                              maximum_desired_capacity / (double) K);
   551     }
   552     assert(minimum_desired_capacity <= maximum_desired_capacity,
   553            "sanity check");
   555     if (capacity_after_gc > maximum_desired_capacity) {
   556       // Capacity too large, compute shrinking size
   557       shrink_bytes = capacity_after_gc - maximum_desired_capacity;
    558       // We don't want to shrink all the way back to initSize if people call
   559       // System.gc(), because some programs do that between "phases" and then
   560       // we'd just have to grow the heap up again for the next phase.  So we
   561       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
   562       // on the third call, and 100% by the fourth call.  But if we recompute
   563       // size without shrinking, it goes back to 0%.
   564       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
   565       assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
   566       if (current_shrink_factor == 0) {
   567         _shrink_factor = 10;
   568       } else {
   569         _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
   570       }
   571       if (PrintGC && Verbose) {
   572         gclog_or_tty->print_cr("  "
   573                       "  shrinking:"
   574                       "  initSize: %.1fK"
   575                       "  maximum_desired_capacity: %.1fK",
   576                       spec()->init_size() / (double) K,
   577                       maximum_desired_capacity / (double) K);
   578         gclog_or_tty->print_cr("  "
   579                       "  shrink_bytes: %.1fK"
    580                       "  current_shrink_factor: " SIZE_FORMAT
    581                       "  new shrink factor: " SIZE_FORMAT
   582                       "  _min_heap_delta_bytes: %.1fK",
   583                       shrink_bytes / (double) K,
   584                       current_shrink_factor,
   585                       _shrink_factor,
   586                       _min_heap_delta_bytes / (double) K);
   587       }
   588     }
   589   }
   591   if (capacity_after_gc > _capacity_at_prologue) {
   592     // We might have expanded for promotions, in which case we might want to
   593     // take back that expansion if there's room after GC.  That keeps us from
   594     // stretching the heap with promotions when there's plenty of room.
   595     size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
   596     expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
    597     // We have two shrinking computations; take the larger of the two
   598     shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
   599     assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
   600     if (PrintGC && Verbose) {
   601       gclog_or_tty->print_cr("  "
   602                              "  aggressive shrinking:"
   603                              "  _capacity_at_prologue: %.1fK"
   604                              "  capacity_after_gc: %.1fK"
   605                              "  expansion_for_promotion: %.1fK"
   606                              "  shrink_bytes: %.1fK",
   607                              capacity_after_gc / (double) K,
   608                              _capacity_at_prologue / (double) K,
   609                              expansion_for_promotion / (double) K,
   610                              shrink_bytes / (double) K);
   611     }
   612   }
   613   // Don't shrink unless it's significant
   614   if (shrink_bytes >= _min_heap_delta_bytes) {
   615     shrink(shrink_bytes);
   616   }
   617 }
   619 // Currently nothing to do.
   620 void CardGeneration::prepare_for_verify() {}
   623 void OneContigSpaceCardGeneration::collect(bool   full,
   624                                            bool   clear_all_soft_refs,
   625                                            size_t size,
   626                                            bool   is_tlab) {
   627   SpecializationStats::clear();
   628   // Temporarily expand the span of our ref processor, so
   629   // refs discovery is over the entire heap, not just this generation
   630   ReferenceProcessorSpanMutator
   631     x(ref_processor(), GenCollectedHeap::heap()->reserved_region());
   632   GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
   633   SpecializationStats::print();
   634 }
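        // Typically reached when an allocation in this generation has failed:
        // grow the generation and retry. In the parallel case the rare-event lock
        // serializes expanders, and the loop keeps expanding and retrying until the
        // allocation succeeds or no uncommitted space is left to expand into.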
   636 HeapWord*
   637 OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
   638                                                   bool is_tlab,
   639                                                   bool parallel) {
   640   assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
   641   if (parallel) {
   642     MutexLocker x(ParGCRareEvent_lock);
   643     HeapWord* result = NULL;
   644     size_t byte_size = word_size * HeapWordSize;
   645     while (true) {
   646       expand(byte_size, _min_heap_delta_bytes);
   647       if (GCExpandToAllocateDelayMillis > 0) {
   648         os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
   649       }
   650       result = _the_space->par_allocate(word_size);
    651       if (result != NULL) {
   652         return result;
   653       } else {
   654         // If there's not enough expansion space available, give up.
   655         if (_virtual_space.uncommitted_size() < byte_size) {
   656           return NULL;
   657         }
   658         // else try again
   659       }
   660     }
   661   } else {
   662     expand(word_size*HeapWordSize, _min_heap_delta_bytes);
   663     return _the_space->allocate(word_size);
   664   }
   665 }
   667 bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
   668   GCMutexLocker x(ExpandHeap_lock);
   669   return CardGeneration::expand(bytes, expand_bytes);
   670 }
   673 void OneContigSpaceCardGeneration::shrink(size_t bytes) {
   674   assert_locked_or_safepoint(ExpandHeap_lock);
   675   size_t size = ReservedSpace::page_align_size_down(bytes);
   676   if (size > 0) {
   677     shrink_by(size);
   678   }
   679 }
   682 size_t OneContigSpaceCardGeneration::capacity() const {
   683   return _the_space->capacity();
   684 }
   687 size_t OneContigSpaceCardGeneration::used() const {
   688   return _the_space->used();
   689 }
   692 size_t OneContigSpaceCardGeneration::free() const {
   693   return _the_space->free();
   694 }
   696 MemRegion OneContigSpaceCardGeneration::used_region() const {
   697   return the_space()->used_region();
   698 }
   700 size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
   701   return _the_space->free();
   702 }
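        // Everything that could be made contiguously available without a
        // collection: the free space already committed in the space plus the
        // still-uncommitted remainder of the reserved area.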
   704 size_t OneContigSpaceCardGeneration::contiguous_available() const {
   705   return _the_space->free() + _virtual_space.uncommitted_size();
   706 }
   708 bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
   709   assert_locked_or_safepoint(ExpandHeap_lock);
   710   bool result = _virtual_space.expand_by(bytes);
   711   if (result) {
   712     size_t new_word_size =
   713        heap_word_size(_virtual_space.committed_size());
   714     MemRegion mr(_the_space->bottom(), new_word_size);
   715     // Expand card table
   716     Universe::heap()->barrier_set()->resize_covered_region(mr);
   717     // Expand shared block offset array
   718     _bts->resize(new_word_size);
   720     // Fix for bug #4668531
   721     if (ZapUnusedHeapArea) {
   722       MemRegion mangle_region(_the_space->end(),
   723       (HeapWord*)_virtual_space.high());
   724       SpaceMangler::mangle_region(mangle_region);
   725     }
   727     // Expand space -- also expands space's BOT
   728     // (which uses (part of) shared array above)
   729     _the_space->set_end((HeapWord*)_virtual_space.high());
   731     // update the space and generation capacity counters
   732     update_counters();
   734     if (Verbose && PrintGC) {
   735       size_t new_mem_size = _virtual_space.committed_size();
   736       size_t old_mem_size = new_mem_size - bytes;
   737       gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
   738                       SIZE_FORMAT "K to " SIZE_FORMAT "K",
   739                       name(), old_mem_size/K, bytes/K, new_mem_size/K);
   740     }
   741   }
   742   return result;
   743 }
   746 bool OneContigSpaceCardGeneration::grow_to_reserved() {
   747   assert_locked_or_safepoint(ExpandHeap_lock);
   748   bool success = true;
   749   const size_t remaining_bytes = _virtual_space.uncommitted_size();
   750   if (remaining_bytes > 0) {
   751     success = grow_by(remaining_bytes);
   752     DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
   753   }
   754   return success;
   755 }
   757 void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
   758   assert_locked_or_safepoint(ExpandHeap_lock);
   759   // Shrink committed space
   760   _virtual_space.shrink_by(bytes);
   761   // Shrink space; this also shrinks the space's BOT
   762   _the_space->set_end((HeapWord*) _virtual_space.high());
   763   size_t new_word_size = heap_word_size(_the_space->capacity());
   764   // Shrink the shared block offset array
   765   _bts->resize(new_word_size);
   766   MemRegion mr(_the_space->bottom(), new_word_size);
   767   // Shrink the card table
   768   Universe::heap()->barrier_set()->resize_covered_region(mr);
   770   if (Verbose && PrintGC) {
   771     size_t new_mem_size = _virtual_space.committed_size();
   772     size_t old_mem_size = new_mem_size + bytes;
   773     gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
   774                   name(), old_mem_size/K, new_mem_size/K);
   775   }
   776 }
   778 // Currently nothing to do.
   779 void OneContigSpaceCardGeneration::prepare_for_verify() {}
   782 // Override for a card-table generation with one contiguous
   783 // space. NOTE: For reasons that are lost in the fog of history,
   784 // this code is used when you iterate over perm gen objects,
   785 // even when one uses CDS, where the perm gen has a couple of
   786 // other spaces; this is because CompactingPermGenGen derives
   787 // from OneContigSpaceCardGeneration. This should be cleaned up,
    788 // see CR 6897789.
   789 void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
   790   _the_space->object_iterate(blk);
   791 }
   793 void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
   794                                                  bool usedOnly) {
   795   blk->do_space(_the_space);
   796 }
   798 void OneContigSpaceCardGeneration::object_iterate_since_last_GC(ObjectClosure* blk) {
   799   // Deal with delayed initialization of _the_space,
   800   // and lack of initialization of _last_gc.
   801   if (_last_gc.space() == NULL) {
   802     assert(the_space() != NULL, "shouldn't be NULL");
   803     _last_gc = the_space()->bottom_mark();
   804   }
   805   the_space()->object_iterate_from(_last_gc, blk);
   806 }
   808 void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
   809   blk->set_generation(this);
   810   younger_refs_in_space_iterate(_the_space, blk);
   811   blk->reset_generation();
   812 }
   814 void OneContigSpaceCardGeneration::save_marks() {
   815   _the_space->set_saved_mark();
   816 }
   819 void OneContigSpaceCardGeneration::reset_saved_marks() {
   820   _the_space->reset_saved_mark();
   821 }
   824 bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
   825   return _the_space->saved_mark_at_top();
   826 }
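        // The macro below stamps out one oop_since_save_marks_iterate variant per
        // closure type named in ALL_SINCE_SAVE_MARKS_CLOSURES: each variant applies
        // the closure to the oops in objects allocated since the last saved mark,
        // then advances the saved mark.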
   828 #define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)      \
   829                                                                                 \
   830 void OneContigSpaceCardGeneration::                                             \
   831 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {                  \
   832   blk->set_generation(this);                                                    \
   833   _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                     \
   834   blk->reset_generation();                                                      \
   835   save_marks();                                                                 \
   836 }
   838 ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)
   840 #undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN
   843 void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
   844   _last_gc = WaterMark(the_space(), the_space()->top());
   846   // update the generation and space performance counters
   847   update_counters();
   848   if (ZapUnusedHeapArea) {
   849     the_space()->check_mangled_unused_area_complete();
   850   }
   851 }
   853 void OneContigSpaceCardGeneration::record_spaces_top() {
   854   assert(ZapUnusedHeapArea, "Not mangling unused space");
   855   the_space()->set_top_for_allocations();
   856 }
   858 void OneContigSpaceCardGeneration::verify() {
   859   the_space()->verify();
   860 }
   862 void OneContigSpaceCardGeneration::print_on(outputStream* st)  const {
   863   Generation::print_on(st);
   864   st->print("   the");
   865   the_space()->print_on(st);
   866 }
