src/share/vm/memory/generation.cpp

author:      stefank
date:        Wed, 01 Dec 2010 15:04:06 +0100
changeset:   2325:c760f78e0a53
parent:      2314:f95d63e2154a
child:       2651:92da084fefc9
permissions: -rw-r--r--

7003125: precompiled.hpp is included when precompiled headers are not used
Summary: Added an ifndef DONT_USE_PRECOMPILED_HEADER to precompiled.hpp. Set up DONT_USE_PRECOMPILED_HEADER when compiling with Sun Studio or when the user specifies USE_PRECOMPILED_HEADER=0. Fixed broken include dependencies.
Reviewed-by: coleenp, kvn
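
The Summary above describes the fix only in words; a minimal sketch of what such a guard looks like is shown below. Only the macro name DONT_USE_PRECOMPILED_HEADER is taken from the summary — the specific headers listed are illustrative placeholders, not the actual contents of precompiled.hpp.

  // precompiled.hpp (sketch only, not the actual file)
  // When DONT_USE_PRECOMPILED_HEADER is defined (Sun Studio builds, or
  // USE_PRECOMPILED_HEADER=0), the precompiled set is skipped entirely and
  // every .cpp file must include exactly the headers it needs itself --
  // which is why the explicit include list at the top of this file had to
  // be made complete ("Fixed broken include dependencies").
  #ifndef DONT_USE_PRECOMPILED_HEADER
  # include "memory/allocation.hpp"
  # include "memory/universe.hpp"
  # include "utilities/globalDefinitions.hpp"
  #endif // DONT_USE_PRECOMPILED_HEADER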

/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
  _level(level),
  _ref_processor(NULL) {
  if (!_virtual_space.initialize(rs, initial_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                    "object heap");
  }
  // Mangle all of the initial generation.
  if (ZapUnusedHeapArea) {
    MemRegion mangle_region((HeapWord*)_virtual_space.low(),
      (HeapWord*)_virtual_space.high());
    SpaceMangler::mangle_region(mangle_region);
  }
  _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
          (HeapWord*)_virtual_space.high_boundary());
}
GenerationSpec* Generation::spec() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(0 <= level() && level() < gch->_n_gens, "Bad gen level");
  return gch->_gen_specs[level()];
}

size_t Generation::max_capacity() const {
  return reserved().byte_size();
}

void Generation::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

// By default we get a single threaded default reference processor;
// generations needing multi-threaded refs discovery override this method.
void Generation::ref_processor_init() {
  assert(_ref_processor == NULL, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  _ref_processor =
    new ReferenceProcessor(_reserved,                  // span
                           refs_discovery_is_atomic(), // atomic_discovery
                           refs_discovery_is_mt());    // mt_discovery
  if (_ref_processor == NULL) {
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
}

void Generation::print() const { print_on(tty); }

void Generation::print_on(outputStream* st)  const {
  st->print(" %-20s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
              _virtual_space.low_boundary(),
              _virtual_space.high(),
              _virtual_space.high_boundary());
}

void Generation::print_summary_info() { print_summary_info_on(tty); }

void Generation::print_summary_info_on(outputStream* st) {
  StatRecord* sr = stat_record();
  double time = sr->accumulated_time.seconds();
  st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
               "%d GC's, avg GC time %3.7f]",
               level(), time, sr->invocations,
               sr->invocations > 0 ? time / sr->invocations : 0.0);
}
// Utility iterator classes

class GenerationIsInReservedClosure : public SpaceClosure {
 public:
  const void* _p;
  Space* sp;
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in_reserved(_p)) sp = s;
    }
  }
  GenerationIsInReservedClosure(const void* p) : _p(p), sp(NULL) {}
};

class GenerationIsInClosure : public SpaceClosure {
 public:
  const void* _p;
  Space* sp;
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in(_p)) sp = s;
    }
  }
  GenerationIsInClosure(const void* p) : _p(p), sp(NULL) {}
};

bool Generation::is_in(const void* p) const {
  GenerationIsInClosure blk(p);
  ((Generation*)this)->space_iterate(&blk);
  return blk.sp != NULL;
}

DefNewGeneration* Generation::as_DefNewGeneration() {
  assert((kind() == Generation::DefNew) ||
         (kind() == Generation::ParNew) ||
         (kind() == Generation::ASParNew),
    "Wrong youngest generation type");
  return (DefNewGeneration*) this;
}

Generation* Generation::next_gen() const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  int next = level() + 1;
  if (next < gch->_n_gens) {
    return gch->_gens[next];
  } else {
    return NULL;
  }
}

size_t Generation::max_contiguous_available() const {
  // The largest number of contiguous free words in this or any higher generation.
  size_t max = 0;
  for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) {
    size_t avail = gen->contiguous_available();
    if (avail > max) {
      max = avail;
    }
  }
  return max;
}

bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_contiguous_available();
  bool   res = (available >= max_promotion_in_bytes);
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(
      "Generation: promo attempt is%s safe: available("SIZE_FORMAT") %s max_promo("SIZE_FORMAT")",
      res? "":" not", available, res? ">=":"<",
      max_promotion_in_bytes);
  }
  return res;
}

// Ignores "ref" and calls allocate().
oop Generation::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  HeapWord* result = allocate(obj_size, false);
  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
    return oop(result);
  } else {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    return gch->handle_failed_promotion(this, obj, obj_size);
  }
}

oop Generation::par_promote(int thread_num,
                            oop obj, markOop m, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  ShouldNotCallThis();
  return NULL;
}

void Generation::par_promote_alloc_undo(int thread_num,
                                        HeapWord* obj, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  guarantee(false, "No good general implementation.");
}

Space* Generation::space_containing(const void* p) const {
  GenerationIsInReservedClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk.sp;
}
// Some of these are mediocre general implementations.  Should be
// overridden to get better performance.

class GenerationBlockStartClosure : public SpaceClosure {
 public:
  const void* _p;
  HeapWord* _start;
  virtual void do_space(Space* s) {
    if (_start == NULL && s->is_in_reserved(_p)) {
      _start = s->block_start(_p);
    }
  }
  GenerationBlockStartClosure(const void* p) { _p = p; _start = NULL; }
};

HeapWord* Generation::block_start(const void* p) const {
  GenerationBlockStartClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk._start;
}

class GenerationBlockSizeClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  size_t size;
  virtual void do_space(Space* s) {
    if (size == 0 && s->is_in_reserved(_p)) {
      size = s->block_size(_p);
    }
  }
  GenerationBlockSizeClosure(const HeapWord* p) { _p = p; size = 0; }
};

size_t Generation::block_size(const HeapWord* p) const {
  GenerationBlockSizeClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  assert(blk.size > 0, "seems reasonable");
  return blk.size;
}

class GenerationBlockIsObjClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  bool is_obj;
  virtual void do_space(Space* s) {
    if (!is_obj && s->is_in_reserved(_p)) {
      is_obj |= s->block_is_obj(_p);
    }
  }
  GenerationBlockIsObjClosure(const HeapWord* p) { _p = p; is_obj = false; }
};

bool Generation::block_is_obj(const HeapWord* p) const {
  GenerationBlockIsObjClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk.is_obj;
}

class GenerationOopIterateClosure : public SpaceClosure {
 public:
  OopClosure* cl;
  MemRegion mr;
  virtual void do_space(Space* s) {
    s->oop_iterate(mr, cl);
  }
  GenerationOopIterateClosure(OopClosure* _cl, MemRegion _mr) :
    cl(_cl), mr(_mr) {}
};

void Generation::oop_iterate(OopClosure* cl) {
  GenerationOopIterateClosure blk(cl, _reserved);
  space_iterate(&blk);
}

void Generation::oop_iterate(MemRegion mr, OopClosure* cl) {
  GenerationOopIterateClosure blk(cl, mr);
  space_iterate(&blk);
}

void Generation::younger_refs_in_space_iterate(Space* sp,
                                               OopsInGenClosure* cl) {
  GenRemSet* rs = SharedHeap::heap()->rem_set();
  rs->younger_refs_in_space_iterate(sp, cl);
}

class GenerationObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;
 public:
  virtual void do_space(Space* s) {
    s->object_iterate(_cl);
  }
  GenerationObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};

void Generation::object_iterate(ObjectClosure* cl) {
  GenerationObjIterateClosure blk(cl);
  space_iterate(&blk);
}

class GenerationSafeObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;
 public:
  virtual void do_space(Space* s) {
    s->safe_object_iterate(_cl);
  }
  GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};

void Generation::safe_object_iterate(ObjectClosure* cl) {
  GenerationSafeObjIterateClosure blk(cl);
  space_iterate(&blk);
}

void Generation::prepare_for_compaction(CompactPoint* cp) {
  // Generic implementation, can be specialized
  CompactibleSpace* space = first_compaction_space();
  while (space != NULL) {
    space->prepare_for_compaction(cp);
    space = space->next_compaction_space();
  }
}

class AdjustPointersClosure: public SpaceClosure {
 public:
  void do_space(Space* sp) {
    sp->adjust_pointers();
  }
};

void Generation::adjust_pointers() {
  // Note that this is done over all spaces, not just the compactible
  // ones.
  AdjustPointersClosure blk;
  space_iterate(&blk, true);
}

void Generation::compact() {
  CompactibleSpace* sp = first_compaction_space();
  while (sp != NULL) {
    sp->compact();
    sp = sp->next_compaction_space();
  }
}
CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level,
                               GenRemSet* remset) :
  Generation(rs, initial_byte_size, level), _rs(remset)
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
  if (_bts == NULL)
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");

  // Verify that the start and end of this generation are each the start of a card.
  // If this weren't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
}
bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what grow_by(0) would return
  }
  size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = ReservedSpace::page_align_size_down(bytes);
  }
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    success = grow_to_reserved();
  }
  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }

  return success;
}

// No young generation references, clear this generation's cards.
void CardGeneration::clear_remembered_set() {
  _rs->clear(reserved());
}

// Objects in this generation may have moved, invalidate this
// generation's cards.
void CardGeneration::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}

// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}

void OneContigSpaceCardGeneration::collect(bool   full,
                                           bool   clear_all_soft_refs,
                                           size_t size,
                                           bool   is_tlab) {
  SpecializationStats::clear();
  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  ReferenceProcessorSpanMutator
    x(ref_processor(), GenCollectedHeap::heap()->reserved_region());
  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
  SpecializationStats::print();
}
HeapWord*
OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
                                                  bool is_tlab,
                                                  bool parallel) {
  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if (result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // else try again
      }
    }
  } else {
    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}
bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}

void OneContigSpaceCardGeneration::shrink(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  size_t size = ReservedSpace::page_align_size_down(bytes);
  if (size > 0) {
    shrink_by(size);
  }
}

size_t OneContigSpaceCardGeneration::capacity() const {
  return _the_space->capacity();
}

size_t OneContigSpaceCardGeneration::used() const {
  return _the_space->used();
}

size_t OneContigSpaceCardGeneration::free() const {
  return _the_space->free();
}

MemRegion OneContigSpaceCardGeneration::used_region() const {
  return the_space()->used_region();
}

size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}

size_t OneContigSpaceCardGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}

bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_the_space->bottom(), new_word_size);
    // Expand card table
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      MemRegion mangle_region(_the_space->end(),
                              (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    _the_space->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}

bool OneContigSpaceCardGeneration::grow_to_reserved() {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  }
  return success;
}

void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  // Shrink committed space
  _virtual_space.shrink_by(bytes);
  // Shrink space; this also shrinks the space's BOT
  _the_space->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(_the_space->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(_the_space->bottom(), new_word_size);
  // Shrink the card table
  Universe::heap()->barrier_set()->resize_covered_region(mr);

  if (Verbose && PrintGC) {
    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size + bytes;
    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, new_mem_size/K);
  }
}
// Currently nothing to do.
void OneContigSpaceCardGeneration::prepare_for_verify() {}

// Override for a card-table generation with one contiguous
// space. NOTE: For reasons that are lost in the fog of history,
// this code is used when you iterate over perm gen objects,
// even when one uses CDS, where the perm gen has a couple of
// other spaces; this is because CompactingPermGenGen derives
// from OneContigSpaceCardGeneration. This should be cleaned up;
// see CR 6897789.
void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
                                                 bool usedOnly) {
  blk->do_space(_the_space);
}

void OneContigSpaceCardGeneration::object_iterate_since_last_GC(ObjectClosure* blk) {
  // Deal with delayed initialization of _the_space,
  // and lack of initialization of _last_gc.
  if (_last_gc.space() == NULL) {
    assert(the_space() != NULL, "shouldn't be NULL");
    _last_gc = the_space()->bottom_mark();
  }
  the_space()->object_iterate_from(_last_gc, blk);
}

void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
  blk->set_generation(this);
  younger_refs_in_space_iterate(_the_space, blk);
  blk->reset_generation();
}

void OneContigSpaceCardGeneration::save_marks() {
  _the_space->set_saved_mark();
}

void OneContigSpaceCardGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}

bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}

#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)      \
                                                                                 \
void OneContigSpaceCardGeneration::                                             \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {                  \
  blk->set_generation(this);                                                    \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                     \
  blk->reset_generation();                                                      \
  save_marks();                                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN

void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
  _last_gc = WaterMark(the_space(), the_space()->top());

  // update the generation and space performance counters
  update_counters();
  if (ZapUnusedHeapArea) {
    the_space()->check_mangled_unused_area_complete();
  }
}

void OneContigSpaceCardGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  the_space()->set_top_for_allocations();
}

void OneContigSpaceCardGeneration::verify(bool allow_dirty) {
  the_space()->verify(allow_dirty);
}

void OneContigSpaceCardGeneration::print_on(outputStream* st)  const {
  Generation::print_on(st);
  st->print("   the");
  the_space()->print_on(st);
}
