src/share/vm/memory/defNewGeneration.cpp

author       ysr
date         Fri, 17 Dec 2010 23:41:31 -0800
changeset    2380:74ee0db180fa
parent       2336:6cd6d394f280
child        2996:48048b59a551
permissions  -rw-r--r--

6807801: CMS: could save/restore fewer header words during scavenge
Summary: Age bits need not enter the mark-word preservation calculus; also affected, in addition to CMS, per CR synopsis above, were ParNew (but not DefNew), ParallelScavenge and G1, albeit to a lesser degree than CMS.
Reviewed-by: tonyp, johnc
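
The predicate this CR tightens lives in the mark-word (markOop) code rather than in this file, but its effect is visible below in preserve_mark_if_necessary(), which decides whether an (oop, mark) pair is pushed onto the mark-preservation stacks during a promotion failure. The following is a minimal, self-contained sketch of the idea only -- the constants, names and bit layout are illustrative, not the real markOop implementation: once the age bits are masked out of the comparison against the default ("prototype") mark, a header whose only non-default content is the object's age no longer needs to be saved and restored.

    #include <cstdint>
    #include <cstdio>

    // Illustrative layout only -- not the real markOop bit assignments.
    static const unsigned  AGE_SHIFT         = 3;
    static const unsigned  AGE_BITS          = 4;
    static const uintptr_t AGE_MASK_IN_PLACE =
        ((static_cast<uintptr_t>(1) << AGE_BITS) - 1) << AGE_SHIFT;
    static const uintptr_t PROTOTYPE_MARK    = 0x1;  // stand-in for the default mark

    // Conservative predicate: any mark that is not exactly the prototype is saved,
    // so a non-zero age alone forces preservation of the header word.
    static bool must_preserve_conservative(uintptr_t mark) {
      return mark != PROTOTYPE_MARK;
    }

    // Age-agnostic predicate (the idea behind the CR synopsis): the age bits are
    // excluded from the comparison, so only marks that are interesting for other
    // reasons (locked, hashed, etc. in the real VM) still need to be saved before
    // the scavenger overwrites the header with a forwarding pointer.
    static bool must_preserve_age_agnostic(uintptr_t mark) {
      return (mark & ~AGE_MASK_IN_PLACE) != (PROTOTYPE_MARK & ~AGE_MASK_IN_PLACE);
    }

    int main() {
      // A mark that equals the prototype except for age == 2.
      uintptr_t aged_mark = PROTOTYPE_MARK | (static_cast<uintptr_t>(2) << AGE_SHIFT);
      std::printf("conservative: %d  age-agnostic: %d\n",
                  must_preserve_conservative(aged_mark),
                  must_preserve_age_agnostic(aged_mark));
      return 0;
    }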

     1 /*
     2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/shared/collectorCounters.hpp"
    27 #include "gc_implementation/shared/gcPolicyCounters.hpp"
    28 #include "gc_implementation/shared/spaceDecorator.hpp"
    29 #include "memory/defNewGeneration.inline.hpp"
    30 #include "memory/gcLocker.inline.hpp"
    31 #include "memory/genCollectedHeap.hpp"
    32 #include "memory/genOopClosures.inline.hpp"
    33 #include "memory/generationSpec.hpp"
    34 #include "memory/iterator.hpp"
    35 #include "memory/referencePolicy.hpp"
    36 #include "memory/space.inline.hpp"
    37 #include "oops/instanceRefKlass.hpp"
    38 #include "oops/oop.inline.hpp"
    39 #include "runtime/java.hpp"
    40 #include "utilities/copy.hpp"
    41 #include "utilities/stack.inline.hpp"
    42 #ifdef TARGET_OS_FAMILY_linux
    43 # include "thread_linux.inline.hpp"
    44 #endif
    45 #ifdef TARGET_OS_FAMILY_solaris
    46 # include "thread_solaris.inline.hpp"
    47 #endif
    48 #ifdef TARGET_OS_FAMILY_windows
    49 # include "thread_windows.inline.hpp"
    50 #endif
    52 //
    53 // DefNewGeneration functions.
    55 // Methods of protected closure types.
    57 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
    58   assert(g->level() == 0, "Optimized for youngest gen.");
    59 }
    60 void DefNewGeneration::IsAliveClosure::do_object(oop p) {
    61   assert(false, "Do not call.");
    62 }
    63 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
    64   return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
    65 }
    67 DefNewGeneration::KeepAliveClosure::
    68 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
    69   GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
    70   assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
    71   _rs = (CardTableRS*)rs;
    72 }
    74 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
    75 void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
    78 DefNewGeneration::FastKeepAliveClosure::
    79 FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
    80   DefNewGeneration::KeepAliveClosure(cl) {
    81   _boundary = g->reserved().end();
    82 }
    84 void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
    85 void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
    87 DefNewGeneration::EvacuateFollowersClosure::
    88 EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
    89                          ScanClosure* cur, ScanClosure* older) :
    90   _gch(gch), _level(level),
    91   _scan_cur_or_nonheap(cur), _scan_older(older)
    92 {}
    94 void DefNewGeneration::EvacuateFollowersClosure::do_void() {
    95   do {
    96     _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
    97                                        _scan_older);
    98   } while (!_gch->no_allocs_since_save_marks(_level));
    99 }
   101 DefNewGeneration::FastEvacuateFollowersClosure::
   102 FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
   103                              DefNewGeneration* gen,
   104                              FastScanClosure* cur, FastScanClosure* older) :
   105   _gch(gch), _level(level), _gen(gen),
   106   _scan_cur_or_nonheap(cur), _scan_older(older)
   107 {}
   109 void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
   110   do {
   111     _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
   112                                        _scan_older);
   113   } while (!_gch->no_allocs_since_save_marks(_level));
   114   guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
   115 }
   117 ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
   118   OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
   119 {
   120   assert(_g->level() == 0, "Optimized for youngest generation");
   121   _boundary = _g->reserved().end();
   122 }
   124 void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
   125 void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
   127 FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
   128   OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
   129 {
   130   assert(_g->level() == 0, "Optimized for youngest generation");
   131   _boundary = _g->reserved().end();
   132 }
   134 void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
   135 void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
   137 ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
   138   OopClosure(g->ref_processor()), _g(g)
   139 {
   140   assert(_g->level() == 0, "Optimized for youngest generation");
   141   _boundary = _g->reserved().end();
   142 }
   144 void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
   145 void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }
   147 void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
   148 void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
   150 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
   151                                    size_t initial_size,
   152                                    int level,
   153                                    const char* policy)
   154   : Generation(rs, initial_size, level),
   155     _promo_failure_drain_in_progress(false),
   156     _should_allocate_from_space(false)
   157 {
   158   MemRegion cmr((HeapWord*)_virtual_space.low(),
   159                 (HeapWord*)_virtual_space.high());
   160   Universe::heap()->barrier_set()->resize_covered_region(cmr);
   162   if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
   163     _eden_space = new ConcEdenSpace(this);
   164   } else {
   165     _eden_space = new EdenSpace(this);
   166   }
   167   _from_space = new ContiguousSpace();
   168   _to_space   = new ContiguousSpace();
   170   if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
   171     vm_exit_during_initialization("Could not allocate a new gen space");
   173   // Compute the maximum eden and survivor space sizes. These sizes
   174   // are computed assuming the entire reserved space is committed.
   175   // These values are exported as performance counters.
   176   uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
   177   uintx size = _virtual_space.reserved_size();
   178   _max_survivor_size = compute_survivor_size(size, alignment);
   179   _max_eden_size = size - (2*_max_survivor_size);
   181   // allocate the performance counters
   183   // Generation counters -- generation 0, 3 subspaces
   184   _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
   185   _gc_counters = new CollectorCounters(policy, 0);
   187   _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
   188                                       _gen_counters);
   189   _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
   190                                       _gen_counters);
   191   _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
   192                                     _gen_counters);
   194   compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
   195   update_counters();
   196   _next_gen = NULL;
   197   _tenuring_threshold = MaxTenuringThreshold;
   198   _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
   199 }
   201 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
   202                                                 bool clear_space,
   203                                                 bool mangle_space) {
   204   uintx alignment =
   205     GenCollectedHeap::heap()->collector_policy()->min_alignment();
   207   // If the spaces are being cleared (only done at heap initialization
   208   // currently), the survivor spaces need not be empty.
   209   // Otherwise, no care is taken for used areas in the survivor spaces
   210   // so check.
   211   assert(clear_space || (to()->is_empty() && from()->is_empty()),
   212     "Initialization of the survivor spaces assumes these are empty");
   214   // Compute sizes
   215   uintx size = _virtual_space.committed_size();
   216   uintx survivor_size = compute_survivor_size(size, alignment);
   217   uintx eden_size = size - (2*survivor_size);
   218   assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
   220   if (eden_size < minimum_eden_size) {
   221     // May happen due to 64Kb rounding, if so adjust eden size back up
   222     minimum_eden_size = align_size_up(minimum_eden_size, alignment);
   223     uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
   224     uintx unaligned_survivor_size =
   225       align_size_down(maximum_survivor_size, alignment);
   226     survivor_size = MAX2(unaligned_survivor_size, alignment);
   227     eden_size = size - (2*survivor_size);
   228     assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
   229     assert(eden_size >= minimum_eden_size, "just checking");
   230   }
   232   char *eden_start = _virtual_space.low();
   233   char *from_start = eden_start + eden_size;
   234   char *to_start   = from_start + survivor_size;
   235   char *to_end     = to_start   + survivor_size;
   237   assert(to_end == _virtual_space.high(), "just checking");
   238   assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
   239   assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
   240   assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");
   242   MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
   243   MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
   244   MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);
   246   // A minimum eden size implies that there is a part of eden that
   247   // is being used and that affects the initialization of any
   248   // newly formed eden.
   249   bool live_in_eden = minimum_eden_size > 0;
   251   // If not clearing the spaces, do some checking to verify that
    252   // the spaces are already mangled.
   253   if (!clear_space) {
   254     // Must check mangling before the spaces are reshaped.  Otherwise,
    255     // the bottom or end of one space may have moved into another, so
   256     // a failure of the check may not correctly indicate which space
   257     // is not properly mangled.
   258     if (ZapUnusedHeapArea) {
   259       HeapWord* limit = (HeapWord*) _virtual_space.high();
   260       eden()->check_mangled_unused_area(limit);
   261       from()->check_mangled_unused_area(limit);
   262         to()->check_mangled_unused_area(limit);
   263     }
   264   }
   266   // Reset the spaces for their new regions.
   267   eden()->initialize(edenMR,
   268                      clear_space && !live_in_eden,
   269                      SpaceDecorator::Mangle);
   270   // If clear_space and live_in_eden, we will not have cleared any
   271   // portion of eden above its top. This can cause newly
   272   // expanded space not to be mangled if using ZapUnusedHeapArea.
   273   // We explicitly do such mangling here.
   274   if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
   275     eden()->mangle_unused_area();
   276   }
   277   from()->initialize(fromMR, clear_space, mangle_space);
   278   to()->initialize(toMR, clear_space, mangle_space);
   280   // Set next compaction spaces.
   281   eden()->set_next_compaction_space(from());
   282   // The to-space is normally empty before a compaction so need
   283   // not be considered.  The exception is during promotion
   284   // failure handling when to-space can contain live objects.
   285   from()->set_next_compaction_space(NULL);
   286 }
   288 void DefNewGeneration::swap_spaces() {
   289   ContiguousSpace* s = from();
   290   _from_space        = to();
   291   _to_space          = s;
   292   eden()->set_next_compaction_space(from());
   293   // The to-space is normally empty before a compaction so need
   294   // not be considered.  The exception is during promotion
   295   // failure handling when to-space can contain live objects.
   296   from()->set_next_compaction_space(NULL);
   298   if (UsePerfData) {
   299     CSpaceCounters* c = _from_counters;
   300     _from_counters = _to_counters;
   301     _to_counters = c;
   302   }
   303 }
   305 bool DefNewGeneration::expand(size_t bytes) {
   306   MutexLocker x(ExpandHeap_lock);
   307   HeapWord* prev_high = (HeapWord*) _virtual_space.high();
   308   bool success = _virtual_space.expand_by(bytes);
   309   if (success && ZapUnusedHeapArea) {
   310     // Mangle newly committed space immediately because it
    311     // can be done here more simply than after the new
   312     // spaces have been computed.
   313     HeapWord* new_high = (HeapWord*) _virtual_space.high();
   314     MemRegion mangle_region(prev_high, new_high);
   315     SpaceMangler::mangle_region(mangle_region);
   316   }
    318   // Do not attempt an expand to the reserve size.  The
    319   // request should properly observe the maximum size of
    320   // the generation, so an expand-to-reserve should be
    321   // unnecessary.  Also, a second expand-to-reserve call
    322   // could potentially cause an undue expansion, for
    323   // example if the first expand fails for unknown reasons
    324   // but the second succeeds and expands the heap to its
    325   // maximum value.
   326   if (GC_locker::is_active()) {
   327     if (PrintGC && Verbose) {
   328       gclog_or_tty->print_cr("Garbage collection disabled, "
   329         "expanded heap instead");
   330     }
   331   }
   333   return success;
   334 }
   337 void DefNewGeneration::compute_new_size() {
   338   // This is called after a gc that includes the following generation
   339   // (which is required to exist.)  So from-space will normally be empty.
   340   // Note that we check both spaces, since if scavenge failed they revert roles.
   341   // If not we bail out (otherwise we would have to relocate the objects)
   342   if (!from()->is_empty() || !to()->is_empty()) {
   343     return;
   344   }
   346   int next_level = level() + 1;
   347   GenCollectedHeap* gch = GenCollectedHeap::heap();
   348   assert(next_level < gch->_n_gens,
   349          "DefNewGeneration cannot be an oldest gen");
   351   Generation* next_gen = gch->_gens[next_level];
   352   size_t old_size = next_gen->capacity();
   353   size_t new_size_before = _virtual_space.committed_size();
   354   size_t min_new_size = spec()->init_size();
   355   size_t max_new_size = reserved().byte_size();
   356   assert(min_new_size <= new_size_before &&
   357          new_size_before <= max_new_size,
   358          "just checking");
   359   // All space sizes must be multiples of Generation::GenGrain.
   360   size_t alignment = Generation::GenGrain;
   362   // Compute desired new generation size based on NewRatio and
   363   // NewSizeThreadIncrease
   364   size_t desired_new_size = old_size/NewRatio;
   365   int threads_count = Threads::number_of_non_daemon_threads();
   366   size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
   367   desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
   369   // Adjust new generation size
   370   desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
   371   assert(desired_new_size <= max_new_size, "just checking");
   373   bool changed = false;
   374   if (desired_new_size > new_size_before) {
   375     size_t change = desired_new_size - new_size_before;
   376     assert(change % alignment == 0, "just checking");
   377     if (expand(change)) {
   378        changed = true;
   379     }
   380     // If the heap failed to expand to the desired size,
   381     // "changed" will be false.  If the expansion failed
   382     // (and at this point it was expected to succeed),
   383     // ignore the failure (leaving "changed" as false).
   384   }
   385   if (desired_new_size < new_size_before && eden()->is_empty()) {
   386     // bail out of shrinking if objects in eden
   387     size_t change = new_size_before - desired_new_size;
   388     assert(change % alignment == 0, "just checking");
   389     _virtual_space.shrink_by(change);
   390     changed = true;
   391   }
   392   if (changed) {
   393     // The spaces have already been mangled at this point but
   394     // may not have been cleared (set top = bottom) and should be.
   395     // Mangling was done when the heap was being expanded.
   396     compute_space_boundaries(eden()->used(),
   397                              SpaceDecorator::Clear,
   398                              SpaceDecorator::DontMangle);
   399     MemRegion cmr((HeapWord*)_virtual_space.low(),
   400                   (HeapWord*)_virtual_space.high());
   401     Universe::heap()->barrier_set()->resize_covered_region(cmr);
   402     if (Verbose && PrintGC) {
   403       size_t new_size_after  = _virtual_space.committed_size();
   404       size_t eden_size_after = eden()->capacity();
   405       size_t survivor_size_after = from()->capacity();
   406       gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
   407         SIZE_FORMAT "K [eden="
   408         SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
   409         new_size_before/K, new_size_after/K,
   410         eden_size_after/K, survivor_size_after/K);
   411       if (WizardMode) {
   412         gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
   413           thread_increase_size/K, threads_count);
   414       }
   415       gclog_or_tty->cr();
   416     }
   417   }
   418 }
   420 void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
   421   // $$$ This may be wrong in case of "scavenge failure"?
   422   eden()->object_iterate(cl);
   423 }
   425 void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
   426   assert(false, "NYI -- are you sure you want to call this?");
   427 }
   430 size_t DefNewGeneration::capacity() const {
   431   return eden()->capacity()
   432        + from()->capacity();  // to() is only used during scavenge
   433 }
   436 size_t DefNewGeneration::used() const {
   437   return eden()->used()
   438        + from()->used();      // to() is only used during scavenge
   439 }
   442 size_t DefNewGeneration::free() const {
   443   return eden()->free()
   444        + from()->free();      // to() is only used during scavenge
   445 }
   447 size_t DefNewGeneration::max_capacity() const {
   448   const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
   449   const size_t reserved_bytes = reserved().byte_size();
   450   return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
   451 }
   453 size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
   454   return eden()->free();
   455 }
   457 size_t DefNewGeneration::capacity_before_gc() const {
   458   return eden()->capacity();
   459 }
   461 size_t DefNewGeneration::contiguous_available() const {
   462   return eden()->free();
   463 }
   466 HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
   467 HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
   469 void DefNewGeneration::object_iterate(ObjectClosure* blk) {
   470   eden()->object_iterate(blk);
   471   from()->object_iterate(blk);
   472 }
   475 void DefNewGeneration::space_iterate(SpaceClosure* blk,
   476                                      bool usedOnly) {
   477   blk->do_space(eden());
   478   blk->do_space(from());
   479   blk->do_space(to());
   480 }
   482 // The last collection bailed out, we are running out of heap space,
   483 // so we try to allocate the from-space, too.
   484 HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
   485   HeapWord* result = NULL;
   486   if (Verbose && PrintGCDetails) {
   487     gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
   488                         "  will_fail: %s"
   489                         "  heap_lock: %s"
   490                         "  free: " SIZE_FORMAT,
   491                         size,
   492                         GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
   493                           "true" : "false",
   494                         Heap_lock->is_locked() ? "locked" : "unlocked",
   495                         from()->free());
   496   }
   497   if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
   498     if (Heap_lock->owned_by_self() ||
   499         (SafepointSynchronize::is_at_safepoint() &&
   500          Thread::current()->is_VM_thread())) {
   501       // If the Heap_lock is not locked by this thread, this will be called
   502       // again later with the Heap_lock held.
   503       result = from()->allocate(size);
   504     } else if (PrintGC && Verbose) {
   505       gclog_or_tty->print_cr("  Heap_lock is not owned by self");
   506     }
   507   } else if (PrintGC && Verbose) {
   508     gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
   509   }
   510   if (PrintGC && Verbose) {
   511     gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
   512   }
   513   return result;
   514 }
   516 HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
   517                                                 bool   is_tlab,
   518                                                 bool   parallel) {
   519   // We don't attempt to expand the young generation (but perhaps we should.)
   520   return allocate(size, is_tlab);
   521 }
   524 void DefNewGeneration::collect(bool   full,
   525                                bool   clear_all_soft_refs,
   526                                size_t size,
   527                                bool   is_tlab) {
   528   assert(full || size > 0, "otherwise we don't want to collect");
   529   GenCollectedHeap* gch = GenCollectedHeap::heap();
   530   _next_gen = gch->next_gen(this);
   531   assert(_next_gen != NULL,
   532     "This must be the youngest gen, and not the only gen");
    534   // If the next generation is too full to accommodate promotion
   535   // from this generation, pass on collection; let the next generation
   536   // do it.
   537   if (!collection_attempt_is_safe()) {
   538     if (Verbose && PrintGCDetails) {
   539       gclog_or_tty->print(" :: Collection attempt not safe :: ");
   540     }
   541     gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
   542     return;
   543   }
   544   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
   546   init_assuming_no_promotion_failure();
   548   TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
   549   // Capture heap used before collection (for printing).
   550   size_t gch_prev_used = gch->used();
   552   SpecializationStats::clear();
   554   // These can be shared for all code paths
   555   IsAliveClosure is_alive(this);
   556   ScanWeakRefClosure scan_weak_ref(this);
   558   age_table()->clear();
   559   to()->clear(SpaceDecorator::Mangle);
   561   gch->rem_set()->prepare_for_younger_refs_iterate(false);
   563   assert(gch->no_allocs_since_save_marks(0),
   564          "save marks have not been newly set.");
   566   // Not very pretty.
   567   CollectorPolicy* cp = gch->collector_policy();
   569   FastScanClosure fsc_with_no_gc_barrier(this, false);
   570   FastScanClosure fsc_with_gc_barrier(this, true);
   572   set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
   573   FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
   574                                                   &fsc_with_no_gc_barrier,
   575                                                   &fsc_with_gc_barrier);
   577   assert(gch->no_allocs_since_save_marks(0),
   578          "save marks have not been newly set.");
   580   gch->gen_process_strong_roots(_level,
   581                                 true,  // Process younger gens, if any,
   582                                        // as strong roots.
   583                                 true,  // activate StrongRootsScope
   584                                 false, // not collecting perm generation.
   585                                 SharedHeap::SO_AllClasses,
   586                                 &fsc_with_no_gc_barrier,
   587                                 true,   // walk *all* scavengable nmethods
   588                                 &fsc_with_gc_barrier);
   590   // "evacuate followers".
   591   evacuate_followers.do_void();
   593   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
   594   ReferenceProcessor* rp = ref_processor();
   595   rp->setup_policy(clear_all_soft_refs);
   596   rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
   597                                     NULL);
   598   if (!promotion_failed()) {
   599     // Swap the survivor spaces.
   600     eden()->clear(SpaceDecorator::Mangle);
   601     from()->clear(SpaceDecorator::Mangle);
   602     if (ZapUnusedHeapArea) {
   603       // This is now done here because of the piece-meal mangling which
   604       // can check for valid mangling at intermediate points in the
   605       // collection(s).  When a minor collection fails to collect
    606       // sufficient space, resizing of the young generation can occur
    607       // and redistribute the spaces in the young generation.  Mangle
   608       // here so that unzapped regions don't get distributed to
   609       // other spaces.
   610       to()->mangle_unused_area();
   611     }
   612     swap_spaces();
   614     assert(to()->is_empty(), "to space should be empty now");
   616     // Set the desired survivor size to half the real survivor space
   617     _tenuring_threshold =
   618       age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
   620     // A successful scavenge should restart the GC time limit count which is
   621     // for full GC's.
   622     AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
   623     size_policy->reset_gc_overhead_limit_count();
   624     if (PrintGC && !PrintGCDetails) {
   625       gch->print_heap_change(gch_prev_used);
   626     }
   627     assert(!gch->incremental_collection_failed(), "Should be clear");
   628   } else {
   629     assert(_promo_failure_scan_stack.is_empty(), "post condition");
   630     _promo_failure_scan_stack.clear(true); // Clear cached segments.
   632     remove_forwarding_pointers();
   633     if (PrintGCDetails) {
   634       gclog_or_tty->print(" (promotion failed) ");
   635     }
   636     // Add to-space to the list of space to compact
   637     // when a promotion failure has occurred.  In that
   638     // case there can be live objects in to-space
   639     // as a result of a partial evacuation of eden
   640     // and from-space.
   641     swap_spaces();   // For uniformity wrt ParNewGeneration.
   642     from()->set_next_compaction_space(to());
   643     gch->set_incremental_collection_failed();
   645     // Inform the next generation that a promotion failure occurred.
   646     _next_gen->promotion_failure_occurred();
   648     // Reset the PromotionFailureALot counters.
   649     NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
   650   }
   651   // set new iteration safe limit for the survivor spaces
   652   from()->set_concurrent_iteration_safe_limit(from()->top());
   653   to()->set_concurrent_iteration_safe_limit(to()->top());
   654   SpecializationStats::print();
   655   update_time_of_last_gc(os::javaTimeMillis());
   656 }
   658 class RemoveForwardPointerClosure: public ObjectClosure {
   659 public:
   660   void do_object(oop obj) {
   661     obj->init_mark();
   662   }
   663 };
   665 void DefNewGeneration::init_assuming_no_promotion_failure() {
   666   _promotion_failed = false;
   667   from()->set_next_compaction_space(NULL);
   668 }
   670 void DefNewGeneration::remove_forwarding_pointers() {
   671   RemoveForwardPointerClosure rspc;
   672   eden()->object_iterate(&rspc);
   673   from()->object_iterate(&rspc);
   675   // Now restore saved marks, if any.
   676   assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
   677          "should be the same");
   678   while (!_objs_with_preserved_marks.is_empty()) {
   679     oop obj   = _objs_with_preserved_marks.pop();
   680     markOop m = _preserved_marks_of_objs.pop();
   681     obj->set_mark(m);
   682   }
   683   _objs_with_preserved_marks.clear(true);
   684   _preserved_marks_of_objs.clear(true);
   685 }
   687 void DefNewGeneration::preserve_mark(oop obj, markOop m) {
   688   assert(promotion_failed() && m->must_be_preserved_for_promotion_failure(obj),
   689          "Oversaving!");
   690   _objs_with_preserved_marks.push(obj);
   691   _preserved_marks_of_objs.push(m);
   692 }
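        // Note: must_be_preserved_for_promotion_failure() is implemented in the
        // mark-word (markOop) code, not in this file.  Per the CR synopsis above
        // (6807801), a mark that differs from the prototype only in its age bits
        // need not be preserved, so fewer (oop, markOop) pairs land on the
        // _objs_with_preserved_marks / _preserved_marks_of_objs stacks that
        // remove_forwarding_pointers() above restores.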
   694 void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
   695   if (m->must_be_preserved_for_promotion_failure(obj)) {
   696     preserve_mark(obj, m);
   697   }
   698 }
   700 void DefNewGeneration::handle_promotion_failure(oop old) {
   701   if (PrintPromotionFailure && !_promotion_failed) {
   702     gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
   703                         old->size());
   704   }
   705   _promotion_failed = true;
   706   preserve_mark_if_necessary(old, old->mark());
   707   // forward to self
   708   old->forward_to(old);
   710   _promo_failure_scan_stack.push(old);
   712   if (!_promo_failure_drain_in_progress) {
   713     // prevent recursion in copy_to_survivor_space()
   714     _promo_failure_drain_in_progress = true;
   715     drain_promo_failure_scan_stack();
   716     _promo_failure_drain_in_progress = false;
   717   }
   718 }
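        // Note: the tenuring decision below reads the object's age from the age
        // bits of its mark word (old->age()).  Objects below the tenuring
        // threshold are copied into to-space when room allows and have their age
        // incremented; otherwise they are promoted via _next_gen->promote().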
   720 oop DefNewGeneration::copy_to_survivor_space(oop old) {
   721   assert(is_in_reserved(old) && !old->is_forwarded(),
   722          "shouldn't be scavenging this oop");
   723   size_t s = old->size();
   724   oop obj = NULL;
   726   // Try allocating obj in to-space (unless too old)
   727   if (old->age() < tenuring_threshold()) {
   728     obj = (oop) to()->allocate(s);
   729   }
   731   // Otherwise try allocating obj tenured
   732   if (obj == NULL) {
   733     obj = _next_gen->promote(old, s);
   734     if (obj == NULL) {
   735       handle_promotion_failure(old);
   736       return old;
   737     }
   738   } else {
   739     // Prefetch beyond obj
   740     const intx interval = PrefetchCopyIntervalInBytes;
   741     Prefetch::write(obj, interval);
   743     // Copy obj
   744     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
   746     // Increment age if obj still in new generation
   747     obj->incr_age();
   748     age_table()->add(obj, s);
   749   }
   751   // Done, insert forward pointer to obj in this header
   752   old->forward_to(obj);
   754   return obj;
   755 }
   757 void DefNewGeneration::drain_promo_failure_scan_stack() {
   758   while (!_promo_failure_scan_stack.is_empty()) {
   759      oop obj = _promo_failure_scan_stack.pop();
   760      obj->oop_iterate(_promo_failure_scan_stack_closure);
   761   }
   762 }
   764 void DefNewGeneration::save_marks() {
   765   eden()->set_saved_mark();
   766   to()->set_saved_mark();
   767   from()->set_saved_mark();
   768 }
   771 void DefNewGeneration::reset_saved_marks() {
   772   eden()->reset_saved_mark();
   773   to()->reset_saved_mark();
   774   from()->reset_saved_mark();
   775 }
   778 bool DefNewGeneration::no_allocs_since_save_marks() {
   779   assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
   780   assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
   781   return to()->saved_mark_at_top();
   782 }
   784 #define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
   785                                                                 \
   786 void DefNewGeneration::                                         \
   787 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
   788   cl->set_generation(this);                                     \
   789   eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
   790   to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
   791   from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
   792   cl->reset_generation();                                       \
   793   save_marks();                                                 \
   794 }
   796 ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
   798 #undef DefNew_SINCE_SAVE_MARKS_DEFN
   800 void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
   801                                          size_t max_alloc_words) {
   802   if (requestor == this || _promotion_failed) return;
   803   assert(requestor->level() > level(), "DefNewGeneration must be youngest");
   805   /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
   806   if (to_space->top() > to_space->bottom()) {
   807     trace("to_space not empty when contribute_scratch called");
   808   }
   809   */
   811   ContiguousSpace* to_space = to();
   812   assert(to_space->end() >= to_space->top(), "pointers out of order");
   813   size_t free_words = pointer_delta(to_space->end(), to_space->top());
   814   if (free_words >= MinFreeScratchWords) {
   815     ScratchBlock* sb = (ScratchBlock*)to_space->top();
   816     sb->num_words = free_words;
   817     sb->next = list;
   818     list = sb;
   819   }
   820 }
   822 void DefNewGeneration::reset_scratch() {
   823   // If contributing scratch in to_space, mangle all of
   824   // to_space if ZapUnusedHeapArea.  This is needed because
   825   // top is not maintained while using to-space as scratch.
   826   if (ZapUnusedHeapArea) {
   827     to()->mangle_unused_area_complete();
   828   }
   829 }
   831 bool DefNewGeneration::collection_attempt_is_safe() {
   832   if (!to()->is_empty()) {
   833     if (Verbose && PrintGCDetails) {
   834       gclog_or_tty->print(" :: to is not empty :: ");
   835     }
   836     return false;
   837   }
   838   if (_next_gen == NULL) {
   839     GenCollectedHeap* gch = GenCollectedHeap::heap();
   840     _next_gen = gch->next_gen(this);
   841     assert(_next_gen != NULL,
   842            "This must be the youngest gen, and not the only gen");
   843   }
   844   return _next_gen->promotion_attempt_is_safe(used());
   845 }
   847 void DefNewGeneration::gc_epilogue(bool full) {
   848   DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
   850   assert(!GC_locker::is_active(), "We should not be executing here");
   851   // Check if the heap is approaching full after a collection has
   852   // been done.  Generally the young generation is empty at
   853   // a minimum at the end of a collection.  If it is not, then
   854   // the heap is approaching full.
   855   GenCollectedHeap* gch = GenCollectedHeap::heap();
   856   if (full) {
   857     DEBUG_ONLY(seen_incremental_collection_failed = false;)
   858     if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
   859       if (Verbose && PrintGCDetails) {
   860         gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
   861                             GCCause::to_string(gch->gc_cause()));
   862       }
   863       gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
   864       set_should_allocate_from_space(); // we seem to be running out of space
   865     } else {
   866       if (Verbose && PrintGCDetails) {
   867         gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
   868                             GCCause::to_string(gch->gc_cause()));
   869       }
   870       gch->clear_incremental_collection_failed(); // We just did a full collection
   871       clear_should_allocate_from_space(); // if set
   872     }
   873   } else {
   874 #ifdef ASSERT
   875     // It is possible that incremental_collection_failed() == true
   876     // here, because an attempted scavenge did not succeed. The policy
   877     // is normally expected to cause a full collection which should
   878     // clear that condition, so we should not be here twice in a row
   879     // with incremental_collection_failed() == true without having done
   880     // a full collection in between.
   881     if (!seen_incremental_collection_failed &&
   882         gch->incremental_collection_failed()) {
   883       if (Verbose && PrintGCDetails) {
   884         gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
   885                             GCCause::to_string(gch->gc_cause()));
   886       }
   887       seen_incremental_collection_failed = true;
   888     } else if (seen_incremental_collection_failed) {
   889       if (Verbose && PrintGCDetails) {
   890         gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
   891                             GCCause::to_string(gch->gc_cause()));
   892       }
   893       assert(gch->gc_cause() == GCCause::_scavenge_alot ||
   894              (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
   895              !gch->incremental_collection_failed(),
   896              "Twice in a row");
   897       seen_incremental_collection_failed = false;
   898     }
   899 #endif // ASSERT
   900   }
   902   if (ZapUnusedHeapArea) {
   903     eden()->check_mangled_unused_area_complete();
   904     from()->check_mangled_unused_area_complete();
   905     to()->check_mangled_unused_area_complete();
   906   }
   908   // update the generation and space performance counters
   909   update_counters();
   910   gch->collector_policy()->counters()->update_counters();
   911 }
   913 void DefNewGeneration::record_spaces_top() {
   914   assert(ZapUnusedHeapArea, "Not mangling unused space");
   915   eden()->set_top_for_allocations();
   916   to()->set_top_for_allocations();
   917   from()->set_top_for_allocations();
   918 }
   921 void DefNewGeneration::update_counters() {
   922   if (UsePerfData) {
   923     _eden_counters->update_all();
   924     _from_counters->update_all();
   925     _to_counters->update_all();
   926     _gen_counters->update_all();
   927   }
   928 }
   930 void DefNewGeneration::verify(bool allow_dirty) {
   931   eden()->verify(allow_dirty);
   932   from()->verify(allow_dirty);
   933     to()->verify(allow_dirty);
   934 }
   936 void DefNewGeneration::print_on(outputStream* st) const {
   937   Generation::print_on(st);
   938   st->print("  eden");
   939   eden()->print_on(st);
   940   st->print("  from");
   941   from()->print_on(st);
   942   st->print("  to  ");
   943   to()->print_on(st);
   944 }
   947 const char* DefNewGeneration::name() const {
   948   return "def new generation";
   949 }
   951 // Moved from inline file as they are not called inline
   952 CompactibleSpace* DefNewGeneration::first_compaction_space() const {
   953   return eden();
   954 }
   956 HeapWord* DefNewGeneration::allocate(size_t word_size,
   957                                      bool is_tlab) {
   958   // This is the slow-path allocation for the DefNewGeneration.
   959   // Most allocations are fast-path in compiled code.
   960   // We try to allocate from the eden.  If that works, we are happy.
   961   // Note that since DefNewGeneration supports lock-free allocation, we
   962   // have to use it here, as well.
   963   HeapWord* result = eden()->par_allocate(word_size);
   964   if (result != NULL) {
   965     return result;
   966   }
   967   do {
   968     HeapWord* old_limit = eden()->soft_end();
   969     if (old_limit < eden()->end()) {
   970       // Tell the next generation we reached a limit.
   971       HeapWord* new_limit =
   972         next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
   973       if (new_limit != NULL) {
   974         Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
   975       } else {
   976         assert(eden()->soft_end() == eden()->end(),
   977                "invalid state after allocation_limit_reached returned null");
   978       }
   979     } else {
    980       // The allocation failed and the soft limit is equal to the hard
    981       // limit, so there is no reason to attempt another allocation.
   982       assert(old_limit == eden()->end(), "sanity check");
   983       break;
   984     }
    985     // Try to allocate until it succeeds or the soft limit can't be adjusted
   986     result = eden()->par_allocate(word_size);
   987   } while (result == NULL);
   989   // If the eden is full and the last collection bailed out, we are running
   990   // out of heap space, and we try to allocate the from-space, too.
   991   // allocate_from_space can't be inlined because that would introduce a
   992   // circular dependency at compile time.
   993   if (result == NULL) {
   994     result = allocate_from_space(word_size);
   995   }
   996   return result;
   997 }
    999 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
   1000                                          bool is_tlab) {
   1001   return eden()->par_allocate(word_size);
   1002 }
   1004 void DefNewGeneration::gc_prologue(bool full) {
   1005   // Ensure that _end and _soft_end are the same in eden space.
   1006   eden()->set_soft_end(eden()->end());
   1007 }
   1009 size_t DefNewGeneration::tlab_capacity() const {
   1010   return eden()->capacity();
   1011 }
   1013 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
   1014   return unsafe_max_alloc_nogc();
   1015 }
