src/share/vm/memory/defNewGeneration.cpp

author       stefank
date         Wed, 01 Dec 2010 15:04:06 +0100
changeset    2325:c760f78e0a53
parent       2314:f95d63e2154a
child        2336:6cd6d394f280
permissions  -rw-r--r--

7003125: precompiled.hpp is included when precompiled headers are not used
Summary: Added an ifndef DONT_USE_PRECOMPILED_HEADER to precompiled.hpp. Set up DONT_USE_PRECOMPILED_HEADER when compiling with Sun Studio or when the user specifies USE_PRECOMPILED_HEADER=0. Fixed broken include dependencies.
Reviewed-by: coleenp, kvn
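As a rough illustration of the fix described in the summary (a sketch only; the include names below are placeholders, not the actual contents of precompiled.hpp):

  // Hypothetical sketch of the guard added to precompiled.hpp.
  // When DONT_USE_PRECOMPILED_HEADER is defined (done by the build when
  // USE_PRECOMPILED_HEADER=0 or when compiling with Sun Studio), the file
  // expands to nothing, so every .cpp file must include what it needs
  // explicitly -- hence the full include list in defNewGeneration.cpp below.
  #ifndef DONT_USE_PRECOMPILED_HEADER
  # include "memory/allocation.hpp"     // illustrative entries only
  # include "memory/universe.hpp"
  # include "runtime/globals.hpp"
  // ... the rest of the commonly used HotSpot headers ...
  #endif // DONT_USE_PRECOMPILED_HEADER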

/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#include "utilities/stack.inline.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
void DefNewGeneration::IsAliveClosure::do_object(oop p) {
  assert(false, "Do not call.");
}
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }

DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  OopClosure(g->ref_processor()), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->min_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces,
  // so check that they are empty.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another space,
    // and a failure of the check may not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
        to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}
bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt an expand-to-the-reserve size.  The
  // request should properly observe the maximum size of
  // the generation, so an expand-to-reserve should be
  // unnecessary.  Also, a second expand-to-reserve call
  // could potentially cause an undue expansion: for example,
  // the first expand could fail for unknown reasons, while
  // the second succeeds and expands the heap to its maximum
  // value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}
void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist.)  So from-space will normally be empty.
  // Note that we check both spaces, since if the scavenge failed they
  // revert roles.  If either is non-empty we bail out (otherwise we
  // would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
       changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if there are objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}
void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
  // $$$ This may be wrong in case of "scavenge failure"?
  eden()->object_iterate(cl);
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}
// The last collection bailed out, we are running out of heap space,
// so we try to allocate the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (PrintGC && Verbose) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
                  "  will_fail: %s"
                  "  heap_lock: %s"
                  "  free: " SIZE_FORMAT,
                  size,
               GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false",
               Heap_lock->is_locked() ? "locked" : "unlocked",
               from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
    "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_strong_roots(_level,
                                true,  // Process younger gens, if any,
                                       // as strong roots.
                                true,  // activate StrongRootsScope
                                false, // not collecting perm generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_no_gc_barrier,
                                true,   // walk *all* scavengable nmethods
                                &fsc_with_gc_barrier);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                    NULL);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piecemeal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    // Set the desired survivor size to half the real survivor space
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}
class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  // Now restore saved marks, if any.
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj   = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    _objs_with_preserved_marks.push(obj);
    _preserved_marks_of_objs.push(m);
  }
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  preserve_mark_if_necessary(old, old->mark());
  if (!_promotion_failed && PrintPromotionFailure) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }

  // forward to self
  old->forward_to(old);
  _promotion_failed = true;

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
     oop obj = _promo_failure_scan_stack.pop();
     obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}


void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}
#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                         size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
    assert(_next_gen != NULL,
           "This must be the youngest gen, and not the only gen");
  }
  return _next_gen->promotion_attempt_is_safe(used());
}
void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GC_locker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at
  // a minimum at the end of a collection.  If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe()) {
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify(bool allow_dirty) {
  eden()->verify(allow_dirty);
  from()->verify(allow_dirty);
    to()->verify(allow_dirty);
}
void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}
HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit,
      // so there is no reason to attempt another allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until it succeeds or the soft limit can't be adjusted.
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  }
  return result;
}
HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}
