src/share/vm/memory/defNewGeneration.cpp

author:      ysr
date:        Mon, 23 Jun 2008 16:49:37 -0700
changeset:   782:60fb9c4db4e6
parent:      777:37f87013dfd8
child:       791:1ee8caae33af
permissions: -rw-r--r--
6718086: CMS assert: _concurrent_iteration_safe_limit update missed
Summary: Initialize the field correctly in ContiguousSpace's constructor and initialize() methods, using the latter for the survivor spaces upon initial construction or a subsequent resizing of the young generation. Add some missing Space sub-class constructors.
Reviewed-by: apetrusenko
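
The summary names where the fix lives but not its shape. As a rough sketch (a hypothetical simplification, not the actual patch), initializing the safe limit wherever a ContiguousSpace acquires or changes its bounds would look like the following; initialize(), top(), and set_concurrent_iteration_safe_limit() all appear in the file below, while the base-class initialize() call is assumed:

    // Sketch: establish the safe limit at construction ...
    ContiguousSpace::ContiguousSpace() {
      // No bounds yet; nothing is safely parseable by a concurrent iterator.
      set_concurrent_iteration_safe_limit(NULL);
    }

    // ... and again whenever the space is (re)initialized, as happens to the
    // survivor spaces in compute_space_boundaries() both at construction and
    // on a subsequent resizing of the young generation.
    void ContiguousSpace::initialize(MemRegion mr, bool clear_space) {
      CompactibleSpace::initialize(mr, clear_space);  // assumed base call
      set_concurrent_iteration_safe_limit(top());
    }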

/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_defNewGeneration.cpp.incl"

//
// DefNewGeneration functions.

// Methods of protected closure types.

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
void DefNewGeneration::IsAliveClosure::do_object(oop p) {
  assert(false, "Do not call.");
}
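
// Liveness here is relative to this (youngest) generation's scavenge: an
// oop outside the generation's reserved range is not being collected and
// is treated as live; an oop inside it is live iff it has already been
// forwarded (copied) during the current scavenge.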
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }

DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}
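
// The followers loops below iterate to a fixed point: each pass scans oops
// in objects allocated (i.e. copied) since the last save_marks, which may
// copy further objects; iteration stops once a pass copies nothing new.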
void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_stack() == NULL
            || _gen->promo_failure_scan_stack()->length() == 0,
            "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  OopClosure(g->ref_processor()), _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _objs_with_preserved_marks(NULL),
    _preserved_marks_of_objs(NULL),
    _promo_failure_scan_stack(NULL),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}
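
// Lay out the committed young generation as [eden | from | to], where
// eden_size + 2 * survivor_size == committed size and all three spaces
// start at alignment boundaries. For example, with 4M committed and a
// computed survivor_size of 512K, eden gets the remaining 3M.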
void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) {
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  eden()->set_bounds(edenMR);
  if (minimum_eden_size == 0) {
    // The "minimum_eden_size" is really the amount of eden occupied by
    // allocated objects -- if this is zero, then we can clear the space.
    eden()->clear();
  } else {
    // Otherwise, we will not have cleared eden. This can cause newly
    // expanded space not to be mangled if using ZapUnusedHeapArea.
    // We explicitly do such mangling here.
    if (ZapUnusedHeapArea) {
      eden()->mangle_unused_area();
    }
  }
  from()->initialize(fromMR, true /* clear */);
    to()->initialize(  toMR, true /* clear */);
  // Make sure we compact eden, then from.
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  eden()->set_next_compaction_space(from());
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  bool success = _virtual_space.expand_by(bytes);

  // Do not attempt to expand to the reserve size here.  The
  // request should properly observe the maximum size of
  // the generation, so an expand-to-reserve should be
  // unnecessary.  Also, a second expand-to-reserve call
  // could potentially cause an undue expansion: for example,
  // if the first expand fails for unknown reasons
  // but the second succeeds and expands the heap to its
  // maximum value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }

  return success;
}
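
// Resize policy, as computed below: the desired size is old-gen capacity
// divided by NewRatio, plus NewSizeThreadIncrease bytes per non-daemon
// thread, aligned up to GenGrain, then clamped to the range
// [spec()->init_size(), reserved().byte_size()].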
void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist), so from-space will normally be empty.
  // Note that we check both spaces, since if the scavenge failed they
  // reverted roles.  If either is non-empty we bail out (otherwise we
  // would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
       changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // Bail out of shrinking if there are objects in eden.
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    compute_space_boundaries(eden()->used());
    MemRegion cmr((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K, eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
  // $$$ This may be wrong in case of "scavenge failure"?
  eden()->object_iterate(cl);
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}

size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}

size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}

size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}

HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}

void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (PrintGC && Verbose) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
                  "  will_fail: %s"
                  "  heap_lock: %s"
                  "  free: " SIZE_FORMAT,
                  size,
               GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false",
               Heap_lock->is_locked() ? "locked" : "unlocked",
               from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}
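
// A scavenge, in outline: bail out (recording the fact) if the next
// generation cannot absorb a worst-case promotion; otherwise scan strong
// roots, copying reachable young objects into to-space or the next
// generation, transitively evacuate followers, process discovered
// references, and either swap the survivor spaces (success) or unwind
// via remove_forwarding_pointers() (promotion failure).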
void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _next_gen = gch->next_gen(this);
  assert(_next_gen != NULL,
    "This must be the youngest gen, and not the only gen");

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_will_fail();
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear();

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Weak refs.
  // FIXME: Are these storage leaks, or are they resource objects?
#ifdef COMPILER2
  ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();
#else
  ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
#endif // COMPILER2

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_strong_roots(_level,
                                true, // Process younger gens, if any, as
                                      // strong roots.
                                false,// not collecting permanent generation.
                                SharedHeap::SO_AllClasses,
                                &fsc_with_gc_barrier,
                                &fsc_with_no_gc_barrier);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ref_processor()->process_discovered_references(
    soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, NULL);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear();
    from()->clear();
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    // Set the desired survivor size to half the real survivor space
    _tenuring_threshold =
      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);

    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
  } else {
    assert(HandlePromotionFailure,
      "Should not be here unless promotion failure handling is on");
    assert(_promo_failure_scan_stack != NULL &&
      _promo_failure_scan_stack->length() == 0, "post condition");

    // Deallocate the stack and its elements.
    delete _promo_failure_scan_stack;
    _promo_failure_scan_stack = NULL;

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed)");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For the sake of uniformity wrt ParNewGeneration::collect().
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_will_fail();

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // Set the new iteration-safe limit for the survivor spaces.
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();
  update_time_of_last_gc(os::javaTimeMillis());
}
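
// Promotion-failure recovery: a failed scavenge leaves self-forwarded
// objects behind. init_mark() below resets every mark word to its default;
// marks that were not trivially recoverable (e.g. locked or hashed ones,
// saved by preserve_mark_if_necessary below) are then reinstated from
// the preserved-marks arrays.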
class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);
  // Now restore saved marks, if any.
  if (_objs_with_preserved_marks != NULL) {
    assert(_preserved_marks_of_objs != NULL, "Both or none.");
    assert(_objs_with_preserved_marks->length() ==
           _preserved_marks_of_objs->length(), "Both or none.");
    for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
      oop obj   = _objs_with_preserved_marks->at(i);
      markOop m = _preserved_marks_of_objs->at(i);
      obj->set_mark(m);
    }
    delete _objs_with_preserved_marks;
    delete _preserved_marks_of_objs;
    _objs_with_preserved_marks = NULL;
    _preserved_marks_of_objs = NULL;
  }
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    if (_objs_with_preserved_marks == NULL) {
      assert(_preserved_marks_of_objs == NULL, "Both or none.");
      _objs_with_preserved_marks = new (ResourceObj::C_HEAP)
        GrowableArray<oop>(PreserveMarkStackSize, true);
      _preserved_marks_of_objs = new (ResourceObj::C_HEAP)
        GrowableArray<markOop>(PreserveMarkStackSize, true);
    }
    _objs_with_preserved_marks->push(obj);
    _preserved_marks_of_objs->push(m);
  }
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  preserve_mark_if_necessary(old, old->mark());
  // Forward to self.
  old->forward_to(old);
  _promotion_failed = true;

  push_on_promo_failure_scan_stack(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
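
// Copy policy: objects younger than the tenuring threshold are copied into
// to-space; if that allocation fails, or the object is old enough, they are
// promoted to the next generation. Only when both fail does the promotion-
// failure path above run, leaving the object forwarded to itself.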
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
        // is incorrectly set. In any case, it's seriously wrong to be here!
        vm_exit_out_of_memory(s*wordSize, "promotion");
      }

      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::push_on_promo_failure_scan_stack(oop obj) {
  if (_promo_failure_scan_stack == NULL) {
    _promo_failure_scan_stack = new (ResourceObj::C_HEAP)
                                    GrowableArray<oop>(40, true);
  }

  _promo_failure_scan_stack->push(obj);
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  assert(_promo_failure_scan_stack != NULL, "precondition");

  while (_promo_failure_scan_stack->length() > 0) {
     oop obj = _promo_failure_scan_stack->pop();
     obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}

void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}

bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}
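
// The save-marks protocol: save_marks() records each space's top; the
// macro below generates one specialized ("nv", non-virtual) variant of
// oop_since_save_marks_iterate per closure type, each scanning only
// [saved mark, top) in every space and then re-saving the marks.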
#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN
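
// Lend the unused tail of to-space to an older generation as scratch space
// for its collection, unless the requestor is this generation itself or a
// promotion failure has left live objects in to-space.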
void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                         size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
    assert(_next_gen != NULL,
           "This must be the youngest gen, and not the only gen");
  }

  // Decide if there's enough room for a full promotion.
  // When using extremely large edens, we effectively lose a
  // large amount of old space.  Use the "MaxLiveObjectEvacuationRatio"
  // flag to reduce the minimum evacuation space requirements. If
  // there is not enough space to evacuate eden during a scavenge,
  // the VM will immediately exit with an out of memory error.
  // This flag has not been tested
  // with collectors other than simple mark & sweep.
  //
  // Note that with the addition of promotion failure handling, the
  // VM will not immediately exit but will undo the young generation
  // collection.  The parameter is left here for compatibility.
  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;

  // worst_case_evacuation is based on "used()".  For the case where this
  // method is called after a collection, this is still appropriate because
  // the case that needs to be detected is one in which a full collection
  // has been done and has overflowed into the young generation.  In that
  // case a minor collection will fail (the overflow of the full collection
  // means there is no space in the old generation for any promotion).
  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);

  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
                                              HandlePromotionFailure);
}

void DefNewGeneration::gc_epilogue(bool full) {
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at
  // a minimum at the end of a collection.  If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  clear_should_allocate_from_space();
  if (collection_attempt_is_safe()) {
    gch->clear_incremental_collection_will_fail();
  } else {
    gch->set_incremental_collection_will_fail();
    if (full) { // we seem to be running out of space
      set_should_allocate_from_space();
    }
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify(bool allow_dirty) {
  eden()->verify(allow_dirty);
  from()->verify(allow_dirty);
    to()->verify(allow_dirty);
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}

const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from the inline file as they are not called inline.
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}
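
// Slow-path allocation: retry lock-free eden allocation, asking the next
// generation to raise eden's soft limit (via allocation_limit_reached)
// whenever the soft end is short of the hard end. A soft-ended eden (see
// has_soft_ended_eden() in the constructor) lets the collector policy
// interpose on allocation before eden is physically full.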
HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit;
      // there is no reason to attempt another allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until it succeeds or the soft limit can't be adjusted.
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate from the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}
