src/share/vm/memory/defNewGeneration.cpp

changeset 435:a61af66fc99e
child 548:ba764ed4b6f2
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/memory/defNewGeneration.cpp	Sat Dec 01 00:00:00 2007 +0000
     1.3 @@ -0,0 +1,864 @@
     1.4 +/*
     1.5 + * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    1.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
    1.24 + * have any questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +# include "incls/_precompiled.incl"
    1.29 +# include "incls/_defNewGeneration.cpp.incl"
    1.30 +
    1.31 +//
    1.32 +// DefNewGeneration functions.
    1.33 +
    1.34 +// Methods of protected closure types.
    1.35 +
    1.36 +DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
    1.37 +  assert(g->level() == 0, "Optimized for youngest gen.");
    1.38 +}
    1.39 +void DefNewGeneration::IsAliveClosure::do_object(oop p) {
    1.40 +  assert(false, "Do not call.");
    1.41 +}
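          +// An oop is considered alive if it lies outside this (young) generation's
          +// reserved space, or if it has already been forwarded (copied) during the
          +// current scavenge.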
    1.42 +bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
    1.43 +  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
    1.44 +}
    1.45 +
    1.46 +DefNewGeneration::KeepAliveClosure::
    1.47 +KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
    1.48 +  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
    1.49 +  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
    1.50 +  _rs = (CardTableRS*)rs;
    1.51 +}
    1.52 +
    1.53 +void DefNewGeneration::KeepAliveClosure::do_oop(oop* p) {
    1.54 +  // We never expect to see a null reference being processed
    1.55 +  // as a weak reference.
    1.56 +  assert (*p != NULL, "expected non-null ref");
    1.57 +  assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
    1.58 +
    1.59 +  _cl->do_oop_nv(p);
    1.60 +
    1.61 +  // Card marking is trickier for weak refs.
    1.62 +  // This oop is a 'next' field which was filled in while we
    1.63 +  // were discovering weak references. While we might not need
    1.64 +  // to take a special action to keep this reference alive, we
    1.65 +  // will need to dirty a card as the field was modified.
    1.66 +  //
    1.67 +  // Alternatively, we could create a method which iterates through
    1.68 +  // each generation, allowing them in turn to examine the modified
    1.69 +  // field.
    1.70 +  //
    1.71 +  // We could check that p is also in an older generation, but
    1.72 +  // dirty cards in the youngest gen are never scanned, so the
    1.73 +  // extra check probably isn't worthwhile.
    1.74 +  if (Universe::heap()->is_in_reserved(p)) {
    1.75 +    _rs->inline_write_ref_field_gc(p, *p);
    1.76 +  }
    1.77 +}
    1.78 +
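          +// The "fast" variant adds a boundary test: the card for the slot needs to
          +// be dirtied only when the slot itself is in the heap and its referent is
          +// still below _boundary, i.e. still inside the young generation.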
    1.79 +DefNewGeneration::FastKeepAliveClosure::
    1.80 +FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
    1.81 +  DefNewGeneration::KeepAliveClosure(cl) {
    1.82 +  _boundary = g->reserved().end();
    1.83 +}
    1.84 +
    1.85 +void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p) {
    1.86 +  assert (*p != NULL, "expected non-null ref");
    1.87 +  assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
    1.88 +
    1.89 +  _cl->do_oop_nv(p);
    1.90 +
     1.91 +  // Optimized for the DefNew generation when it is the youngest
     1.92 +  // generation: we set a younger_gen card only if we have an
     1.93 +  // older->youngest generation pointer.
    1.94 +  if (((HeapWord*)(*p) < _boundary) && Universe::heap()->is_in_reserved(p)) {
    1.95 +    _rs->inline_write_ref_field_gc(p, *p);
    1.96 +  }
    1.97 +}
    1.98 +
    1.99 +DefNewGeneration::EvacuateFollowersClosure::
   1.100 +EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
   1.101 +                         ScanClosure* cur, ScanClosure* older) :
   1.102 +  _gch(gch), _level(level),
   1.103 +  _scan_cur_or_nonheap(cur), _scan_older(older)
   1.104 +{}
   1.105 +
   1.106 +void DefNewGeneration::EvacuateFollowersClosure::do_void() {
   1.107 +  do {
   1.108 +    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
   1.109 +                                       _scan_older);
   1.110 +  } while (!_gch->no_allocs_since_save_marks(_level));
   1.111 +}
   1.112 +
   1.113 +DefNewGeneration::FastEvacuateFollowersClosure::
   1.114 +FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
   1.115 +                             DefNewGeneration* gen,
   1.116 +                             FastScanClosure* cur, FastScanClosure* older) :
   1.117 +  _gch(gch), _level(level), _gen(gen),
   1.118 +  _scan_cur_or_nonheap(cur), _scan_older(older)
   1.119 +{}
   1.120 +
   1.121 +void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
   1.122 +  do {
   1.123 +    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
   1.124 +                                       _scan_older);
   1.125 +  } while (!_gch->no_allocs_since_save_marks(_level));
   1.126 +  guarantee(_gen->promo_failure_scan_stack() == NULL
   1.127 +            || _gen->promo_failure_scan_stack()->length() == 0,
   1.128 +            "Failed to finish scan");
   1.129 +}
   1.130 +
   1.131 +ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
   1.132 +  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
   1.133 +{
   1.134 +  assert(_g->level() == 0, "Optimized for youngest generation");
   1.135 +  _boundary = _g->reserved().end();
   1.136 +}
   1.137 +
   1.138 +FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
   1.139 +  OopsInGenClosure(g), _g(g), _gc_barrier(gc_barrier)
   1.140 +{
   1.141 +  assert(_g->level() == 0, "Optimized for youngest generation");
   1.142 +  _boundary = _g->reserved().end();
   1.143 +}
   1.144 +
   1.145 +ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
   1.146 +  OopClosure(g->ref_processor()), _g(g)
   1.147 +{
   1.148 +  assert(_g->level() == 0, "Optimized for youngest generation");
   1.149 +  _boundary = _g->reserved().end();
   1.150 +}
   1.151 +
   1.152 +
   1.153 +DefNewGeneration::DefNewGeneration(ReservedSpace rs,
   1.154 +                                   size_t initial_size,
   1.155 +                                   int level,
   1.156 +                                   const char* policy)
   1.157 +  : Generation(rs, initial_size, level),
   1.158 +    _objs_with_preserved_marks(NULL),
   1.159 +    _preserved_marks_of_objs(NULL),
   1.160 +    _promo_failure_scan_stack(NULL),
   1.161 +    _promo_failure_drain_in_progress(false),
   1.162 +    _should_allocate_from_space(false)
   1.163 +{
   1.164 +  MemRegion cmr((HeapWord*)_virtual_space.low(),
   1.165 +                (HeapWord*)_virtual_space.high());
   1.166 +  Universe::heap()->barrier_set()->resize_covered_region(cmr);
   1.167 +
   1.168 +  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
   1.169 +    _eden_space = new ConcEdenSpace(this);
   1.170 +  } else {
   1.171 +    _eden_space = new EdenSpace(this);
   1.172 +  }
   1.173 +  _from_space = new ContiguousSpace();
   1.174 +  _to_space   = new ContiguousSpace();
   1.175 +
   1.176 +  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
   1.177 +    vm_exit_during_initialization("Could not allocate a new gen space");
   1.178 +
   1.179 +  // Compute the maximum eden and survivor space sizes. These sizes
   1.180 +  // are computed assuming the entire reserved space is committed.
   1.181 +  // These values are exported as performance counters.
   1.182 +  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
   1.183 +  uintx size = _virtual_space.reserved_size();
   1.184 +  _max_survivor_size = compute_survivor_size(size, alignment);
   1.185 +  _max_eden_size = size - (2*_max_survivor_size);
   1.186 +
   1.187 +  // allocate the performance counters
   1.188 +
   1.189 +  // Generation counters -- generation 0, 3 subspaces
   1.190 +  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
   1.191 +  _gc_counters = new CollectorCounters(policy, 0);
   1.192 +
   1.193 +  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
   1.194 +                                      _gen_counters);
   1.195 +  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
   1.196 +                                      _gen_counters);
   1.197 +  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
   1.198 +                                    _gen_counters);
   1.199 +
   1.200 +  compute_space_boundaries(0);
   1.201 +  update_counters();
   1.202 +  _next_gen = NULL;
   1.203 +  _tenuring_threshold = MaxTenuringThreshold;
   1.204 +  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
   1.205 +}
   1.206 +
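          +// The committed portion of the generation is carved into one eden space
          +// and two equally sized survivor spaces.  compute_survivor_size() (declared
          +// in the header, not shown in this changeset) is expected to return roughly
          +// committed_size / (SurvivorRatio + 2), aligned down to the given alignment;
          +// e.g. with SurvivorRatio=8, a 10M young generation yields about 1M per
          +// survivor space and about 8M of eden.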
   1.207 +void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) {
   1.208 +  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
   1.209 +
   1.210 +  // Compute sizes
   1.211 +  uintx size = _virtual_space.committed_size();
   1.212 +  uintx survivor_size = compute_survivor_size(size, alignment);
   1.213 +  uintx eden_size = size - (2*survivor_size);
   1.214 +  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
   1.215 +
   1.216 +  if (eden_size < minimum_eden_size) {
    1.217 +    // This may happen due to 64K rounding; if so, adjust the eden size back up
   1.218 +    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
   1.219 +    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
   1.220 +    uintx unaligned_survivor_size =
   1.221 +      align_size_down(maximum_survivor_size, alignment);
   1.222 +    survivor_size = MAX2(unaligned_survivor_size, alignment);
   1.223 +    eden_size = size - (2*survivor_size);
   1.224 +    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
   1.225 +    assert(eden_size >= minimum_eden_size, "just checking");
   1.226 +  }
   1.227 +
   1.228 +  char *eden_start = _virtual_space.low();
   1.229 +  char *from_start = eden_start + eden_size;
   1.230 +  char *to_start   = from_start + survivor_size;
   1.231 +  char *to_end     = to_start   + survivor_size;
   1.232 +
   1.233 +  assert(to_end == _virtual_space.high(), "just checking");
   1.234 +  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
   1.235 +  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
   1.236 +  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");
   1.237 +
   1.238 +  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
   1.239 +  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
   1.240 +  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);
   1.241 +
   1.242 +  eden()->initialize(edenMR, (minimum_eden_size == 0));
    1.243 +  // If minimum_eden_size != 0, we will not have cleared any
   1.244 +  // portion of eden above its top. This can cause newly
   1.245 +  // expanded space not to be mangled if using ZapUnusedHeapArea.
   1.246 +  // We explicitly do such mangling here.
   1.247 +  if (ZapUnusedHeapArea && (minimum_eden_size != 0)) {
   1.248 +    eden()->mangle_unused_area();
   1.249 +  }
   1.250 +  from()->initialize(fromMR, true);
   1.251 +    to()->initialize(toMR  , true);
   1.252 +  eden()->set_next_compaction_space(from());
   1.253 +  // The to-space is normally empty before a compaction so need
   1.254 +  // not be considered.  The exception is during promotion
   1.255 +  // failure handling when to-space can contain live objects.
   1.256 +  from()->set_next_compaction_space(NULL);
   1.257 +}
   1.258 +
   1.259 +void DefNewGeneration::swap_spaces() {
   1.260 +  ContiguousSpace* s = from();
   1.261 +  _from_space        = to();
   1.262 +  _to_space          = s;
   1.263 +  eden()->set_next_compaction_space(from());
   1.264 +  // The to-space is normally empty before a compaction so need
   1.265 +  // not be considered.  The exception is during promotion
   1.266 +  // failure handling when to-space can contain live objects.
   1.267 +  from()->set_next_compaction_space(NULL);
   1.268 +
   1.269 +  if (UsePerfData) {
   1.270 +    CSpaceCounters* c = _from_counters;
   1.271 +    _from_counters = _to_counters;
   1.272 +    _to_counters = c;
   1.273 +  }
   1.274 +}
   1.275 +
   1.276 +bool DefNewGeneration::expand(size_t bytes) {
   1.277 +  MutexLocker x(ExpandHeap_lock);
   1.278 +  bool success = _virtual_space.expand_by(bytes);
   1.279 +
    1.280 +  // Do not attempt an expand to the reserve size.  The
    1.281 +  // request should properly observe the maximum size of
    1.282 +  // the generation, so an expand-to-reserve should be
    1.283 +  // unnecessary.  Also, a second expand-to-reserve call
    1.284 +  // could potentially cause an undue expansion, for
    1.285 +  // example if the first expand fails for unknown reasons
    1.286 +  // but the second succeeds and expands the heap to its
    1.287 +  // maximum value.
   1.288 +  if (GC_locker::is_active()) {
   1.289 +    if (PrintGC && Verbose) {
   1.290 +      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
   1.291 +    }
   1.292 +  }
   1.293 +
   1.294 +  return success;
   1.295 +}
   1.296 +
   1.297 +
   1.298 +void DefNewGeneration::compute_new_size() {
    1.299 +  // This is called after a GC that includes the following generation
    1.300 +  // (which is required to exist), so from-space will normally be empty.
    1.301 +  // Note that we check both spaces, since a failed scavenge leaves the
    1.302 +  // two spaces with swapped roles.  If either is non-empty we bail out,
    1.303 +  // since otherwise we would have to relocate the objects.
   1.303 +  if (!from()->is_empty() || !to()->is_empty()) {
   1.304 +    return;
   1.305 +  }
   1.306 +
   1.307 +  int next_level = level() + 1;
   1.308 +  GenCollectedHeap* gch = GenCollectedHeap::heap();
   1.309 +  assert(next_level < gch->_n_gens,
   1.310 +         "DefNewGeneration cannot be an oldest gen");
   1.311 +
   1.312 +  Generation* next_gen = gch->_gens[next_level];
   1.313 +  size_t old_size = next_gen->capacity();
   1.314 +  size_t new_size_before = _virtual_space.committed_size();
   1.315 +  size_t min_new_size = spec()->init_size();
   1.316 +  size_t max_new_size = reserved().byte_size();
   1.317 +  assert(min_new_size <= new_size_before &&
   1.318 +         new_size_before <= max_new_size,
   1.319 +         "just checking");
   1.320 +  // All space sizes must be multiples of Generation::GenGrain.
   1.321 +  size_t alignment = Generation::GenGrain;
   1.322 +
   1.323 +  // Compute desired new generation size based on NewRatio and
   1.324 +  // NewSizeThreadIncrease
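          +  // For example, with a 512M old generation, NewRatio=3 and ten non-daemon
          +  // threads, the desired size is 512M/3 plus ten times NewSizeThreadIncrease
          +  // bytes, aligned up to Generation::GenGrain.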
   1.325 +  size_t desired_new_size = old_size/NewRatio;
   1.326 +  int threads_count = Threads::number_of_non_daemon_threads();
   1.327 +  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
   1.328 +  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);
   1.329 +
   1.330 +  // Adjust new generation size
   1.331 +  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
   1.332 +  assert(desired_new_size <= max_new_size, "just checking");
   1.333 +
   1.334 +  bool changed = false;
   1.335 +  if (desired_new_size > new_size_before) {
   1.336 +    size_t change = desired_new_size - new_size_before;
   1.337 +    assert(change % alignment == 0, "just checking");
   1.338 +    if (expand(change)) {
   1.339 +       changed = true;
   1.340 +    }
   1.341 +    // If the heap failed to expand to the desired size,
   1.342 +    // "changed" will be false.  If the expansion failed
   1.343 +    // (and at this point it was expected to succeed),
   1.344 +    // ignore the failure (leaving "changed" as false).
   1.345 +  }
   1.346 +  if (desired_new_size < new_size_before && eden()->is_empty()) {
   1.347 +    // bail out of shrinking if objects in eden
   1.348 +    size_t change = new_size_before - desired_new_size;
   1.349 +    assert(change % alignment == 0, "just checking");
   1.350 +    _virtual_space.shrink_by(change);
   1.351 +    changed = true;
   1.352 +  }
   1.353 +  if (changed) {
   1.354 +    compute_space_boundaries(eden()->used());
   1.355 +    MemRegion cmr((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
   1.356 +    Universe::heap()->barrier_set()->resize_covered_region(cmr);
   1.357 +    if (Verbose && PrintGC) {
   1.358 +      size_t new_size_after  = _virtual_space.committed_size();
   1.359 +      size_t eden_size_after = eden()->capacity();
   1.360 +      size_t survivor_size_after = from()->capacity();
   1.361 +      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden="
   1.362 +        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
   1.363 +        new_size_before/K, new_size_after/K, eden_size_after/K, survivor_size_after/K);
   1.364 +      if (WizardMode) {
   1.365 +        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
   1.366 +          thread_increase_size/K, threads_count);
   1.367 +      }
   1.368 +      gclog_or_tty->cr();
   1.369 +    }
   1.370 +  }
   1.371 +}
   1.372 +
   1.373 +void DefNewGeneration::object_iterate_since_last_GC(ObjectClosure* cl) {
   1.374 +  // $$$ This may be wrong in case of "scavenge failure"?
   1.375 +  eden()->object_iterate(cl);
   1.376 +}
   1.377 +
   1.378 +void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
   1.379 +  assert(false, "NYI -- are you sure you want to call this?");
   1.380 +}
   1.381 +
   1.382 +
   1.383 +size_t DefNewGeneration::capacity() const {
   1.384 +  return eden()->capacity()
   1.385 +       + from()->capacity();  // to() is only used during scavenge
   1.386 +}
   1.387 +
   1.388 +
   1.389 +size_t DefNewGeneration::used() const {
   1.390 +  return eden()->used()
   1.391 +       + from()->used();      // to() is only used during scavenge
   1.392 +}
   1.393 +
   1.394 +
   1.395 +size_t DefNewGeneration::free() const {
   1.396 +  return eden()->free()
   1.397 +       + from()->free();      // to() is only used during scavenge
   1.398 +}
   1.399 +
   1.400 +size_t DefNewGeneration::max_capacity() const {
   1.401 +  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
   1.402 +  const size_t reserved_bytes = reserved().byte_size();
   1.403 +  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
   1.404 +}
   1.405 +
   1.406 +size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
   1.407 +  return eden()->free();
   1.408 +}
   1.409 +
   1.410 +size_t DefNewGeneration::capacity_before_gc() const {
   1.411 +  return eden()->capacity();
   1.412 +}
   1.413 +
   1.414 +size_t DefNewGeneration::contiguous_available() const {
   1.415 +  return eden()->free();
   1.416 +}
   1.417 +
   1.418 +
   1.419 +HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
   1.420 +HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }
   1.421 +
   1.422 +void DefNewGeneration::object_iterate(ObjectClosure* blk) {
   1.423 +  eden()->object_iterate(blk);
   1.424 +  from()->object_iterate(blk);
   1.425 +}
   1.426 +
   1.427 +
   1.428 +void DefNewGeneration::space_iterate(SpaceClosure* blk,
   1.429 +                                     bool usedOnly) {
   1.430 +  blk->do_space(eden());
   1.431 +  blk->do_space(from());
   1.432 +  blk->do_space(to());
   1.433 +}
   1.434 +
    1.435 +// The last collection bailed out and we are running out of heap space,
    1.436 +// so we try to allocate from from-space as well.
   1.437 +HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
   1.438 +  HeapWord* result = NULL;
   1.439 +  if (PrintGC && Verbose) {
    1.440 +    gclog_or_tty->print("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):"
    1.441 +                        "  will_fail: %s"
    1.442 +                        "  heap_lock: %s"
    1.443 +                        "  free: " SIZE_FORMAT,
    1.444 +                        size,
    1.445 +                        GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false",
    1.446 +                        Heap_lock->is_locked() ? "locked" : "unlocked",
    1.447 +                        from()->free());
    1.448 +  }
   1.449 +  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
   1.450 +    if (Heap_lock->owned_by_self() ||
   1.451 +        (SafepointSynchronize::is_at_safepoint() &&
   1.452 +         Thread::current()->is_VM_thread())) {
   1.453 +      // If the Heap_lock is not locked by this thread, this will be called
   1.454 +      // again later with the Heap_lock held.
   1.455 +      result = from()->allocate(size);
   1.456 +    } else if (PrintGC && Verbose) {
   1.457 +      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
   1.458 +    }
   1.459 +  } else if (PrintGC && Verbose) {
   1.460 +    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
   1.461 +  }
   1.462 +  if (PrintGC && Verbose) {
   1.463 +    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
   1.464 +  }
   1.465 +  return result;
   1.466 +}
   1.467 +
   1.468 +HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
   1.469 +                                                bool   is_tlab,
   1.470 +                                                bool   parallel) {
   1.471 +  // We don't attempt to expand the young generation (but perhaps we should.)
   1.472 +  return allocate(size, is_tlab);
   1.473 +}
   1.474 +
   1.475 +
   1.476 +void DefNewGeneration::collect(bool   full,
   1.477 +                               bool   clear_all_soft_refs,
   1.478 +                               size_t size,
   1.479 +                               bool   is_tlab) {
   1.480 +  assert(full || size > 0, "otherwise we don't want to collect");
   1.481 +  GenCollectedHeap* gch = GenCollectedHeap::heap();
   1.482 +  _next_gen = gch->next_gen(this);
   1.483 +  assert(_next_gen != NULL,
   1.484 +    "This must be the youngest gen, and not the only gen");
   1.485 +
    1.486 +  // If the next generation is too full to accommodate promotion
   1.487 +  // from this generation, pass on collection; let the next generation
   1.488 +  // do it.
   1.489 +  if (!collection_attempt_is_safe()) {
   1.490 +    gch->set_incremental_collection_will_fail();
   1.491 +    return;
   1.492 +  }
   1.493 +  assert(to()->is_empty(), "Else not collection_attempt_is_safe");
   1.494 +
   1.495 +  init_assuming_no_promotion_failure();
   1.496 +
   1.497 +  TraceTime t1("GC", PrintGC && !PrintGCDetails, true, gclog_or_tty);
   1.498 +  // Capture heap used before collection (for printing).
   1.499 +  size_t gch_prev_used = gch->used();
   1.500 +
   1.501 +  SpecializationStats::clear();
   1.502 +
   1.503 +  // These can be shared for all code paths
   1.504 +  IsAliveClosure is_alive(this);
   1.505 +  ScanWeakRefClosure scan_weak_ref(this);
   1.506 +
   1.507 +  age_table()->clear();
   1.508 +  to()->clear();
   1.509 +
   1.510 +  gch->rem_set()->prepare_for_younger_refs_iterate(false);
   1.511 +
   1.512 +  assert(gch->no_allocs_since_save_marks(0),
   1.513 +         "save marks have not been newly set.");
   1.514 +
   1.515 +  // Weak refs.
   1.516 +  // FIXME: Are these storage leaks, or are they resource objects?
   1.517 +#ifdef COMPILER2
   1.518 +  ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();
   1.519 +#else
   1.520 +  ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
   1.521 +#endif // COMPILER2
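          +  // Both policies clear a SoftReference once the time since its last access
          +  // exceeds a grace period roughly proportional to the amount of free heap;
          +  // LRUMaxHeapPolicy scales that period against the maximum heap size,
          +  // LRUCurrentHeapPolicy against the currently committed size.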
   1.522 +
   1.523 +  // Not very pretty.
   1.524 +  CollectorPolicy* cp = gch->collector_policy();
   1.525 +
   1.526 +  FastScanClosure fsc_with_no_gc_barrier(this, false);
   1.527 +  FastScanClosure fsc_with_gc_barrier(this, true);
   1.528 +
   1.529 +  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
   1.530 +  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
   1.531 +                                                  &fsc_with_no_gc_barrier,
   1.532 +                                                  &fsc_with_gc_barrier);
   1.533 +
   1.534 +  assert(gch->no_allocs_since_save_marks(0),
   1.535 +         "save marks have not been newly set.");
   1.536 +
   1.537 +  gch->gen_process_strong_roots(_level,
   1.538 +                                true, // Process younger gens, if any, as
   1.539 +                                      // strong roots.
   1.540 +                                false,// not collecting permanent generation.
   1.541 +                                SharedHeap::SO_AllClasses,
   1.542 +                                &fsc_with_gc_barrier,
   1.543 +                                &fsc_with_no_gc_barrier);
   1.544 +
   1.545 +  // "evacuate followers".
   1.546 +  evacuate_followers.do_void();
   1.547 +
   1.548 +  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
   1.549 +  ref_processor()->process_discovered_references(
   1.550 +    soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, NULL);
   1.551 +  if (!promotion_failed()) {
   1.552 +    // Swap the survivor spaces.
   1.553 +    eden()->clear();
   1.554 +    from()->clear();
   1.555 +    swap_spaces();
   1.556 +
   1.557 +    assert(to()->is_empty(), "to space should be empty now");
   1.558 +
   1.559 +    // Set the desired survivor size to half the real survivor space
   1.560 +    _tenuring_threshold =
   1.561 +      age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
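          +    // In outline, compute_tenuring_threshold() walks the age table and picks
          +    // the smallest age at which the cumulative volume of surviving objects
          +    // would exceed that desired size; objects reaching the threshold age are
          +    // promoted on subsequent scavenges.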
   1.562 +
   1.563 +    if (PrintGC && !PrintGCDetails) {
   1.564 +      gch->print_heap_change(gch_prev_used);
   1.565 +    }
   1.566 +  } else {
   1.567 +    assert(HandlePromotionFailure,
   1.568 +      "Should not be here unless promotion failure handling is on");
   1.569 +    assert(_promo_failure_scan_stack != NULL &&
   1.570 +      _promo_failure_scan_stack->length() == 0, "post condition");
   1.571 +
    1.572 +    // Deallocate the stack and its elements
   1.573 +    delete _promo_failure_scan_stack;
   1.574 +    _promo_failure_scan_stack = NULL;
   1.575 +
   1.576 +    remove_forwarding_pointers();
   1.577 +    if (PrintGCDetails) {
   1.578 +      gclog_or_tty->print(" (promotion failed)");
   1.579 +    }
    1.580 +    // Add to-space to the list of spaces to compact
   1.581 +    // when a promotion failure has occurred.  In that
   1.582 +    // case there can be live objects in to-space
   1.583 +    // as a result of a partial evacuation of eden
   1.584 +    // and from-space.
   1.585 +    swap_spaces();   // For the sake of uniformity wrt ParNewGeneration::collect().
   1.586 +    from()->set_next_compaction_space(to());
   1.587 +    gch->set_incremental_collection_will_fail();
   1.588 +
   1.589 +    // Reset the PromotionFailureALot counters.
   1.590 +    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
   1.591 +  }
   1.592 +  // set new iteration safe limit for the survivor spaces
   1.593 +  from()->set_concurrent_iteration_safe_limit(from()->top());
   1.594 +  to()->set_concurrent_iteration_safe_limit(to()->top());
   1.595 +  SpecializationStats::print();
   1.596 +  update_time_of_last_gc(os::javaTimeMillis());
   1.597 +}
   1.598 +
   1.599 +class RemoveForwardPointerClosure: public ObjectClosure {
   1.600 +public:
   1.601 +  void do_object(oop obj) {
   1.602 +    obj->init_mark();
   1.603 +  }
   1.604 +};
   1.605 +
   1.606 +void DefNewGeneration::init_assuming_no_promotion_failure() {
   1.607 +  _promotion_failed = false;
   1.608 +  from()->set_next_compaction_space(NULL);
   1.609 +}
   1.610 +
   1.611 +void DefNewGeneration::remove_forwarding_pointers() {
   1.612 +  RemoveForwardPointerClosure rspc;
   1.613 +  eden()->object_iterate(&rspc);
   1.614 +  from()->object_iterate(&rspc);
   1.615 +  // Now restore saved marks, if any.
   1.616 +  if (_objs_with_preserved_marks != NULL) {
   1.617 +    assert(_preserved_marks_of_objs != NULL, "Both or none.");
   1.618 +    assert(_objs_with_preserved_marks->length() ==
   1.619 +           _preserved_marks_of_objs->length(), "Both or none.");
   1.620 +    for (int i = 0; i < _objs_with_preserved_marks->length(); i++) {
   1.621 +      oop obj   = _objs_with_preserved_marks->at(i);
   1.622 +      markOop m = _preserved_marks_of_objs->at(i);
   1.623 +      obj->set_mark(m);
   1.624 +    }
   1.625 +    delete _objs_with_preserved_marks;
   1.626 +    delete _preserved_marks_of_objs;
   1.627 +    _objs_with_preserved_marks = NULL;
   1.628 +    _preserved_marks_of_objs = NULL;
   1.629 +  }
   1.630 +}
   1.631 +
   1.632 +void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
   1.633 +  if (m->must_be_preserved_for_promotion_failure(obj)) {
   1.634 +    if (_objs_with_preserved_marks == NULL) {
   1.635 +      assert(_preserved_marks_of_objs == NULL, "Both or none.");
   1.636 +      _objs_with_preserved_marks = new (ResourceObj::C_HEAP)
   1.637 +        GrowableArray<oop>(PreserveMarkStackSize, true);
   1.638 +      _preserved_marks_of_objs = new (ResourceObj::C_HEAP)
   1.639 +        GrowableArray<markOop>(PreserveMarkStackSize, true);
   1.640 +    }
   1.641 +    _objs_with_preserved_marks->push(obj);
   1.642 +    _preserved_marks_of_objs->push(m);
   1.643 +  }
   1.644 +}
   1.645 +
   1.646 +void DefNewGeneration::handle_promotion_failure(oop old) {
   1.647 +  preserve_mark_if_necessary(old, old->mark());
   1.648 +  // forward to self
   1.649 +  old->forward_to(old);
   1.650 +  _promotion_failed = true;
   1.651 +
   1.652 +  push_on_promo_failure_scan_stack(old);
   1.653 +
   1.654 +  if (!_promo_failure_drain_in_progress) {
   1.655 +    // prevent recursion in copy_to_survivor_space()
   1.656 +    _promo_failure_drain_in_progress = true;
   1.657 +    drain_promo_failure_scan_stack();
   1.658 +    _promo_failure_drain_in_progress = false;
   1.659 +  }
   1.660 +}
   1.661 +
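          +// Copy policy: an object younger than the tenuring threshold is copied into
          +// to-space; if it is too old, or if the to-space allocation fails, it is
          +// promoted into the next generation.  If promotion fails as well, the object
          +// is forwarded to itself and queued for rescanning by
          +// handle_promotion_failure() above.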
   1.662 +oop DefNewGeneration::copy_to_survivor_space(oop old, oop* from) {
   1.663 +  assert(is_in_reserved(old) && !old->is_forwarded(),
   1.664 +         "shouldn't be scavenging this oop");
   1.665 +  size_t s = old->size();
   1.666 +  oop obj = NULL;
   1.667 +
   1.668 +  // Try allocating obj in to-space (unless too old)
   1.669 +  if (old->age() < tenuring_threshold()) {
   1.670 +    obj = (oop) to()->allocate(s);
   1.671 +  }
   1.672 +
   1.673 +  // Otherwise try allocating obj tenured
   1.674 +  if (obj == NULL) {
   1.675 +    obj = _next_gen->promote(old, s, from);
   1.676 +    if (obj == NULL) {
   1.677 +      if (!HandlePromotionFailure) {
   1.678 +        // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
    1.679 +        // is incorrectly set. In any case, it's seriously wrong to be here!
   1.680 +        vm_exit_out_of_memory(s*wordSize, "promotion");
   1.681 +      }
   1.682 +
   1.683 +      handle_promotion_failure(old);
   1.684 +      return old;
   1.685 +    }
   1.686 +  } else {
   1.687 +    // Prefetch beyond obj
   1.688 +    const intx interval = PrefetchCopyIntervalInBytes;
   1.689 +    Prefetch::write(obj, interval);
   1.690 +
   1.691 +    // Copy obj
   1.692 +    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
   1.693 +
   1.694 +    // Increment age if obj still in new generation
   1.695 +    obj->incr_age();
   1.696 +    age_table()->add(obj, s);
   1.697 +  }
   1.698 +
   1.699 +  // Done, insert forward pointer to obj in this header
   1.700 +  old->forward_to(obj);
   1.701 +
   1.702 +  return obj;
   1.703 +}
   1.704 +
   1.705 +void DefNewGeneration::push_on_promo_failure_scan_stack(oop obj) {
   1.706 +  if (_promo_failure_scan_stack == NULL) {
   1.707 +    _promo_failure_scan_stack = new (ResourceObj::C_HEAP)
   1.708 +                                    GrowableArray<oop>(40, true);
   1.709 +  }
   1.710 +
   1.711 +  _promo_failure_scan_stack->push(obj);
   1.712 +}
   1.713 +
   1.714 +void DefNewGeneration::drain_promo_failure_scan_stack() {
   1.715 +  assert(_promo_failure_scan_stack != NULL, "precondition");
   1.716 +
   1.717 +  while (_promo_failure_scan_stack->length() > 0) {
   1.718 +     oop obj = _promo_failure_scan_stack->pop();
   1.719 +     obj->oop_iterate(_promo_failure_scan_stack_closure);
   1.720 +  }
   1.721 +}
   1.722 +
   1.723 +void DefNewGeneration::save_marks() {
   1.724 +  eden()->set_saved_mark();
   1.725 +  to()->set_saved_mark();
   1.726 +  from()->set_saved_mark();
   1.727 +}
   1.728 +
   1.729 +
   1.730 +void DefNewGeneration::reset_saved_marks() {
   1.731 +  eden()->reset_saved_mark();
   1.732 +  to()->reset_saved_mark();
   1.733 +  from()->reset_saved_mark();
   1.734 +}
   1.735 +
   1.736 +
   1.737 +bool DefNewGeneration::no_allocs_since_save_marks() {
   1.738 +  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
   1.739 +  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
   1.740 +  return to()->saved_mark_at_top();
   1.741 +}
   1.742 +
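          +// The macro below is expanded once per specialized oop closure type by
          +// ALL_SINCE_SAVE_MARKS_CLOSURES, generating one oop_since_save_marks_iterate
          +// variant per closure.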
   1.743 +#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
   1.744 +                                                                \
   1.745 +void DefNewGeneration::                                         \
   1.746 +oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
   1.747 +  cl->set_generation(this);                                     \
   1.748 +  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
   1.749 +  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
   1.750 +  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
   1.751 +  cl->reset_generation();                                       \
   1.752 +  save_marks();                                                 \
   1.753 +}
   1.754 +
   1.755 +ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)
   1.756 +
   1.757 +#undef DefNew_SINCE_SAVE_MARKS_DEFN
   1.758 +
   1.759 +void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
   1.760 +                                         size_t max_alloc_words) {
   1.761 +  if (requestor == this || _promotion_failed) return;
   1.762 +  assert(requestor->level() > level(), "DefNewGeneration must be youngest");
   1.763 +
   1.764 +  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
   1.765 +  if (to_space->top() > to_space->bottom()) {
   1.766 +    trace("to_space not empty when contribute_scratch called");
   1.767 +  }
   1.768 +  */
   1.769 +
   1.770 +  ContiguousSpace* to_space = to();
   1.771 +  assert(to_space->end() >= to_space->top(), "pointers out of order");
   1.772 +  size_t free_words = pointer_delta(to_space->end(), to_space->top());
   1.773 +  if (free_words >= MinFreeScratchWords) {
   1.774 +    ScratchBlock* sb = (ScratchBlock*)to_space->top();
   1.775 +    sb->num_words = free_words;
   1.776 +    sb->next = list;
   1.777 +    list = sb;
   1.778 +  }
   1.779 +}
   1.780 +
   1.781 +bool DefNewGeneration::collection_attempt_is_safe() {
   1.782 +  if (!to()->is_empty()) {
   1.783 +    return false;
   1.784 +  }
   1.785 +  if (_next_gen == NULL) {
   1.786 +    GenCollectedHeap* gch = GenCollectedHeap::heap();
   1.787 +    _next_gen = gch->next_gen(this);
   1.788 +    assert(_next_gen != NULL,
   1.789 +           "This must be the youngest gen, and not the only gen");
   1.790 +  }
   1.791 +
   1.792 +  // Decide if there's enough room for a full promotion
   1.793 +  // When using extremely large edens, we effectively lose a
   1.794 +  // large amount of old space.  Use the "MaxLiveObjectEvacuationRatio"
   1.795 +  // flag to reduce the minimum evacuation space requirements. If
   1.796 +  // there is not enough space to evacuate eden during a scavenge,
   1.797 +  // the VM will immediately exit with an out of memory error.
   1.798 +  // This flag has not been tested
   1.799 +  // with collectors other than simple mark & sweep.
   1.800 +  //
   1.801 +  // Note that with the addition of promotion failure handling, the
   1.802 +  // VM will not immediately exit but will undo the young generation
   1.803 +  // collection.  The parameter is left here for compatibility.
   1.804 +  const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;
   1.805 +
   1.806 +  // worst_case_evacuation is based on "used()".  For the case where this
   1.807 +  // method is called after a collection, this is still appropriate because
   1.808 +  // the case that needs to be detected is one in which a full collection
   1.809 +  // has been done and has overflowed into the young generation.  In that
   1.810 +  // case a minor collection will fail (the overflow of the full collection
   1.811 +  // means there is no space in the old generation for any promotion).
   1.812 +  size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);
   1.813 +
   1.814 +  return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
   1.815 +                                              HandlePromotionFailure);
   1.816 +}
   1.817 +
   1.818 +void DefNewGeneration::gc_epilogue(bool full) {
   1.819 +  // Check if the heap is approaching full after a collection has
    1.820 +  // been done.  Generally the young generation is, at a
    1.821 +  // minimum, empty at the end of a collection.  If it is not, then
   1.822 +  // the heap is approaching full.
   1.823 +  GenCollectedHeap* gch = GenCollectedHeap::heap();
   1.824 +  clear_should_allocate_from_space();
   1.825 +  if (collection_attempt_is_safe()) {
   1.826 +    gch->clear_incremental_collection_will_fail();
   1.827 +  } else {
   1.828 +    gch->set_incremental_collection_will_fail();
   1.829 +    if (full) { // we seem to be running out of space
   1.830 +      set_should_allocate_from_space();
   1.831 +    }
   1.832 +  }
   1.833 +
   1.834 +  // update the generation and space performance counters
   1.835 +  update_counters();
   1.836 +  gch->collector_policy()->counters()->update_counters();
   1.837 +}
   1.838 +
   1.839 +void DefNewGeneration::update_counters() {
   1.840 +  if (UsePerfData) {
   1.841 +    _eden_counters->update_all();
   1.842 +    _from_counters->update_all();
   1.843 +    _to_counters->update_all();
   1.844 +    _gen_counters->update_all();
   1.845 +  }
   1.846 +}
   1.847 +
   1.848 +void DefNewGeneration::verify(bool allow_dirty) {
   1.849 +  eden()->verify(allow_dirty);
   1.850 +  from()->verify(allow_dirty);
   1.851 +    to()->verify(allow_dirty);
   1.852 +}
   1.853 +
   1.854 +void DefNewGeneration::print_on(outputStream* st) const {
   1.855 +  Generation::print_on(st);
   1.856 +  st->print("  eden");
   1.857 +  eden()->print_on(st);
   1.858 +  st->print("  from");
   1.859 +  from()->print_on(st);
   1.860 +  st->print("  to  ");
   1.861 +  to()->print_on(st);
   1.862 +}
   1.863 +
   1.864 +
   1.865 +const char* DefNewGeneration::name() const {
   1.866 +  return "def new generation";
   1.867 +}
