src/share/vm/memory/generation.cpp

changeset 0
f90c822e73f8
child 6876
710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/memory/generation.cpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,874 @@
     1.4 +/*
     1.5 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "precompiled.hpp"
    1.29 +#include "gc_implementation/shared/gcTimer.hpp"
    1.30 +#include "gc_implementation/shared/gcTrace.hpp"
    1.31 +#include "gc_implementation/shared/spaceDecorator.hpp"
    1.32 +#include "gc_interface/collectedHeap.inline.hpp"
    1.33 +#include "memory/allocation.inline.hpp"
    1.34 +#include "memory/blockOffsetTable.inline.hpp"
    1.35 +#include "memory/cardTableRS.hpp"
    1.36 +#include "memory/gcLocker.inline.hpp"
    1.37 +#include "memory/genCollectedHeap.hpp"
    1.38 +#include "memory/genMarkSweep.hpp"
    1.39 +#include "memory/genOopClosures.hpp"
    1.40 +#include "memory/genOopClosures.inline.hpp"
    1.41 +#include "memory/generation.hpp"
    1.42 +#include "memory/generation.inline.hpp"
    1.43 +#include "memory/space.inline.hpp"
    1.44 +#include "oops/oop.inline.hpp"
    1.45 +#include "runtime/java.hpp"
    1.46 +#include "utilities/copy.hpp"
    1.47 +#include "utilities/events.hpp"
    1.48 +
    1.49 +PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
    1.50 +
// Construct a generation over the given reserved space, committing
// "initial_size" bytes up front.  Exits the VM if the initial commit fails.
Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
  _level(level),
  _ref_processor(NULL) {
  if (!_virtual_space.initialize(rs, initial_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                    "object heap");
  }
  // Mangle all of the initial generation.
  if (ZapUnusedHeapArea) {
    MemRegion mangle_region((HeapWord*)_virtual_space.low(),
      (HeapWord*)_virtual_space.high());
    SpaceMangler::mangle_region(mangle_region);
  }
  // _reserved covers the whole reservation, not just the committed part.
  _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
          (HeapWord*)_virtual_space.high_boundary());
}
    1.67 +
    1.68 +GenerationSpec* Generation::spec() {
    1.69 +  GenCollectedHeap* gch = GenCollectedHeap::heap();
    1.70 +  assert(0 <= level() && level() < gch->_n_gens, "Bad gen level");
    1.71 +  return gch->_gen_specs[level()];
    1.72 +}
    1.73 +
    1.74 +size_t Generation::max_capacity() const {
    1.75 +  return reserved().byte_size();
    1.76 +}
    1.77 +
    1.78 +void Generation::print_heap_change(size_t prev_used) const {
    1.79 +  if (PrintGCDetails && Verbose) {
    1.80 +    gclog_or_tty->print(" "  SIZE_FORMAT
    1.81 +                        "->" SIZE_FORMAT
    1.82 +                        "("  SIZE_FORMAT ")",
    1.83 +                        prev_used, used(), capacity());
    1.84 +  } else {
    1.85 +    gclog_or_tty->print(" "  SIZE_FORMAT "K"
    1.86 +                        "->" SIZE_FORMAT "K"
    1.87 +                        "("  SIZE_FORMAT "K)",
    1.88 +                        prev_used / K, used() / K, capacity() / K);
    1.89 +  }
    1.90 +}
    1.91 +
    1.92 +// By default we get a single threaded default reference processor;
    1.93 +// generations needing multi-threaded refs processing or discovery override this method.
    1.94 +void Generation::ref_processor_init() {
    1.95 +  assert(_ref_processor == NULL, "a reference processor already exists");
    1.96 +  assert(!_reserved.is_empty(), "empty generation?");
    1.97 +  _ref_processor = new ReferenceProcessor(_reserved);    // a vanilla reference processor
    1.98 +  if (_ref_processor == NULL) {
    1.99 +    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
   1.100 +  }
   1.101 +}
   1.102 +
   1.103 +void Generation::print() const { print_on(tty); }
   1.104 +
// Print a one-line summary: name, total/used sizes in K, and the
// virtual-space addresses [low_boundary, high, high_boundary).
void Generation::print_on(outputStream* st)  const {
  st->print(" %-20s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
              _virtual_space.low_boundary(),
              _virtual_space.high(),
              _virtual_space.high_boundary());
}
   1.114 +
   1.115 +void Generation::print_summary_info() { print_summary_info_on(tty); }
   1.116 +
   1.117 +void Generation::print_summary_info_on(outputStream* st) {
   1.118 +  StatRecord* sr = stat_record();
   1.119 +  double time = sr->accumulated_time.seconds();
   1.120 +  st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
   1.121 +               "%d GC's, avg GC time %3.7f]",
   1.122 +               level(), time, sr->invocations,
   1.123 +               sr->invocations > 0 ? time / sr->invocations : 0.0);
   1.124 +}
   1.125 +
   1.126 +// Utility iterator classes
   1.127 +
   1.128 +class GenerationIsInReservedClosure : public SpaceClosure {
   1.129 + public:
   1.130 +  const void* _p;
   1.131 +  Space* sp;
   1.132 +  virtual void do_space(Space* s) {
   1.133 +    if (sp == NULL) {
   1.134 +      if (s->is_in_reserved(_p)) sp = s;
   1.135 +    }
   1.136 +  }
   1.137 +  GenerationIsInReservedClosure(const void* p) : _p(p), sp(NULL) {}
   1.138 +};
   1.139 +
   1.140 +class GenerationIsInClosure : public SpaceClosure {
   1.141 + public:
   1.142 +  const void* _p;
   1.143 +  Space* sp;
   1.144 +  virtual void do_space(Space* s) {
   1.145 +    if (sp == NULL) {
   1.146 +      if (s->is_in(_p)) sp = s;
   1.147 +    }
   1.148 +  }
   1.149 +  GenerationIsInClosure(const void* p) : _p(p), sp(NULL) {}
   1.150 +};
   1.151 +
   1.152 +bool Generation::is_in(const void* p) const {
   1.153 +  GenerationIsInClosure blk(p);
   1.154 +  ((Generation*)this)->space_iterate(&blk);
   1.155 +  return blk.sp != NULL;
   1.156 +}
   1.157 +
   1.158 +DefNewGeneration* Generation::as_DefNewGeneration() {
   1.159 +  assert((kind() == Generation::DefNew) ||
   1.160 +         (kind() == Generation::ParNew) ||
   1.161 +         (kind() == Generation::ASParNew),
   1.162 +    "Wrong youngest generation type");
   1.163 +  return (DefNewGeneration*) this;
   1.164 +}
   1.165 +
   1.166 +Generation* Generation::next_gen() const {
   1.167 +  GenCollectedHeap* gch = GenCollectedHeap::heap();
   1.168 +  int next = level() + 1;
   1.169 +  if (next < gch->_n_gens) {
   1.170 +    return gch->_gens[next];
   1.171 +  } else {
   1.172 +    return NULL;
   1.173 +  }
   1.174 +}
   1.175 +
   1.176 +size_t Generation::max_contiguous_available() const {
   1.177 +  // The largest number of contiguous free words in this or any higher generation.
   1.178 +  size_t max = 0;
   1.179 +  for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) {
   1.180 +    size_t avail = gen->contiguous_available();
   1.181 +    if (avail > max) {
   1.182 +      max = avail;
   1.183 +    }
   1.184 +  }
   1.185 +  return max;
   1.186 +}
   1.187 +
   1.188 +bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
   1.189 +  size_t available = max_contiguous_available();
   1.190 +  bool   res = (available >= max_promotion_in_bytes);
   1.191 +  if (PrintGC && Verbose) {
   1.192 +    gclog_or_tty->print_cr(
   1.193 +      "Generation: promo attempt is%s safe: available("SIZE_FORMAT") %s max_promo("SIZE_FORMAT")",
   1.194 +      res? "":" not", available, res? ">=":"<",
   1.195 +      max_promotion_in_bytes);
   1.196 +  }
   1.197 +  return res;
   1.198 +}
   1.199 +
   1.200 +// Ignores "ref" and calls allocate().
   1.201 +oop Generation::promote(oop obj, size_t obj_size) {
   1.202 +  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
   1.203 +
   1.204 +#ifndef PRODUCT
   1.205 +  if (Universe::heap()->promotion_should_fail()) {
   1.206 +    return NULL;
   1.207 +  }
   1.208 +#endif  // #ifndef PRODUCT
   1.209 +
   1.210 +  HeapWord* result = allocate(obj_size, false);
   1.211 +  if (result != NULL) {
   1.212 +    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
   1.213 +    return oop(result);
   1.214 +  } else {
   1.215 +    GenCollectedHeap* gch = GenCollectedHeap::heap();
   1.216 +    return gch->handle_failed_promotion(this, obj, obj_size);
   1.217 +  }
   1.218 +}
   1.219 +
// Default implementation: this generation does not support parallel
// promotion; generations that do must override.
oop Generation::par_promote(int thread_num,
                            oop obj, markOop m, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  ShouldNotCallThis();
  return NULL;
}
   1.226 +
// Default implementation: undoing a parallel promotion allocation is
// not supported here; generations that support it must override.
void Generation::par_promote_alloc_undo(int thread_num,
                                        HeapWord* obj, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  guarantee(false, "No good general implementation.");
}
   1.232 +
   1.233 +Space* Generation::space_containing(const void* p) const {
   1.234 +  GenerationIsInReservedClosure blk(p);
   1.235 +  // Cast away const
   1.236 +  ((Generation*)this)->space_iterate(&blk);
   1.237 +  return blk.sp;
   1.238 +}
   1.239 +
   1.240 +// Some of these are mediocre general implementations.  Should be
   1.241 +// overridden to get better performance.
   1.242 +
   1.243 +class GenerationBlockStartClosure : public SpaceClosure {
   1.244 + public:
   1.245 +  const void* _p;
   1.246 +  HeapWord* _start;
   1.247 +  virtual void do_space(Space* s) {
   1.248 +    if (_start == NULL && s->is_in_reserved(_p)) {
   1.249 +      _start = s->block_start(_p);
   1.250 +    }
   1.251 +  }
   1.252 +  GenerationBlockStartClosure(const void* p) { _p = p; _start = NULL; }
   1.253 +};
   1.254 +
   1.255 +HeapWord* Generation::block_start(const void* p) const {
   1.256 +  GenerationBlockStartClosure blk(p);
   1.257 +  // Cast away const
   1.258 +  ((Generation*)this)->space_iterate(&blk);
   1.259 +  return blk._start;
   1.260 +}
   1.261 +
   1.262 +class GenerationBlockSizeClosure : public SpaceClosure {
   1.263 + public:
   1.264 +  const HeapWord* _p;
   1.265 +  size_t size;
   1.266 +  virtual void do_space(Space* s) {
   1.267 +    if (size == 0 && s->is_in_reserved(_p)) {
   1.268 +      size = s->block_size(_p);
   1.269 +    }
   1.270 +  }
   1.271 +  GenerationBlockSizeClosure(const HeapWord* p) { _p = p; size = 0; }
   1.272 +};
   1.273 +
   1.274 +size_t Generation::block_size(const HeapWord* p) const {
   1.275 +  GenerationBlockSizeClosure blk(p);
   1.276 +  // Cast away const
   1.277 +  ((Generation*)this)->space_iterate(&blk);
   1.278 +  assert(blk.size > 0, "seems reasonable");
   1.279 +  return blk.size;
   1.280 +}
   1.281 +
   1.282 +class GenerationBlockIsObjClosure : public SpaceClosure {
   1.283 + public:
   1.284 +  const HeapWord* _p;
   1.285 +  bool is_obj;
   1.286 +  virtual void do_space(Space* s) {
   1.287 +    if (!is_obj && s->is_in_reserved(_p)) {
   1.288 +      is_obj |= s->block_is_obj(_p);
   1.289 +    }
   1.290 +  }
   1.291 +  GenerationBlockIsObjClosure(const HeapWord* p) { _p = p; is_obj = false; }
   1.292 +};
   1.293 +
   1.294 +bool Generation::block_is_obj(const HeapWord* p) const {
   1.295 +  GenerationBlockIsObjClosure blk(p);
   1.296 +  // Cast away const
   1.297 +  ((Generation*)this)->space_iterate(&blk);
   1.298 +  return blk.is_obj;
   1.299 +}
   1.300 +
   1.301 +class GenerationOopIterateClosure : public SpaceClosure {
   1.302 + public:
   1.303 +  ExtendedOopClosure* cl;
   1.304 +  MemRegion mr;
   1.305 +  virtual void do_space(Space* s) {
   1.306 +    s->oop_iterate(mr, cl);
   1.307 +  }
   1.308 +  GenerationOopIterateClosure(ExtendedOopClosure* _cl, MemRegion _mr) :
   1.309 +    cl(_cl), mr(_mr) {}
   1.310 +};
   1.311 +
   1.312 +void Generation::oop_iterate(ExtendedOopClosure* cl) {
   1.313 +  GenerationOopIterateClosure blk(cl, _reserved);
   1.314 +  space_iterate(&blk);
   1.315 +}
   1.316 +
   1.317 +void Generation::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
   1.318 +  GenerationOopIterateClosure blk(cl, mr);
   1.319 +  space_iterate(&blk);
   1.320 +}
   1.321 +
   1.322 +void Generation::younger_refs_in_space_iterate(Space* sp,
   1.323 +                                               OopsInGenClosure* cl) {
   1.324 +  GenRemSet* rs = SharedHeap::heap()->rem_set();
   1.325 +  rs->younger_refs_in_space_iterate(sp, cl);
   1.326 +}
   1.327 +
   1.328 +class GenerationObjIterateClosure : public SpaceClosure {
   1.329 + private:
   1.330 +  ObjectClosure* _cl;
   1.331 + public:
   1.332 +  virtual void do_space(Space* s) {
   1.333 +    s->object_iterate(_cl);
   1.334 +  }
   1.335 +  GenerationObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
   1.336 +};
   1.337 +
   1.338 +void Generation::object_iterate(ObjectClosure* cl) {
   1.339 +  GenerationObjIterateClosure blk(cl);
   1.340 +  space_iterate(&blk);
   1.341 +}
   1.342 +
   1.343 +class GenerationSafeObjIterateClosure : public SpaceClosure {
   1.344 + private:
   1.345 +  ObjectClosure* _cl;
   1.346 + public:
   1.347 +  virtual void do_space(Space* s) {
   1.348 +    s->safe_object_iterate(_cl);
   1.349 +  }
   1.350 +  GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
   1.351 +};
   1.352 +
   1.353 +void Generation::safe_object_iterate(ObjectClosure* cl) {
   1.354 +  GenerationSafeObjIterateClosure blk(cl);
   1.355 +  space_iterate(&blk);
   1.356 +}
   1.357 +
   1.358 +void Generation::prepare_for_compaction(CompactPoint* cp) {
   1.359 +  // Generic implementation, can be specialized
   1.360 +  CompactibleSpace* space = first_compaction_space();
   1.361 +  while (space != NULL) {
   1.362 +    space->prepare_for_compaction(cp);
   1.363 +    space = space->next_compaction_space();
   1.364 +  }
   1.365 +}
   1.366 +
// Forwards adjust_pointers to every space it visits.
class AdjustPointersClosure: public SpaceClosure {
 public:
  void do_space(Space* sp) {
    sp->adjust_pointers();
  }
};
   1.373 +
   1.374 +void Generation::adjust_pointers() {
   1.375 +  // Note that this is done over all spaces, not just the compactible
   1.376 +  // ones.
   1.377 +  AdjustPointersClosure blk;
   1.378 +  space_iterate(&blk, true);
   1.379 +}
   1.380 +
   1.381 +void Generation::compact() {
   1.382 +  CompactibleSpace* sp = first_compaction_space();
   1.383 +  while (sp != NULL) {
   1.384 +    sp->compact();
   1.385 +    sp = sp->next_compaction_space();
   1.386 +  }
   1.387 +}
   1.388 +
// Construct a card-table-based generation: sets up the shared block
// offset table over the whole reservation and sizes the remembered
// set's covered region to the initially committed part.
CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level,
                               GenRemSet* remset) :
  Generation(rs, initial_byte_size, level), _rs(remset),
  _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
  _used_at_prologue()
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  // The reservation must be at least word-aligned (low two bits clear).
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  // BOT spans the whole reservation but only commits the initial size.
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
  if (_bts == NULL)
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
  _min_heap_delta_bytes = MinHeapDeltaBytes;
  _capacity_at_prologue = initial_byte_size;
  _used_at_prologue = 0;
}
   1.422 +
   1.423 +bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
   1.424 +  assert_locked_or_safepoint(Heap_lock);
   1.425 +  if (bytes == 0) {
   1.426 +    return true;  // That's what grow_by(0) would return
   1.427 +  }
   1.428 +  size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
   1.429 +  if (aligned_bytes == 0){
   1.430 +    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
   1.431 +    // return true with the implication that an expansion was done when it
   1.432 +    // was not.  A call to expand implies a best effort to expand by "bytes"
   1.433 +    // but not a guarantee.  Align down to give a best effort.  This is likely
   1.434 +    // the most that the generation can expand since it has some capacity to
   1.435 +    // start with.
   1.436 +    aligned_bytes = ReservedSpace::page_align_size_down(bytes);
   1.437 +  }
   1.438 +  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   1.439 +  bool success = false;
   1.440 +  if (aligned_expand_bytes > aligned_bytes) {
   1.441 +    success = grow_by(aligned_expand_bytes);
   1.442 +  }
   1.443 +  if (!success) {
   1.444 +    success = grow_by(aligned_bytes);
   1.445 +  }
   1.446 +  if (!success) {
   1.447 +    success = grow_to_reserved();
   1.448 +  }
   1.449 +  if (PrintGC && Verbose) {
   1.450 +    if (success && GC_locker::is_active_and_needs_gc()) {
   1.451 +      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
   1.452 +    }
   1.453 +  }
   1.454 +
   1.455 +  return success;
   1.456 +}
   1.457 +
   1.458 +
// No young generation references, clear this generation's cards.
// Note: clears the entire reserved region, not just the used part
// (contrast invalidate_remembered_set below).
void CardGeneration::clear_remembered_set() {
  _rs->clear(reserved());
}
   1.463 +
   1.464 +
// Objects in this generation may have moved, invalidate this
// generation's cards.
void CardGeneration::invalidate_remembered_set() {
  // Only the used region can hold (moved) objects, so restrict
  // invalidation to it.
  _rs->invalidate(used_region());
}
   1.470 +
   1.471 +
// Resize this generation after a collection: expand when free space has
// fallen below MinHeapFreeRatio; otherwise consider shrinking, damped
// across successive calls via _shrink_factor, when free space exceeds
// MaxHeapFreeRatio or the generation was expanded for promotions.
void CardGeneration::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  size_t current_shrink_factor = _shrink_factor;
  // Reset; re-armed below only if we actually decide to shrink.
  _shrink_factor = 0;

  // We don't have floating point command-line arguments
  // Note:  argument processing ensures that MinHeapFreeRatio < 100.
  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  spec()->init_size());
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");

  if (PrintGC && Verbose) {
    const size_t free_after_gc = free();
    const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
    // NOTE(review): label says "TenuredGeneration" but this method is
    // CardGeneration::compute_new_size — confirm whether the label is
    // intentional before changing the log output.
    gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                  "  minimum_free_percentage: %6.2f"
                  "  maximum_used_percentage: %6.2f",
                  minimum_free_percentage,
                  maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                  "   free_after_gc   : %6.1fK"
                  "   used_after_gc   : %6.1fK"
                  "   capacity_after_gc   : %6.1fK",
                  free_after_gc / (double) K,
                  used_after_gc / (double) K,
                  capacity_after_gc / (double) K);
    gclog_or_tty->print_cr("  "
                  "   free_percentage: %6.2f",
                  free_percentage);
  }

  if (capacity_after_gc < minimum_desired_capacity) {
    // If we have less free space than we want then expand
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    // Don't expand unless it's significant
    if (expand_bytes >= _min_heap_delta_bytes) {
      expand(expand_bytes, 0); // safe if expansion fails
    }
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("    expanding:"
                    "  minimum_desired_capacity: %6.1fK"
                    "  expand_bytes: %6.1fK"
                    "  _min_heap_delta_bytes: %6.1fK",
                    minimum_desired_capacity / (double) K,
                    expand_bytes / (double) K,
                    _min_heap_delta_bytes / (double) K);
    }
    // Expansion path taken; never expand and shrink in the same pass.
    return;
  }

  // No expansion, now see if we want to shrink
  size_t shrink_bytes = 0;
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;

  if (MaxHeapFreeRatio < 100) {
    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    spec()->init_size());
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  _capacity_at_prologue: %6.1fK"
                             "  minimum_desired_capacity: %6.1fK"
                             "  maximum_desired_capacity: %6.1fK",
                             _capacity_at_prologue / (double) K,
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }
    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_after_gc > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
      // We don't want shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
      }
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("  "
                      "  shrinking:"
                      "  initSize: %.1fK"
                      "  maximum_desired_capacity: %.1fK",
                      spec()->init_size() / (double) K,
                      maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
                      "  shrink_bytes: %.1fK"
                      "  current_shrink_factor: %d"
                      "  new shrink factor: %d"
                      "  _min_heap_delta_bytes: %.1fK",
                      shrink_bytes / (double) K,
                      current_shrink_factor,
                      _shrink_factor,
                      _min_heap_delta_bytes / (double) K);
      }
    }
  }

  if (capacity_after_gc > _capacity_at_prologue) {
    // We might have expanded for promotions, in which case we might want to
    // take back that expansion if there's room after GC.  That keeps us from
    // stretching the heap with promotions when there's plenty of room.
    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
    // We have two shrinking computations, take the largest
    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  aggressive shrinking:"
                             "  _capacity_at_prologue: %.1fK"
                             "  capacity_after_gc: %.1fK"
                             "  expansion_for_promotion: %.1fK"
                             "  shrink_bytes: %.1fK",
                             capacity_after_gc / (double) K,
                             _capacity_at_prologue / (double) K,
                             expansion_for_promotion / (double) K,
                             shrink_bytes / (double) K);
    }
  }
  // Don't shrink unless it's significant
  if (shrink_bytes >= _min_heap_delta_bytes) {
    shrink(shrink_bytes);
  }
}
   1.625 +
// Currently nothing to do: a plain CardGeneration needs no preparation
// before heap verification.
void CardGeneration::prepare_for_verify() {}
   1.628 +
   1.629 +
// Collect this generation via GenMarkSweep at a safepoint, bracketed by
// timer/tracer start and end events.  "full" and "size"/"is_tlab" are
// unused by this implementation.
void OneContigSpaceCardGeneration::collect(bool   full,
                                           bool   clear_all_soft_refs,
                                           size_t size,
                                           bool   is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  SpecializationStats::clear();
  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  // (restored automatically when "x" goes out of scope).
  ReferenceProcessorSpanMutator
    x(ref_processor(), gch->reserved_region());

  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());

  SpecializationStats::print();
}
   1.656 +
   1.657 +HeapWord*
   1.658 +OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
   1.659 +                                                  bool is_tlab,
   1.660 +                                                  bool parallel) {
   1.661 +  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
   1.662 +  if (parallel) {
   1.663 +    MutexLocker x(ParGCRareEvent_lock);
   1.664 +    HeapWord* result = NULL;
   1.665 +    size_t byte_size = word_size * HeapWordSize;
   1.666 +    while (true) {
   1.667 +      expand(byte_size, _min_heap_delta_bytes);
   1.668 +      if (GCExpandToAllocateDelayMillis > 0) {
   1.669 +        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
   1.670 +      }
   1.671 +      result = _the_space->par_allocate(word_size);
   1.672 +      if ( result != NULL) {
   1.673 +        return result;
   1.674 +      } else {
   1.675 +        // If there's not enough expansion space available, give up.
   1.676 +        if (_virtual_space.uncommitted_size() < byte_size) {
   1.677 +          return NULL;
   1.678 +        }
   1.679 +        // else try again
   1.680 +      }
   1.681 +    }
   1.682 +  } else {
   1.683 +    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
   1.684 +    return _the_space->allocate(word_size);
   1.685 +  }
   1.686 +}
   1.687 +
// Serialized wrapper: take ExpandHeap_lock (via GCMutexLocker) around
// the shared CardGeneration::expand logic.
bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}
   1.692 +
   1.693 +
   1.694 +void OneContigSpaceCardGeneration::shrink(size_t bytes) {
   1.695 +  assert_locked_or_safepoint(ExpandHeap_lock);
   1.696 +  size_t size = ReservedSpace::page_align_size_down(bytes);
   1.697 +  if (size > 0) {
   1.698 +    shrink_by(size);
   1.699 +  }
   1.700 +}
   1.701 +
   1.702 +
// Capacity in bytes of the single contiguous space.
size_t OneContigSpaceCardGeneration::capacity() const {
  return _the_space->capacity();
}
   1.706 +
   1.707 +
   1.708 +size_t OneContigSpaceCardGeneration::used() const {
   1.709 +  return _the_space->used();
   1.710 +}
   1.711 +
   1.712 +
   1.713 +size_t OneContigSpaceCardGeneration::free() const {
   1.714 +  return _the_space->free();
   1.715 +}
   1.716 +
   1.717 +MemRegion OneContigSpaceCardGeneration::used_region() const {
   1.718 +  return the_space()->used_region();
   1.719 +}
   1.720 +
   1.721 +size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
   1.722 +  return _the_space->free();
   1.723 +}
   1.724 +
   1.725 +size_t OneContigSpaceCardGeneration::contiguous_available() const {
   1.726 +  return _the_space->free() + _virtual_space.uncommitted_size();
   1.727 +}
   1.728 +
// Grow the committed part of the generation by "bytes".  On success the
// covering card table and the shared block offset array are resized
// FIRST, and only then is the space's end moved up: the space's own BOT
// expansion (triggered by set_end) uses part of the shared array resized
// here.  Returns false if the underlying virtual space could not be
// expanded.
bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    // New committed size, in heap words, measured from the space bottom.
    size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_the_space->bottom(), new_word_size);
    // Expand card table
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531: mangle the newly committed (still unused)
    // memory, i.e. everything between the space's current end and the
    // new high-water mark of the virtual space.
    if (ZapUnusedHeapArea) {
      MemRegion mangle_region(_the_space->end(),
      (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    _the_space->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}
   1.765 +
   1.766 +
   1.767 +bool OneContigSpaceCardGeneration::grow_to_reserved() {
   1.768 +  assert_locked_or_safepoint(ExpandHeap_lock);
   1.769 +  bool success = true;
   1.770 +  const size_t remaining_bytes = _virtual_space.uncommitted_size();
   1.771 +  if (remaining_bytes > 0) {
   1.772 +    success = grow_by(remaining_bytes);
   1.773 +    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
   1.774 +  }
   1.775 +  return success;
   1.776 +}
   1.777 +
// Shrink the committed part of the generation by "bytes" (the caller --
// cf. shrink() -- passes a page-aligned amount).  The inverse of
// grow_by(): uncommit, pull the space's end back, then trim the shared
// block offset array and the card table down to the smaller region.
void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  // Shrink committed space
  _virtual_space.shrink_by(bytes);
  // Shrink space; this also shrinks the space's BOT
  _the_space->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(_the_space->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(_the_space->bottom(), new_word_size);
  // Shrink the card table
  Universe::heap()->barrier_set()->resize_covered_region(mr);

  if (Verbose && PrintGC) {
    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size + bytes;
    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, new_mem_size/K);
  }
}
   1.798 +
// Hook invoked before heap verification; a one-contig-space generation
// currently has nothing to prepare.
void OneContigSpaceCardGeneration::prepare_for_verify() {}
   1.801 +
   1.802 +
   1.803 +// Override for a card-table generation with one contiguous
   1.804 +// space. NOTE: For reasons that are lost in the fog of history,
   1.805 +// this code is used when you iterate over perm gen objects,
   1.806 +// even when one uses CDS, where the perm gen has a couple of
   1.807 +// other spaces; this is because CompactingPermGenGen derives
   1.808 +// from OneContigSpaceCardGeneration. This should be cleaned up,
// see CR 6897789.
   1.810 +void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
   1.811 +  _the_space->object_iterate(blk);
   1.812 +}
   1.813 +
   1.814 +void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
   1.815 +                                                 bool usedOnly) {
   1.816 +  blk->do_space(_the_space);
   1.817 +}
   1.818 +
// Iterate, via younger_refs_in_space_iterate(), over this generation's
// single space with "blk".  The closure must know which generation it
// is scanning while it runs, hence the set_generation()/
// reset_generation() bracketing.
void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
  blk->set_generation(this);
  younger_refs_in_space_iterate(_the_space, blk);
  blk->reset_generation();
}
   1.824 +
   1.825 +void OneContigSpaceCardGeneration::save_marks() {
   1.826 +  _the_space->set_saved_mark();
   1.827 +}
   1.828 +
   1.829 +
   1.830 +void OneContigSpaceCardGeneration::reset_saved_marks() {
   1.831 +  _the_space->reset_saved_mark();
   1.832 +}
   1.833 +
   1.834 +
   1.835 +bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
   1.836 +  return _the_space->saved_mark_at_top();
   1.837 +}
   1.838 +
// For each closure type listed in ALL_SINCE_SAVE_MARKS_CLOSURES, define
// oop_since_save_marks_iterate##nv_suffix: apply the closure (tagged
// with this generation for the duration) to the objects allocated in
// the space since the last save_marks(), then call save_marks() again
// so a subsequent invocation resumes from the current top.
// (No comments inside the macro body: they would break the
// backslash line continuations.)
#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)      \
                                                                                \
void OneContigSpaceCardGeneration::                                             \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {                  \
  blk->set_generation(this);                                                    \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                     \
  blk->reset_generation();                                                      \
  save_marks();                                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN
   1.852 +
   1.853 +
// Post-GC bookkeeping: record the space's current top as the watermark
// of the last GC, refresh the performance counters, and (when heap
// zapping is enabled) check that the unused part of the space has been
// completely mangled.  The "full" argument is not used here.
void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
  _last_gc = WaterMark(the_space(), the_space()->top());

  // update the generation and space performance counters
  update_counters();
  if (ZapUnusedHeapArea) {
    the_space()->check_mangled_unused_area_complete();
  }
}
   1.863 +
// Record the space's current top as the allocation high-water mark used
// by the unused-area mangling machinery.  Only valid (asserted) when
// ZapUnusedHeapArea is set.
void OneContigSpaceCardGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  the_space()->set_top_for_allocations();
}
   1.868 +
   1.869 +void OneContigSpaceCardGeneration::verify() {
   1.870 +  the_space()->verify();
   1.871 +}
   1.872 +
// Print the generic generation information, then the single contiguous
// ("the") space it manages.
void OneContigSpaceCardGeneration::print_on(outputStream* st)  const {
  Generation::print_on(st);
  st->print("   the");
  the_space()->print_on(st);
}

mercurial