src/share/vm/memory/generation.cpp

changeset 435
a61af66fc99e
child 548
ba764ed4b6f2
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/memory/generation.cpp	Sat Dec 01 00:00:00 2007 +0000
     1.3 @@ -0,0 +1,635 @@
     1.4 +/*
     1.5 + * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    1.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
    1.24 + * have any questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +# include "incls/_precompiled.incl"
    1.29 +# include "incls/_generation.cpp.incl"
    1.30 +
    1.31 +Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
    1.32 +  _level(level),
    1.33 +  _ref_processor(NULL) {
    1.34 +  if (!_virtual_space.initialize(rs, initial_size)) {
    1.35 +    vm_exit_during_initialization("Could not reserve enough space for "
    1.36 +                    "object heap");
    1.37 +  }
    1.38 +  _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
    1.39 +          (HeapWord*)_virtual_space.high_boundary());
    1.40 +}
    1.41 +
    1.42 +GenerationSpec* Generation::spec() {
    1.43 +  GenCollectedHeap* gch = GenCollectedHeap::heap();
    1.44 +  assert(0 <= level() && level() < gch->_n_gens, "Bad gen level");
    1.45 +  return gch->_gen_specs[level()];
    1.46 +}
    1.47 +
    1.48 +size_t Generation::max_capacity() const {
    1.49 +  return reserved().byte_size();
    1.50 +}
    1.51 +
// Log this generation's footprint change across a collection in the form
// "prev_used->used(capacity)".  With both PrintGCDetails and Verbose set the
// raw byte counts are printed; otherwise the values are scaled to kilobytes.
void Generation::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}
    1.65 +
    1.66 +// By default we get a single threaded default reference processor;
    1.67 +// generations needing multi-threaded refs discovery override this method.
    1.68 +void Generation::ref_processor_init() {
    1.69 +  assert(_ref_processor == NULL, "a reference processor already exists");
    1.70 +  assert(!_reserved.is_empty(), "empty generation?");
    1.71 +  _ref_processor =
    1.72 +    new ReferenceProcessor(_reserved,                  // span
    1.73 +                           refs_discovery_is_atomic(), // atomic_discovery
    1.74 +                           refs_discovery_is_mt());    // mt_discovery
    1.75 +  if (_ref_processor == NULL) {
    1.76 +    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
    1.77 +  }
    1.78 +}
    1.79 +
    1.80 +void Generation::print() const { print_on(tty); }
    1.81 +
// Print a one-line summary: generation name, total/used sizes in KB, and the
// [low_boundary, high, high_boundary) addresses of the underlying
// virtual space (i.e. reserved start, committed end, reserved end).
void Generation::print_on(outputStream* st)  const {
  st->print(" %-20s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
              _virtual_space.low_boundary(),
              _virtual_space.high(),
              _virtual_space.high_boundary());
}
    1.91 +
    1.92 +void Generation::print_summary_info() { print_summary_info_on(tty); }
    1.93 +
// Print accumulated GC time, invocation count, and average time per GC for
// this generation, taken from its StatRecord.
void Generation::print_summary_info_on(outputStream* st) {
  StatRecord* sr = stat_record();
  double time = sr->accumulated_time.seconds();
  // Guard against division by zero when no collections have occurred yet.
  st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
               "%d GC's, avg GC time %3.7f]",
               level(), time, sr->invocations,
               sr->invocations > 0 ? time / sr->invocations : 0.0);
}
   1.102 +
   1.103 +// Utility iterator classes
   1.104 +
   1.105 +class GenerationIsInReservedClosure : public SpaceClosure {
   1.106 + public:
   1.107 +  const void* _p;
   1.108 +  Space* sp;
   1.109 +  virtual void do_space(Space* s) {
   1.110 +    if (sp == NULL) {
   1.111 +      if (s->is_in_reserved(_p)) sp = s;
   1.112 +    }
   1.113 +  }
   1.114 +  GenerationIsInReservedClosure(const void* p) : _p(p), sp(NULL) {}
   1.115 +};
   1.116 +
   1.117 +class GenerationIsInClosure : public SpaceClosure {
   1.118 + public:
   1.119 +  const void* _p;
   1.120 +  Space* sp;
   1.121 +  virtual void do_space(Space* s) {
   1.122 +    if (sp == NULL) {
   1.123 +      if (s->is_in(_p)) sp = s;
   1.124 +    }
   1.125 +  }
   1.126 +  GenerationIsInClosure(const void* p) : _p(p), sp(NULL) {}
   1.127 +};
   1.128 +
   1.129 +bool Generation::is_in(const void* p) const {
   1.130 +  GenerationIsInClosure blk(p);
   1.131 +  ((Generation*)this)->space_iterate(&blk);
   1.132 +  return blk.sp != NULL;
   1.133 +}
   1.134 +
// Checked downcast to DefNewGeneration.  ParNew and ASParNew are
// DefNewGeneration subtypes, so any of the three young-generation kinds is
// acceptable; anything else trips the assert in debug builds.
DefNewGeneration* Generation::as_DefNewGeneration() {
  assert((kind() == Generation::DefNew) ||
         (kind() == Generation::ParNew) ||
         (kind() == Generation::ASParNew),
    "Wrong youngest generation type");
  return (DefNewGeneration*) this;
}
   1.142 +
   1.143 +Generation* Generation::next_gen() const {
   1.144 +  GenCollectedHeap* gch = GenCollectedHeap::heap();
   1.145 +  int next = level() + 1;
   1.146 +  if (next < gch->_n_gens) {
   1.147 +    return gch->_gens[next];
   1.148 +  } else {
   1.149 +    return NULL;
   1.150 +  }
   1.151 +}
   1.152 +
   1.153 +size_t Generation::max_contiguous_available() const {
   1.154 +  // The largest number of contiguous free words in this or any higher generation.
   1.155 +  size_t max = 0;
   1.156 +  for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) {
   1.157 +    size_t avail = gen->contiguous_available();
   1.158 +    if (avail > max) {
   1.159 +      max = avail;
   1.160 +    }
   1.161 +  }
   1.162 +  return max;
   1.163 +}
   1.164 +
   1.165 +bool Generation::promotion_attempt_is_safe(size_t promotion_in_bytes,
   1.166 +                                           bool not_used) const {
   1.167 +  if (PrintGC && Verbose) {
   1.168 +    gclog_or_tty->print_cr("Generation::promotion_attempt_is_safe"
   1.169 +                " contiguous_available: " SIZE_FORMAT
   1.170 +                " promotion_in_bytes: " SIZE_FORMAT,
   1.171 +                max_contiguous_available(), promotion_in_bytes);
   1.172 +  }
   1.173 +  return max_contiguous_available() >= promotion_in_bytes;
   1.174 +}
   1.175 +
   1.176 +// Ignores "ref" and calls allocate().
// Ignores "ref" and calls allocate().
// Copy obj (obj_size words) into freshly allocated space in this generation
// and return the copy.  If allocation here fails, delegate to the heap's
// failed-promotion handler.  In non-product builds the heap may deliberately
// report a promotion failure first (test hook; see promotion_should_fail()).
oop Generation::promote(oop obj, size_t obj_size, oop* ref) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");

#ifndef PRODUCT
  // Optional injected failure to exercise promotion-failure paths.
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  HeapWord* result = allocate(obj_size, false);
  if (result != NULL) {
    // Source and destination are distinct regions, so the disjoint word
    // copy is safe.
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
    return oop(result);
  } else {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    return gch->handle_failed_promotion(this, obj, obj_size, ref);
  }
}
   1.195 +
// Parallel promotion is not supported by the base class; generations that
// support it (e.g. those used with parallel collectors) must override.
oop Generation::par_promote(int thread_num,
                            oop obj, markOop m, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  ShouldNotCallThis();
  return NULL;
}
   1.202 +
// Undo of a parallel promotion allocation is likewise unsupported here;
// overriding generations must provide their own implementation.
void Generation::par_promote_alloc_undo(int thread_num,
                                        HeapWord* obj, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  guarantee(false, "No good general implementation.");
}
   1.208 +
   1.209 +Space* Generation::space_containing(const void* p) const {
   1.210 +  GenerationIsInReservedClosure blk(p);
   1.211 +  // Cast away const
   1.212 +  ((Generation*)this)->space_iterate(&blk);
   1.213 +  return blk.sp;
   1.214 +}
   1.215 +
   1.216 +// Some of these are mediocre general implementations.  Should be
   1.217 +// overridden to get better performance.
   1.218 +
   1.219 +class GenerationBlockStartClosure : public SpaceClosure {
   1.220 + public:
   1.221 +  const void* _p;
   1.222 +  HeapWord* _start;
   1.223 +  virtual void do_space(Space* s) {
   1.224 +    if (_start == NULL && s->is_in_reserved(_p)) {
   1.225 +      _start = s->block_start(_p);
   1.226 +    }
   1.227 +  }
   1.228 +  GenerationBlockStartClosure(const void* p) { _p = p; _start = NULL; }
   1.229 +};
   1.230 +
   1.231 +HeapWord* Generation::block_start(const void* p) const {
   1.232 +  GenerationBlockStartClosure blk(p);
   1.233 +  // Cast away const
   1.234 +  ((Generation*)this)->space_iterate(&blk);
   1.235 +  return blk._start;
   1.236 +}
   1.237 +
   1.238 +class GenerationBlockSizeClosure : public SpaceClosure {
   1.239 + public:
   1.240 +  const HeapWord* _p;
   1.241 +  size_t size;
   1.242 +  virtual void do_space(Space* s) {
   1.243 +    if (size == 0 && s->is_in_reserved(_p)) {
   1.244 +      size = s->block_size(_p);
   1.245 +    }
   1.246 +  }
   1.247 +  GenerationBlockSizeClosure(const HeapWord* p) { _p = p; size = 0; }
   1.248 +};
   1.249 +
   1.250 +size_t Generation::block_size(const HeapWord* p) const {
   1.251 +  GenerationBlockSizeClosure blk(p);
   1.252 +  // Cast away const
   1.253 +  ((Generation*)this)->space_iterate(&blk);
   1.254 +  assert(blk.size > 0, "seems reasonable");
   1.255 +  return blk.size;
   1.256 +}
   1.257 +
   1.258 +class GenerationBlockIsObjClosure : public SpaceClosure {
   1.259 + public:
   1.260 +  const HeapWord* _p;
   1.261 +  bool is_obj;
   1.262 +  virtual void do_space(Space* s) {
   1.263 +    if (!is_obj && s->is_in_reserved(_p)) {
   1.264 +      is_obj |= s->block_is_obj(_p);
   1.265 +    }
   1.266 +  }
   1.267 +  GenerationBlockIsObjClosure(const HeapWord* p) { _p = p; is_obj = false; }
   1.268 +};
   1.269 +
   1.270 +bool Generation::block_is_obj(const HeapWord* p) const {
   1.271 +  GenerationBlockIsObjClosure blk(p);
   1.272 +  // Cast away const
   1.273 +  ((Generation*)this)->space_iterate(&blk);
   1.274 +  return blk.is_obj;
   1.275 +}
   1.276 +
   1.277 +class GenerationOopIterateClosure : public SpaceClosure {
   1.278 + public:
   1.279 +  OopClosure* cl;
   1.280 +  MemRegion mr;
   1.281 +  virtual void do_space(Space* s) {
   1.282 +    s->oop_iterate(mr, cl);
   1.283 +  }
   1.284 +  GenerationOopIterateClosure(OopClosure* _cl, MemRegion _mr) :
   1.285 +    cl(_cl), mr(_mr) {}
   1.286 +};
   1.287 +
   1.288 +void Generation::oop_iterate(OopClosure* cl) {
   1.289 +  GenerationOopIterateClosure blk(cl, _reserved);
   1.290 +  space_iterate(&blk);
   1.291 +}
   1.292 +
   1.293 +void Generation::oop_iterate(MemRegion mr, OopClosure* cl) {
   1.294 +  GenerationOopIterateClosure blk(cl, mr);
   1.295 +  space_iterate(&blk);
   1.296 +}
   1.297 +
   1.298 +void Generation::younger_refs_in_space_iterate(Space* sp,
   1.299 +                                               OopsInGenClosure* cl) {
   1.300 +  GenRemSet* rs = SharedHeap::heap()->rem_set();
   1.301 +  rs->younger_refs_in_space_iterate(sp, cl);
   1.302 +}
   1.303 +
   1.304 +class GenerationObjIterateClosure : public SpaceClosure {
   1.305 + private:
   1.306 +  ObjectClosure* _cl;
   1.307 + public:
   1.308 +  virtual void do_space(Space* s) {
   1.309 +    s->object_iterate(_cl);
   1.310 +  }
   1.311 +  GenerationObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
   1.312 +};
   1.313 +
   1.314 +void Generation::object_iterate(ObjectClosure* cl) {
   1.315 +  GenerationObjIterateClosure blk(cl);
   1.316 +  space_iterate(&blk);
   1.317 +}
   1.318 +
   1.319 +void Generation::prepare_for_compaction(CompactPoint* cp) {
   1.320 +  // Generic implementation, can be specialized
   1.321 +  CompactibleSpace* space = first_compaction_space();
   1.322 +  while (space != NULL) {
   1.323 +    space->prepare_for_compaction(cp);
   1.324 +    space = space->next_compaction_space();
   1.325 +  }
   1.326 +}
   1.327 +
   1.328 +class AdjustPointersClosure: public SpaceClosure {
   1.329 + public:
   1.330 +  void do_space(Space* sp) {
   1.331 +    sp->adjust_pointers();
   1.332 +  }
   1.333 +};
   1.334 +
   1.335 +void Generation::adjust_pointers() {
   1.336 +  // Note that this is done over all spaces, not just the compactible
   1.337 +  // ones.
   1.338 +  AdjustPointersClosure blk;
   1.339 +  space_iterate(&blk, true);
   1.340 +}
   1.341 +
   1.342 +void Generation::compact() {
   1.343 +  CompactibleSpace* sp = first_compaction_space();
   1.344 +  while (sp != NULL) {
   1.345 +    sp->compact();
   1.346 +    sp = sp->next_compaction_space();
   1.347 +  }
   1.348 +}
   1.349 +
   1.350 +CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
   1.351 +                               int level,
   1.352 +                               GenRemSet* remset) :
   1.353 +  Generation(rs, initial_byte_size, level), _rs(remset)
   1.354 +{
   1.355 +  HeapWord* start = (HeapWord*)rs.base();
   1.356 +  size_t reserved_byte_size = rs.size();
   1.357 +  assert((uintptr_t(start) & 3) == 0, "bad alignment");
   1.358 +  assert((reserved_byte_size & 3) == 0, "bad alignment");
   1.359 +  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
   1.360 +  _bts = new BlockOffsetSharedArray(reserved_mr,
   1.361 +                                    heap_word_size(initial_byte_size));
   1.362 +  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
   1.363 +  _rs->resize_covered_region(committed_mr);
   1.364 +  if (_bts == NULL)
   1.365 +    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
   1.366 +
   1.367 +  // Verify that the start and end of this generation is the start of a card.
   1.368 +  // If this wasn't true, a single card could span more than on generation,
   1.369 +  // which would cause problems when we commit/uncommit memory, and when we
   1.370 +  // clear and dirty cards.
   1.371 +  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
   1.372 +  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
   1.373 +    // Don't check at the very end of the heap as we'll assert that we're probing off
   1.374 +    // the end if we try.
   1.375 +    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
   1.376 +  }
   1.377 +}
   1.378 +
   1.379 +
   1.380 +// No young generation references, clear this generation's cards.
   1.381 +void CardGeneration::clear_remembered_set() {
   1.382 +  _rs->clear(reserved());
   1.383 +}
   1.384 +
   1.385 +
   1.386 +// Objects in this generation may have moved, invalidate this
   1.387 +// generation's cards.
   1.388 +void CardGeneration::invalidate_remembered_set() {
   1.389 +  _rs->invalidate(used_region());
   1.390 +}
   1.391 +
   1.392 +
   1.393 +// Currently nothing to do.
   1.394 +void CardGeneration::prepare_for_verify() {}
   1.395 +
   1.396 +
// Collect this generation with mark-sweep(-compact) at a safepoint.  The
// full/size/is_tlab arguments are not consulted here; the collection always
// runs GenMarkSweep over all generations up to and including _level.
void OneContigSpaceCardGeneration::collect(bool   full,
                                           bool   clear_all_soft_refs,
                                           size_t size,
                                           bool   is_tlab) {
  SpecializationStats::clear();
  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  // (the mutator restores the original span when it goes out of scope).
  ReferenceProcessorSpanMutator
    x(ref_processor(), GenCollectedHeap::heap()->reserved_region());
  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
  SpecializationStats::print();
}
   1.409 +
// Expand the generation and then allocate word_size words.  In the parallel
// case, expansion and allocation are retried under ParGCRareEvent_lock until
// either the allocation succeeds or no uncommitted space remains; in the
// serial case a single expand-then-allocate attempt is made.  Returns NULL
// on failure.  TLAB allocation is not supported by this generation.
HeapWord*
OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
                                                  bool is_tlab,
                                                  bool parallel) {
  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
  if (parallel) {
    // Serialize rare parallel expansion events.
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        // Optional delay (diagnostic flag) between expansion and allocation.
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if ( result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // else try again
      }
    }
  } else {
    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}
   1.440 +
   1.441 +void OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
   1.442 +  GCMutexLocker x(ExpandHeap_lock);
   1.443 +  size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
   1.444 +  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   1.445 +  bool success = false;
   1.446 +  if (aligned_expand_bytes > aligned_bytes) {
   1.447 +    success = grow_by(aligned_expand_bytes);
   1.448 +  }
   1.449 +  if (!success) {
   1.450 +    success = grow_by(aligned_bytes);
   1.451 +  }
   1.452 +  if (!success) {
   1.453 +    grow_to_reserved();
   1.454 +  }
   1.455 +  if (GC_locker::is_active()) {
   1.456 +    if (PrintGC && Verbose) {
   1.457 +      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
   1.458 +    }
   1.459 +  }
   1.460 +}
   1.461 +
   1.462 +
   1.463 +void OneContigSpaceCardGeneration::shrink(size_t bytes) {
   1.464 +  assert_locked_or_safepoint(ExpandHeap_lock);
   1.465 +  size_t size = ReservedSpace::page_align_size_down(bytes);
   1.466 +  if (size > 0) {
   1.467 +    shrink_by(size);
   1.468 +  }
   1.469 +}
   1.470 +
   1.471 +
   1.472 +size_t OneContigSpaceCardGeneration::capacity() const {
   1.473 +  return _the_space->capacity();
   1.474 +}
   1.475 +
   1.476 +
   1.477 +size_t OneContigSpaceCardGeneration::used() const {
   1.478 +  return _the_space->used();
   1.479 +}
   1.480 +
   1.481 +
   1.482 +size_t OneContigSpaceCardGeneration::free() const {
   1.483 +  return _the_space->free();
   1.484 +}
   1.485 +
   1.486 +MemRegion OneContigSpaceCardGeneration::used_region() const {
   1.487 +  return the_space()->used_region();
   1.488 +}
   1.489 +
   1.490 +size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
   1.491 +  return _the_space->free();
   1.492 +}
   1.493 +
   1.494 +size_t OneContigSpaceCardGeneration::contiguous_available() const {
   1.495 +  return _the_space->free() + _virtual_space.uncommitted_size();
   1.496 +}
   1.497 +
// Commit "bytes" more of the reserved region.  On success the card table and
// the shared block offset array are resized to cover the new committed size
// *before* the space's end is moved, so the side structures are always valid
// for the enlarged space.  Returns false if the virtual space could not
// expand.
bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_the_space->bottom(), new_word_size);
    // Expand card table
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    // Mangle the newly committed memory before exposing it via set_end.
    MemRegion mangle_region(_the_space->end(), (HeapWord*)_virtual_space.high());
    _the_space->mangle_region(mangle_region);

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    _the_space->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}
   1.531 +
   1.532 +
   1.533 +bool OneContigSpaceCardGeneration::grow_to_reserved() {
   1.534 +  assert_locked_or_safepoint(ExpandHeap_lock);
   1.535 +  bool success = true;
   1.536 +  const size_t remaining_bytes = _virtual_space.uncommitted_size();
   1.537 +  if (remaining_bytes > 0) {
   1.538 +    success = grow_by(remaining_bytes);
   1.539 +    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
   1.540 +  }
   1.541 +  return success;
   1.542 +}
   1.543 +
// Uncommit "bytes" from the top of the committed region.  The shrink is
// performed in the reverse order of grow_by: the virtual space and the
// space's end are reduced first, and only then are the block offset array
// and card table resized down to match.
void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  // Shrink committed space
  _virtual_space.shrink_by(bytes);
  // Shrink space; this also shrinks the space's BOT
  _the_space->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(_the_space->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(_the_space->bottom(), new_word_size);
  // Shrink the card table
  Universe::heap()->barrier_set()->resize_covered_region(mr);

  if (Verbose && PrintGC) {
    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size + bytes;
    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, new_mem_size/K);
  }
}
   1.564 +
   1.565 +// Currently nothing to do.
   1.566 +void OneContigSpaceCardGeneration::prepare_for_verify() {}
   1.567 +
   1.568 +
   1.569 +void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
   1.570 +  _the_space->object_iterate(blk);
   1.571 +}
   1.572 +
   1.573 +void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
   1.574 +                                                 bool usedOnly) {
   1.575 +  blk->do_space(_the_space);
   1.576 +}
   1.577 +
// Apply blk to objects allocated since the last GC (i.e. from the _last_gc
// watermark to the current top).
void OneContigSpaceCardGeneration::object_iterate_since_last_GC(ObjectClosure* blk) {
  // Deal with delayed initialization of _the_space,
  // and lack of initialization of _last_gc.
  if (_last_gc.space() == NULL) {
    assert(the_space() != NULL, "shouldn't be NULL");
    // No GC yet: start iterating from the bottom of the space.
    _last_gc = the_space()->bottom_mark();
  }
  the_space()->object_iterate_from(_last_gc, blk);
}
   1.587 +
   1.588 +void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
   1.589 +  blk->set_generation(this);
   1.590 +  younger_refs_in_space_iterate(_the_space, blk);
   1.591 +  blk->reset_generation();
   1.592 +}
   1.593 +
   1.594 +void OneContigSpaceCardGeneration::save_marks() {
   1.595 +  _the_space->set_saved_mark();
   1.596 +}
   1.597 +
   1.598 +
   1.599 +void OneContigSpaceCardGeneration::reset_saved_marks() {
   1.600 +  _the_space->reset_saved_mark();
   1.601 +}
   1.602 +
   1.603 +
   1.604 +bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
   1.605 +  return _the_space->saved_mark_at_top();
   1.606 +}
   1.607 +
// For each (closure type, suffix) pair, define
// oop_since_save_marks_iterate##nv_suffix: set this generation as the
// closure's context, walk oops in objects allocated since the last saved
// mark, reset the context, and advance the saved mark so those objects are
// not revisited.
#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)      \
                                                                                \
void OneContigSpaceCardGeneration::                                             \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {                  \
  blk->set_generation(this);                                                    \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                     \
  blk->reset_generation();                                                      \
  save_marks();                                                                 \
}

// Instantiate the definitions for every since-save-marks closure type.
ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN
   1.621 +
   1.622 +
// Post-collection bookkeeping: remember where allocation ends so that
// object_iterate_since_last_GC can walk only objects allocated afterwards.
void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
  _last_gc = WaterMark(the_space(), the_space()->top());

  // update the generation and space performance counters
  update_counters();
}
   1.629 +
   1.630 +void OneContigSpaceCardGeneration::verify(bool allow_dirty) {
   1.631 +  the_space()->verify(allow_dirty);
   1.632 +}
   1.633 +
   1.634 +void OneContigSpaceCardGeneration::print_on(outputStream* st)  const {
   1.635 +  Generation::print_on(st);
   1.636 +  st->print("   the");
   1.637 +  the_space()->print_on(st);
   1.638 +}

mercurial