/*
 * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_generation.cpp.incl"

Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
  _level(level),
  _ref_processor(NULL) {
  if (!_virtual_space.initialize(rs, initial_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
  // Mangle all of the initial generation.
  if (ZapUnusedHeapArea) {
    MemRegion mangle_region((HeapWord*)_virtual_space.low(),
                            (HeapWord*)_virtual_space.high());
    SpaceMangler::mangle_region(mangle_region);
  }
  _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
                        (HeapWord*)_virtual_space.high_boundary());
}

GenerationSpec* Generation::spec() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(0 <= level() && level() < gch->_n_gens, "Bad gen level");
  return gch->_gen_specs[level()];
}

size_t Generation::max_capacity() const {
  return reserved().byte_size();
}

void Generation::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}
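// Illustrative only (not part of the original source): with the
// non-verbose branch above, a collection that drops used() from 8192K
// to 1024K in a generation whose current capacity is 16384K is logged as
//
//    8192K->1024K(16384K)
//
// i.e. used-before -> used-after (current capacity).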
// By default we get a single threaded default reference processor;
// generations needing multi-threaded refs discovery override this method.
void Generation::ref_processor_init() {
  assert(_ref_processor == NULL, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  _ref_processor =
    new ReferenceProcessor(_reserved,                  // span
                           refs_discovery_is_atomic(), // atomic_discovery
                           refs_discovery_is_mt());    // mt_discovery
  if (_ref_processor == NULL) {
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
}

void Generation::print() const { print_on(tty); }

void Generation::print_on(outputStream* st) const {
  st->print(" %-20s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity()/K, used()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               _virtual_space.low_boundary(),
               _virtual_space.high(),
               _virtual_space.high_boundary());
}

void Generation::print_summary_info() { print_summary_info_on(tty); }

void Generation::print_summary_info_on(outputStream* st) {
  StatRecord* sr = stat_record();
  double time = sr->accumulated_time.seconds();
  st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
               "%d GC's, avg GC time %3.7f]",
               level(), time, sr->invocations,
               sr->invocations > 0 ? time / sr->invocations : 0.0);
}

// Utility iterator classes

class GenerationIsInReservedClosure : public SpaceClosure {
 public:
  const void* _p;
  Space* sp;
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in_reserved(_p)) sp = s;
    }
  }
  GenerationIsInReservedClosure(const void* p) : _p(p), sp(NULL) {}
};

class GenerationIsInClosure : public SpaceClosure {
 public:
  const void* _p;
  Space* sp;
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in(_p)) sp = s;
    }
  }
  GenerationIsInClosure(const void* p) : _p(p), sp(NULL) {}
};

bool Generation::is_in(const void* p) const {
  GenerationIsInClosure blk(p);
  ((Generation*)this)->space_iterate(&blk);
  return blk.sp != NULL;
}

DefNewGeneration* Generation::as_DefNewGeneration() {
  assert((kind() == Generation::DefNew) ||
         (kind() == Generation::ParNew) ||
         (kind() == Generation::ASParNew),
         "Wrong youngest generation type");
  return (DefNewGeneration*) this;
}

Generation* Generation::next_gen() const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  int next = level() + 1;
  if (next < gch->_n_gens) {
    return gch->_gens[next];
  } else {
    return NULL;
  }
}
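// A sketch of the calling pattern for the closures above (hypothetical
// caller; is_in() and space_containing() below are the in-tree users):
//
//   GenerationIsInClosure blk(addr);  // addr: const void* being probed
//   gen->space_iterate(&blk);         // visits each space in the generation
//   if (blk.sp != NULL) {             // sp is the first space containing addr
//     ...
//   }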
size_t Generation::max_contiguous_available() const {
  // The largest number of contiguous free words in this or any higher generation.
  size_t max = 0;
  for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) {
    size_t avail = gen->contiguous_available();
    if (avail > max) {
      max = avail;
    }
  }
  return max;
}

bool Generation::promotion_attempt_is_safe(size_t promotion_in_bytes,
                                           bool not_used) const {
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("Generation::promotion_attempt_is_safe"
                           " contiguous_available: " SIZE_FORMAT
                           " promotion_in_bytes: " SIZE_FORMAT,
                           max_contiguous_available(), promotion_in_bytes);
  }
  return max_contiguous_available() >= promotion_in_bytes;
}

// Ignores "ref" and calls allocate().
oop Generation::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  HeapWord* result = allocate(obj_size, false);
  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
    return oop(result);
  } else {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    return gch->handle_failed_promotion(this, obj, obj_size);
  }
}

oop Generation::par_promote(int thread_num,
                            oop obj, markOop m, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  ShouldNotCallThis();
  return NULL;
}

void Generation::par_promote_alloc_undo(int thread_num,
                                        HeapWord* obj, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  guarantee(false, "No good general implementation.");
}

Space* Generation::space_containing(const void* p) const {
  GenerationIsInReservedClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk.sp;
}
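// Worked example for promotion_attempt_is_safe() above (illustrative
// numbers): if a younger collection might promote up to 2M of survivors
// and max_contiguous_available() reports 3M, the attempt is considered
// safe because 3M >= 2M.  This is a conservative capacity check, not a
// guarantee that any particular allocation during promotion succeeds.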
// Some of these are mediocre general implementations.  Should be
// overridden to get better performance.

class GenerationBlockStartClosure : public SpaceClosure {
 public:
  const void* _p;
  HeapWord* _start;
  virtual void do_space(Space* s) {
    if (_start == NULL && s->is_in_reserved(_p)) {
      _start = s->block_start(_p);
    }
  }
  GenerationBlockStartClosure(const void* p) { _p = p; _start = NULL; }
};

HeapWord* Generation::block_start(const void* p) const {
  GenerationBlockStartClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk._start;
}

class GenerationBlockSizeClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  size_t size;
  virtual void do_space(Space* s) {
    if (size == 0 && s->is_in_reserved(_p)) {
      size = s->block_size(_p);
    }
  }
  GenerationBlockSizeClosure(const HeapWord* p) { _p = p; size = 0; }
};

size_t Generation::block_size(const HeapWord* p) const {
  GenerationBlockSizeClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  assert(blk.size > 0, "seems reasonable");
  return blk.size;
}

class GenerationBlockIsObjClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  bool is_obj;
  virtual void do_space(Space* s) {
    if (!is_obj && s->is_in_reserved(_p)) {
      is_obj |= s->block_is_obj(_p);
    }
  }
  GenerationBlockIsObjClosure(const HeapWord* p) { _p = p; is_obj = false; }
};

bool Generation::block_is_obj(const HeapWord* p) const {
  GenerationBlockIsObjClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk.is_obj;
}

class GenerationOopIterateClosure : public SpaceClosure {
 public:
  OopClosure* cl;
  MemRegion mr;
  virtual void do_space(Space* s) {
    s->oop_iterate(mr, cl);
  }
  GenerationOopIterateClosure(OopClosure* _cl, MemRegion _mr) :
    cl(_cl), mr(_mr) {}
};

void Generation::oop_iterate(OopClosure* cl) {
  GenerationOopIterateClosure blk(cl, _reserved);
  space_iterate(&blk);
}

void Generation::oop_iterate(MemRegion mr, OopClosure* cl) {
  GenerationOopIterateClosure blk(cl, mr);
  space_iterate(&blk);
}

void Generation::younger_refs_in_space_iterate(Space* sp,
                                               OopsInGenClosure* cl) {
  GenRemSet* rs = SharedHeap::heap()->rem_set();
  rs->younger_refs_in_space_iterate(sp, cl);
}

class GenerationObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;
 public:
  virtual void do_space(Space* s) {
    s->object_iterate(_cl);
  }
  GenerationObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};

void Generation::object_iterate(ObjectClosure* cl) {
  GenerationObjIterateClosure blk(cl);
  space_iterate(&blk);
}
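// The next three methods are the generation-level hooks for the phases
// of a sliding mark-compact collection: prepare_for_compaction() has
// each space compute forwarding addresses, adjust_pointers() updates
// references to use them, and compact() slides the live objects.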
void Generation::prepare_for_compaction(CompactPoint* cp) {
  // Generic implementation, can be specialized
  CompactibleSpace* space = first_compaction_space();
  while (space != NULL) {
    space->prepare_for_compaction(cp);
    space = space->next_compaction_space();
  }
}

class AdjustPointersClosure: public SpaceClosure {
 public:
  void do_space(Space* sp) {
    sp->adjust_pointers();
  }
};

void Generation::adjust_pointers() {
  // Note that this is done over all spaces, not just the compactible
  // ones.
  AdjustPointersClosure blk;
  space_iterate(&blk, true);
}

void Generation::compact() {
  CompactibleSpace* sp = first_compaction_space();
  while (sp != NULL) {
    sp->compact();
    sp = sp->next_compaction_space();
  }
}

CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level,
                               GenRemSet* remset) :
  Generation(rs, initial_byte_size, level), _rs(remset)
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
  if (_bts == NULL)
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're
    // probing off the end if we try.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
}
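// Illustration of the alignment requirement checked above, assuming
// HotSpot's usual 512-byte cards: if a generation boundary fell in the
// middle of a card, stores on either side of the boundary would dirty
// the same card, so card scanning and commit/uncommit could not treat
// the two generations independently.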
// No young generation references, clear this generation's cards.
void CardGeneration::clear_remembered_set() {
  _rs->clear(reserved());
}


// Objects in this generation may have moved, invalidate this
// generation's cards.
void CardGeneration::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}


// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}


void OneContigSpaceCardGeneration::collect(bool   full,
                                           bool   clear_all_soft_refs,
                                           size_t size,
                                           bool   is_tlab) {
  SpecializationStats::clear();
  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  ReferenceProcessorSpanMutator
    x(ref_processor(), GenCollectedHeap::heap()->reserved_region());
  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
  SpecializationStats::print();
}

HeapWord*
OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
                                                  bool is_tlab,
                                                  bool parallel) {
  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if (result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // else try again
      }
    }
  } else {
    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}

void OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    grow_to_reserved();
  }
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }
}
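// Worked example for expand() above (illustrative sizes, and assuming
// _min_heap_delta_bytes is the configured minimum growth step): a failed
// 64K allocation first tries grow_by() with the larger page-aligned
// minimum delta (say 1M), falls back to grow_by(64K) if that fails, and
// finally grow_to_reserved(); so the generation normally grows by at
// least the minimum delta rather than by each small request.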
void OneContigSpaceCardGeneration::shrink(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  size_t size = ReservedSpace::page_align_size_down(bytes);
  if (size > 0) {
    shrink_by(size);
  }
}


size_t OneContigSpaceCardGeneration::capacity() const {
  return _the_space->capacity();
}


size_t OneContigSpaceCardGeneration::used() const {
  return _the_space->used();
}


size_t OneContigSpaceCardGeneration::free() const {
  return _the_space->free();
}

MemRegion OneContigSpaceCardGeneration::used_region() const {
  return the_space()->used_region();
}

size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}

size_t OneContigSpaceCardGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}

bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
      heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_the_space->bottom(), new_word_size);
    // Expand card table
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      MemRegion mangle_region(_the_space->end(),
                              (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    _the_space->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                             SIZE_FORMAT "K to " SIZE_FORMAT "K",
                             name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}


bool OneContigSpaceCardGeneration::grow_to_reserved() {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  }
  return success;
}

void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  // Shrink committed space
  _virtual_space.shrink_by(bytes);
  // Shrink space; this also shrinks the space's BOT
  _the_space->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(_the_space->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(_the_space->bottom(), new_word_size);
  // Shrink the card table
  Universe::heap()->barrier_set()->resize_covered_region(mr);

  if (Verbose && PrintGC) {
    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size + bytes;
    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                           name(), old_mem_size/K, new_mem_size/K);
  }
}
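// Note the ordering in grow_by() and shrink_by() above: when growing,
// the card table and block offset array are resized before the space's
// end() is raised; when shrinking, end() is lowered before they are
// resized.  Either way the covering metadata is never smaller than the
// space it describes.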
// Currently nothing to do.
void OneContigSpaceCardGeneration::prepare_for_verify() {}


void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
                                                 bool usedOnly) {
  blk->do_space(_the_space);
}

void OneContigSpaceCardGeneration::object_iterate_since_last_GC(ObjectClosure* blk) {
  // Deal with delayed initialization of _the_space,
  // and lack of initialization of _last_gc.
  if (_last_gc.space() == NULL) {
    assert(the_space() != NULL, "shouldn't be NULL");
    _last_gc = the_space()->bottom_mark();
  }
  the_space()->object_iterate_from(_last_gc, blk);
}

void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
  blk->set_generation(this);
  younger_refs_in_space_iterate(_the_space, blk);
  blk->reset_generation();
}

void OneContigSpaceCardGeneration::save_marks() {
  _the_space->set_saved_mark();
}


void OneContigSpaceCardGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}


bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}

#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
                                                                        \
void OneContigSpaceCardGeneration::                                     \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {          \
  blk->set_generation(this);                                            \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);             \
  blk->reset_generation();                                              \
  save_marks();                                                         \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN


void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
  _last_gc = WaterMark(the_space(), the_space()->top());

  // update the generation and space performance counters
  update_counters();
  if (ZapUnusedHeapArea) {
    the_space()->check_mangled_unused_area_complete();
  }
}

void OneContigSpaceCardGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  the_space()->set_top_for_allocations();
}

void OneContigSpaceCardGeneration::verify(bool allow_dirty) {
  the_space()->verify(allow_dirty);
}

void OneContigSpaceCardGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print(" the");
  the_space()->print_on(st);
}