/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
aoqi@0: * aoqi@0: */ aoqi@0: aoqi@0: #include "precompiled.hpp" aoqi@0: #include "gc_implementation/shared/gcTimer.hpp" aoqi@0: #include "gc_implementation/shared/gcTrace.hpp" aoqi@0: #include "gc_implementation/shared/spaceDecorator.hpp" aoqi@0: #include "gc_interface/collectedHeap.inline.hpp" aoqi@0: #include "memory/allocation.inline.hpp" aoqi@0: #include "memory/blockOffsetTable.inline.hpp" aoqi@0: #include "memory/cardTableRS.hpp" aoqi@0: #include "memory/gcLocker.inline.hpp" aoqi@0: #include "memory/genCollectedHeap.hpp" aoqi@0: #include "memory/genMarkSweep.hpp" aoqi@0: #include "memory/genOopClosures.hpp" aoqi@0: #include "memory/genOopClosures.inline.hpp" aoqi@0: #include "memory/generation.hpp" aoqi@0: #include "memory/generation.inline.hpp" aoqi@0: #include "memory/space.inline.hpp" aoqi@0: #include "oops/oop.inline.hpp" aoqi@0: #include "runtime/java.hpp" aoqi@0: #include "utilities/copy.hpp" aoqi@0: #include "utilities/events.hpp" aoqi@0: aoqi@0: PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC aoqi@0: aoqi@0: Generation::Generation(ReservedSpace rs, size_t initial_size, int level) : aoqi@0: _level(level), aoqi@0: _ref_processor(NULL) { aoqi@0: if (!_virtual_space.initialize(rs, initial_size)) { aoqi@0: vm_exit_during_initialization("Could not reserve enough space for " aoqi@0: "object heap"); aoqi@0: } aoqi@0: // Mangle all of the the initial generation. 
aoqi@0: if (ZapUnusedHeapArea) { aoqi@0: MemRegion mangle_region((HeapWord*)_virtual_space.low(), aoqi@0: (HeapWord*)_virtual_space.high()); aoqi@0: SpaceMangler::mangle_region(mangle_region); aoqi@0: } aoqi@0: _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(), aoqi@0: (HeapWord*)_virtual_space.high_boundary()); aoqi@0: } aoqi@0: aoqi@0: GenerationSpec* Generation::spec() { aoqi@0: GenCollectedHeap* gch = GenCollectedHeap::heap(); aoqi@0: assert(0 <= level() && level() < gch->_n_gens, "Bad gen level"); aoqi@0: return gch->_gen_specs[level()]; aoqi@0: } aoqi@0: aoqi@0: size_t Generation::max_capacity() const { aoqi@0: return reserved().byte_size(); aoqi@0: } aoqi@0: aoqi@0: void Generation::print_heap_change(size_t prev_used) const { aoqi@0: if (PrintGCDetails && Verbose) { aoqi@0: gclog_or_tty->print(" " SIZE_FORMAT aoqi@0: "->" SIZE_FORMAT aoqi@0: "(" SIZE_FORMAT ")", aoqi@0: prev_used, used(), capacity()); aoqi@0: } else { aoqi@0: gclog_or_tty->print(" " SIZE_FORMAT "K" aoqi@0: "->" SIZE_FORMAT "K" aoqi@0: "(" SIZE_FORMAT "K)", aoqi@0: prev_used / K, used() / K, capacity() / K); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // By default we get a single threaded default reference processor; aoqi@0: // generations needing multi-threaded refs processing or discovery override this method. 
aoqi@0: void Generation::ref_processor_init() { aoqi@0: assert(_ref_processor == NULL, "a reference processor already exists"); aoqi@0: assert(!_reserved.is_empty(), "empty generation?"); aoqi@0: _ref_processor = new ReferenceProcessor(_reserved); // a vanilla reference processor aoqi@0: if (_ref_processor == NULL) { aoqi@0: vm_exit_during_initialization("Could not allocate ReferenceProcessor object"); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: void Generation::print() const { print_on(tty); } aoqi@0: aoqi@0: void Generation::print_on(outputStream* st) const { aoqi@0: st->print(" %-20s", name()); aoqi@0: st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", aoqi@0: capacity()/K, used()/K); aoqi@0: st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", aoqi@0: _virtual_space.low_boundary(), aoqi@0: _virtual_space.high(), aoqi@0: _virtual_space.high_boundary()); aoqi@0: } aoqi@0: aoqi@0: void Generation::print_summary_info() { print_summary_info_on(tty); } aoqi@0: aoqi@0: void Generation::print_summary_info_on(outputStream* st) { aoqi@0: StatRecord* sr = stat_record(); aoqi@0: double time = sr->accumulated_time.seconds(); aoqi@0: st->print_cr("[Accumulated GC generation %d time %3.7f secs, " aoqi@0: "%d GC's, avg GC time %3.7f]", aoqi@0: level(), time, sr->invocations, aoqi@0: sr->invocations > 0 ? 
time / sr->invocations : 0.0); aoqi@0: } aoqi@0: aoqi@0: // Utility iterator classes aoqi@0: aoqi@0: class GenerationIsInReservedClosure : public SpaceClosure { aoqi@0: public: aoqi@0: const void* _p; aoqi@0: Space* sp; aoqi@0: virtual void do_space(Space* s) { aoqi@0: if (sp == NULL) { aoqi@0: if (s->is_in_reserved(_p)) sp = s; aoqi@0: } aoqi@0: } aoqi@0: GenerationIsInReservedClosure(const void* p) : _p(p), sp(NULL) {} aoqi@0: }; aoqi@0: aoqi@0: class GenerationIsInClosure : public SpaceClosure { aoqi@0: public: aoqi@0: const void* _p; aoqi@0: Space* sp; aoqi@0: virtual void do_space(Space* s) { aoqi@0: if (sp == NULL) { aoqi@0: if (s->is_in(_p)) sp = s; aoqi@0: } aoqi@0: } aoqi@0: GenerationIsInClosure(const void* p) : _p(p), sp(NULL) {} aoqi@0: }; aoqi@0: aoqi@0: bool Generation::is_in(const void* p) const { aoqi@0: GenerationIsInClosure blk(p); aoqi@0: ((Generation*)this)->space_iterate(&blk); aoqi@0: return blk.sp != NULL; aoqi@0: } aoqi@0: aoqi@0: DefNewGeneration* Generation::as_DefNewGeneration() { aoqi@0: assert((kind() == Generation::DefNew) || aoqi@0: (kind() == Generation::ParNew) || aoqi@0: (kind() == Generation::ASParNew), aoqi@0: "Wrong youngest generation type"); aoqi@0: return (DefNewGeneration*) this; aoqi@0: } aoqi@0: aoqi@0: Generation* Generation::next_gen() const { aoqi@0: GenCollectedHeap* gch = GenCollectedHeap::heap(); aoqi@0: int next = level() + 1; aoqi@0: if (next < gch->_n_gens) { aoqi@0: return gch->_gens[next]; aoqi@0: } else { aoqi@0: return NULL; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: size_t Generation::max_contiguous_available() const { aoqi@0: // The largest number of contiguous free words in this or any higher generation. 
aoqi@0: size_t max = 0; aoqi@0: for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) { aoqi@0: size_t avail = gen->contiguous_available(); aoqi@0: if (avail > max) { aoqi@0: max = avail; aoqi@0: } aoqi@0: } aoqi@0: return max; aoqi@0: } aoqi@0: aoqi@0: bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const { aoqi@0: size_t available = max_contiguous_available(); aoqi@0: bool res = (available >= max_promotion_in_bytes); aoqi@0: if (PrintGC && Verbose) { aoqi@0: gclog_or_tty->print_cr( aoqi@0: "Generation: promo attempt is%s safe: available("SIZE_FORMAT") %s max_promo("SIZE_FORMAT")", aoqi@0: res? "":" not", available, res? ">=":"<", aoqi@0: max_promotion_in_bytes); aoqi@0: } aoqi@0: return res; aoqi@0: } aoqi@0: aoqi@0: // Ignores "ref" and calls allocate(). aoqi@0: oop Generation::promote(oop obj, size_t obj_size) { aoqi@0: assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); aoqi@0: aoqi@0: #ifndef PRODUCT aoqi@0: if (Universe::heap()->promotion_should_fail()) { aoqi@0: return NULL; aoqi@0: } aoqi@0: #endif // #ifndef PRODUCT aoqi@0: aoqi@0: HeapWord* result = allocate(obj_size, false); aoqi@0: if (result != NULL) { aoqi@0: Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size); aoqi@0: return oop(result); aoqi@0: } else { aoqi@0: GenCollectedHeap* gch = GenCollectedHeap::heap(); aoqi@0: return gch->handle_failed_promotion(this, obj, obj_size); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: oop Generation::par_promote(int thread_num, aoqi@0: oop obj, markOop m, size_t word_sz) { aoqi@0: // Could do a bad general impl here that gets a lock. But no. aoqi@0: ShouldNotCallThis(); aoqi@0: return NULL; aoqi@0: } aoqi@0: aoqi@0: void Generation::par_promote_alloc_undo(int thread_num, aoqi@0: HeapWord* obj, size_t word_sz) { aoqi@0: // Could do a bad general impl here that gets a lock. But no. 
aoqi@0: guarantee(false, "No good general implementation."); aoqi@0: } aoqi@0: aoqi@0: Space* Generation::space_containing(const void* p) const { aoqi@0: GenerationIsInReservedClosure blk(p); aoqi@0: // Cast away const aoqi@0: ((Generation*)this)->space_iterate(&blk); aoqi@0: return blk.sp; aoqi@0: } aoqi@0: aoqi@0: // Some of these are mediocre general implementations. Should be aoqi@0: // overridden to get better performance. aoqi@0: aoqi@0: class GenerationBlockStartClosure : public SpaceClosure { aoqi@0: public: aoqi@0: const void* _p; aoqi@0: HeapWord* _start; aoqi@0: virtual void do_space(Space* s) { aoqi@0: if (_start == NULL && s->is_in_reserved(_p)) { aoqi@0: _start = s->block_start(_p); aoqi@0: } aoqi@0: } aoqi@0: GenerationBlockStartClosure(const void* p) { _p = p; _start = NULL; } aoqi@0: }; aoqi@0: aoqi@0: HeapWord* Generation::block_start(const void* p) const { aoqi@0: GenerationBlockStartClosure blk(p); aoqi@0: // Cast away const aoqi@0: ((Generation*)this)->space_iterate(&blk); aoqi@0: return blk._start; aoqi@0: } aoqi@0: aoqi@0: class GenerationBlockSizeClosure : public SpaceClosure { aoqi@0: public: aoqi@0: const HeapWord* _p; aoqi@0: size_t size; aoqi@0: virtual void do_space(Space* s) { aoqi@0: if (size == 0 && s->is_in_reserved(_p)) { aoqi@0: size = s->block_size(_p); aoqi@0: } aoqi@0: } aoqi@0: GenerationBlockSizeClosure(const HeapWord* p) { _p = p; size = 0; } aoqi@0: }; aoqi@0: aoqi@0: size_t Generation::block_size(const HeapWord* p) const { aoqi@0: GenerationBlockSizeClosure blk(p); aoqi@0: // Cast away const aoqi@0: ((Generation*)this)->space_iterate(&blk); aoqi@0: assert(blk.size > 0, "seems reasonable"); aoqi@0: return blk.size; aoqi@0: } aoqi@0: aoqi@0: class GenerationBlockIsObjClosure : public SpaceClosure { aoqi@0: public: aoqi@0: const HeapWord* _p; aoqi@0: bool is_obj; aoqi@0: virtual void do_space(Space* s) { aoqi@0: if (!is_obj && s->is_in_reserved(_p)) { aoqi@0: is_obj |= s->block_is_obj(_p); aoqi@0: } aoqi@0: } aoqi@0: 
GenerationBlockIsObjClosure(const HeapWord* p) { _p = p; is_obj = false; } aoqi@0: }; aoqi@0: aoqi@0: bool Generation::block_is_obj(const HeapWord* p) const { aoqi@0: GenerationBlockIsObjClosure blk(p); aoqi@0: // Cast away const aoqi@0: ((Generation*)this)->space_iterate(&blk); aoqi@0: return blk.is_obj; aoqi@0: } aoqi@0: aoqi@0: class GenerationOopIterateClosure : public SpaceClosure { aoqi@0: public: mgerdin@6978: ExtendedOopClosure* _cl; aoqi@0: virtual void do_space(Space* s) { mgerdin@6978: s->oop_iterate(_cl); aoqi@0: } mgerdin@6978: GenerationOopIterateClosure(ExtendedOopClosure* cl) : mgerdin@6978: _cl(cl) {} aoqi@0: }; aoqi@0: aoqi@0: void Generation::oop_iterate(ExtendedOopClosure* cl) { mgerdin@6978: GenerationOopIterateClosure blk(cl); aoqi@0: space_iterate(&blk); aoqi@0: } aoqi@0: aoqi@0: void Generation::younger_refs_in_space_iterate(Space* sp, aoqi@0: OopsInGenClosure* cl) { aoqi@0: GenRemSet* rs = SharedHeap::heap()->rem_set(); aoqi@0: rs->younger_refs_in_space_iterate(sp, cl); aoqi@0: } aoqi@0: aoqi@0: class GenerationObjIterateClosure : public SpaceClosure { aoqi@0: private: aoqi@0: ObjectClosure* _cl; aoqi@0: public: aoqi@0: virtual void do_space(Space* s) { aoqi@0: s->object_iterate(_cl); aoqi@0: } aoqi@0: GenerationObjIterateClosure(ObjectClosure* cl) : _cl(cl) {} aoqi@0: }; aoqi@0: aoqi@0: void Generation::object_iterate(ObjectClosure* cl) { aoqi@0: GenerationObjIterateClosure blk(cl); aoqi@0: space_iterate(&blk); aoqi@0: } aoqi@0: aoqi@0: class GenerationSafeObjIterateClosure : public SpaceClosure { aoqi@0: private: aoqi@0: ObjectClosure* _cl; aoqi@0: public: aoqi@0: virtual void do_space(Space* s) { aoqi@0: s->safe_object_iterate(_cl); aoqi@0: } aoqi@0: GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {} aoqi@0: }; aoqi@0: aoqi@0: void Generation::safe_object_iterate(ObjectClosure* cl) { aoqi@0: GenerationSafeObjIterateClosure blk(cl); aoqi@0: space_iterate(&blk); aoqi@0: } aoqi@0: aoqi@0: void 
Generation::prepare_for_compaction(CompactPoint* cp) { aoqi@0: // Generic implementation, can be specialized aoqi@0: CompactibleSpace* space = first_compaction_space(); aoqi@0: while (space != NULL) { aoqi@0: space->prepare_for_compaction(cp); aoqi@0: space = space->next_compaction_space(); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: class AdjustPointersClosure: public SpaceClosure { aoqi@0: public: aoqi@0: void do_space(Space* sp) { aoqi@0: sp->adjust_pointers(); aoqi@0: } aoqi@0: }; aoqi@0: aoqi@0: void Generation::adjust_pointers() { aoqi@0: // Note that this is done over all spaces, not just the compactible aoqi@0: // ones. aoqi@0: AdjustPointersClosure blk; aoqi@0: space_iterate(&blk, true); aoqi@0: } aoqi@0: aoqi@0: void Generation::compact() { aoqi@0: CompactibleSpace* sp = first_compaction_space(); aoqi@0: while (sp != NULL) { aoqi@0: sp->compact(); aoqi@0: sp = sp->next_compaction_space(); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size, aoqi@0: int level, aoqi@0: GenRemSet* remset) : aoqi@0: Generation(rs, initial_byte_size, level), _rs(remset), aoqi@0: _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(), aoqi@0: _used_at_prologue() aoqi@0: { aoqi@0: HeapWord* start = (HeapWord*)rs.base(); aoqi@0: size_t reserved_byte_size = rs.size(); aoqi@0: assert((uintptr_t(start) & 3) == 0, "bad alignment"); aoqi@0: assert((reserved_byte_size & 3) == 0, "bad alignment"); aoqi@0: MemRegion reserved_mr(start, heap_word_size(reserved_byte_size)); aoqi@0: _bts = new BlockOffsetSharedArray(reserved_mr, aoqi@0: heap_word_size(initial_byte_size)); aoqi@0: MemRegion committed_mr(start, heap_word_size(initial_byte_size)); aoqi@0: _rs->resize_covered_region(committed_mr); aoqi@0: if (_bts == NULL) aoqi@0: vm_exit_during_initialization("Could not allocate a BlockOffsetArray"); aoqi@0: aoqi@0: // Verify that the start and end of this generation is the start of a card. 
aoqi@0: // If this wasn't true, a single card could span more than on generation, aoqi@0: // which would cause problems when we commit/uncommit memory, and when we aoqi@0: // clear and dirty cards. aoqi@0: guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned"); aoqi@0: if (reserved_mr.end() != Universe::heap()->reserved_region().end()) { aoqi@0: // Don't check at the very end of the heap as we'll assert that we're probing off aoqi@0: // the end if we try. aoqi@0: guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned"); aoqi@0: } aoqi@0: _min_heap_delta_bytes = MinHeapDeltaBytes; aoqi@0: _capacity_at_prologue = initial_byte_size; aoqi@0: _used_at_prologue = 0; aoqi@0: } aoqi@0: aoqi@0: bool CardGeneration::expand(size_t bytes, size_t expand_bytes) { aoqi@0: assert_locked_or_safepoint(Heap_lock); aoqi@0: if (bytes == 0) { aoqi@0: return true; // That's what grow_by(0) would return aoqi@0: } aoqi@0: size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes); aoqi@0: if (aligned_bytes == 0){ aoqi@0: // The alignment caused the number of bytes to wrap. An expand_by(0) will aoqi@0: // return true with the implication that an expansion was done when it aoqi@0: // was not. A call to expand implies a best effort to expand by "bytes" aoqi@0: // but not a guarantee. Align down to give a best effort. This is likely aoqi@0: // the most that the generation can expand since it has some capacity to aoqi@0: // start with. 
aoqi@0: aligned_bytes = ReservedSpace::page_align_size_down(bytes); aoqi@0: } aoqi@0: size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); aoqi@0: bool success = false; aoqi@0: if (aligned_expand_bytes > aligned_bytes) { aoqi@0: success = grow_by(aligned_expand_bytes); aoqi@0: } aoqi@0: if (!success) { aoqi@0: success = grow_by(aligned_bytes); aoqi@0: } aoqi@0: if (!success) { aoqi@0: success = grow_to_reserved(); aoqi@0: } aoqi@0: if (PrintGC && Verbose) { aoqi@0: if (success && GC_locker::is_active_and_needs_gc()) { aoqi@0: gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead"); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: return success; aoqi@0: } aoqi@0: aoqi@0: aoqi@0: // No young generation references, clear this generation's cards. aoqi@0: void CardGeneration::clear_remembered_set() { aoqi@0: _rs->clear(reserved()); aoqi@0: } aoqi@0: aoqi@0: aoqi@0: // Objects in this generation may have moved, invalidate this aoqi@0: // generation's cards. aoqi@0: void CardGeneration::invalidate_remembered_set() { aoqi@0: _rs->invalidate(used_region()); aoqi@0: } aoqi@0: aoqi@0: aoqi@0: void CardGeneration::compute_new_size() { aoqi@0: assert(_shrink_factor <= 100, "invalid shrink factor"); aoqi@0: size_t current_shrink_factor = _shrink_factor; aoqi@0: _shrink_factor = 0; aoqi@0: aoqi@0: // We don't have floating point command-line arguments aoqi@0: // Note: argument processing ensures that MinHeapFreeRatio < 100. aoqi@0: const double minimum_free_percentage = MinHeapFreeRatio / 100.0; aoqi@0: const double maximum_used_percentage = 1.0 - minimum_free_percentage; aoqi@0: aoqi@0: // Compute some numbers about the state of the heap. 
aoqi@0: const size_t used_after_gc = used(); aoqi@0: const size_t capacity_after_gc = capacity(); aoqi@0: aoqi@0: const double min_tmp = used_after_gc / maximum_used_percentage; aoqi@0: size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx)); aoqi@0: // Don't shrink less than the initial generation size aoqi@0: minimum_desired_capacity = MAX2(minimum_desired_capacity, aoqi@0: spec()->init_size()); aoqi@0: assert(used_after_gc <= minimum_desired_capacity, "sanity check"); aoqi@0: aoqi@0: if (PrintGC && Verbose) { aoqi@0: const size_t free_after_gc = free(); aoqi@0: const double free_percentage = ((double)free_after_gc) / capacity_after_gc; aoqi@0: gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: "); aoqi@0: gclog_or_tty->print_cr(" " aoqi@0: " minimum_free_percentage: %6.2f" aoqi@0: " maximum_used_percentage: %6.2f", aoqi@0: minimum_free_percentage, aoqi@0: maximum_used_percentage); aoqi@0: gclog_or_tty->print_cr(" " aoqi@0: " free_after_gc : %6.1fK" aoqi@0: " used_after_gc : %6.1fK" aoqi@0: " capacity_after_gc : %6.1fK", aoqi@0: free_after_gc / (double) K, aoqi@0: used_after_gc / (double) K, aoqi@0: capacity_after_gc / (double) K); aoqi@0: gclog_or_tty->print_cr(" " aoqi@0: " free_percentage: %6.2f", aoqi@0: free_percentage); aoqi@0: } aoqi@0: aoqi@0: if (capacity_after_gc < minimum_desired_capacity) { aoqi@0: // If we have less free space than we want then expand aoqi@0: size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; aoqi@0: // Don't expand unless it's significant aoqi@0: if (expand_bytes >= _min_heap_delta_bytes) { aoqi@0: expand(expand_bytes, 0); // safe if expansion fails aoqi@0: } aoqi@0: if (PrintGC && Verbose) { aoqi@0: gclog_or_tty->print_cr(" expanding:" aoqi@0: " minimum_desired_capacity: %6.1fK" aoqi@0: " expand_bytes: %6.1fK" aoqi@0: " _min_heap_delta_bytes: %6.1fK", aoqi@0: minimum_desired_capacity / (double) K, aoqi@0: expand_bytes / (double) K, aoqi@0: _min_heap_delta_bytes / (double) K); aoqi@0: } 
aoqi@0: return; aoqi@0: } aoqi@0: aoqi@0: // No expansion, now see if we want to shrink aoqi@0: size_t shrink_bytes = 0; aoqi@0: // We would never want to shrink more than this aoqi@0: size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity; aoqi@0: aoqi@0: if (MaxHeapFreeRatio < 100) { aoqi@0: const double maximum_free_percentage = MaxHeapFreeRatio / 100.0; aoqi@0: const double minimum_used_percentage = 1.0 - maximum_free_percentage; aoqi@0: const double max_tmp = used_after_gc / minimum_used_percentage; aoqi@0: size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx)); aoqi@0: maximum_desired_capacity = MAX2(maximum_desired_capacity, aoqi@0: spec()->init_size()); aoqi@0: if (PrintGC && Verbose) { aoqi@0: gclog_or_tty->print_cr(" " aoqi@0: " maximum_free_percentage: %6.2f" aoqi@0: " minimum_used_percentage: %6.2f", aoqi@0: maximum_free_percentage, aoqi@0: minimum_used_percentage); aoqi@0: gclog_or_tty->print_cr(" " aoqi@0: " _capacity_at_prologue: %6.1fK" aoqi@0: " minimum_desired_capacity: %6.1fK" aoqi@0: " maximum_desired_capacity: %6.1fK", aoqi@0: _capacity_at_prologue / (double) K, aoqi@0: minimum_desired_capacity / (double) K, aoqi@0: maximum_desired_capacity / (double) K); aoqi@0: } aoqi@0: assert(minimum_desired_capacity <= maximum_desired_capacity, aoqi@0: "sanity check"); aoqi@0: aoqi@0: if (capacity_after_gc > maximum_desired_capacity) { aoqi@0: // Capacity too large, compute shrinking size aoqi@0: shrink_bytes = capacity_after_gc - maximum_desired_capacity; aoqi@0: // We don't want shrink all the way back to initSize if people call aoqi@0: // System.gc(), because some programs do that between "phases" and then aoqi@0: // we'd just have to grow the heap up again for the next phase. So we aoqi@0: // damp the shrinking: 0% on the first call, 10% on the second call, 40% aoqi@0: // on the third call, and 100% by the fourth call. But if we recompute aoqi@0: // size without shrinking, it goes back to 0%. 
aoqi@0: shrink_bytes = shrink_bytes / 100 * current_shrink_factor; aoqi@0: assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size"); aoqi@0: if (current_shrink_factor == 0) { aoqi@0: _shrink_factor = 10; aoqi@0: } else { aoqi@0: _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100); aoqi@0: } aoqi@0: if (PrintGC && Verbose) { aoqi@0: gclog_or_tty->print_cr(" " aoqi@0: " shrinking:" aoqi@0: " initSize: %.1fK" aoqi@0: " maximum_desired_capacity: %.1fK", aoqi@0: spec()->init_size() / (double) K, aoqi@0: maximum_desired_capacity / (double) K); aoqi@0: gclog_or_tty->print_cr(" " aoqi@0: " shrink_bytes: %.1fK" aoqi@0: " current_shrink_factor: %d" aoqi@0: " new shrink factor: %d" aoqi@0: " _min_heap_delta_bytes: %.1fK", aoqi@0: shrink_bytes / (double) K, aoqi@0: current_shrink_factor, aoqi@0: _shrink_factor, aoqi@0: _min_heap_delta_bytes / (double) K); aoqi@0: } aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: if (capacity_after_gc > _capacity_at_prologue) { aoqi@0: // We might have expanded for promotions, in which case we might want to aoqi@0: // take back that expansion if there's room after GC. That keeps us from aoqi@0: // stretching the heap with promotions when there's plenty of room. 
aoqi@0: size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue; aoqi@0: expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes); aoqi@0: // We have two shrinking computations, take the largest aoqi@0: shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion); aoqi@0: assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size"); aoqi@0: if (PrintGC && Verbose) { aoqi@0: gclog_or_tty->print_cr(" " aoqi@0: " aggressive shrinking:" aoqi@0: " _capacity_at_prologue: %.1fK" aoqi@0: " capacity_after_gc: %.1fK" aoqi@0: " expansion_for_promotion: %.1fK" aoqi@0: " shrink_bytes: %.1fK", aoqi@0: capacity_after_gc / (double) K, aoqi@0: _capacity_at_prologue / (double) K, aoqi@0: expansion_for_promotion / (double) K, aoqi@0: shrink_bytes / (double) K); aoqi@0: } aoqi@0: } aoqi@0: // Don't shrink unless it's significant aoqi@0: if (shrink_bytes >= _min_heap_delta_bytes) { aoqi@0: shrink(shrink_bytes); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // Currently nothing to do. 
aoqi@0: void CardGeneration::prepare_for_verify() {} aoqi@0: aoqi@0: aoqi@0: void OneContigSpaceCardGeneration::collect(bool full, aoqi@0: bool clear_all_soft_refs, aoqi@0: size_t size, aoqi@0: bool is_tlab) { aoqi@0: GenCollectedHeap* gch = GenCollectedHeap::heap(); aoqi@0: aoqi@0: SpecializationStats::clear(); aoqi@0: // Temporarily expand the span of our ref processor, so aoqi@0: // refs discovery is over the entire heap, not just this generation aoqi@0: ReferenceProcessorSpanMutator aoqi@0: x(ref_processor(), gch->reserved_region()); aoqi@0: aoqi@0: STWGCTimer* gc_timer = GenMarkSweep::gc_timer(); aoqi@0: gc_timer->register_gc_start(); aoqi@0: aoqi@0: SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer(); aoqi@0: gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start()); aoqi@0: aoqi@0: GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs); aoqi@0: aoqi@0: gc_timer->register_gc_end(); aoqi@0: aoqi@0: gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions()); aoqi@0: aoqi@0: SpecializationStats::print(); aoqi@0: } aoqi@0: aoqi@0: HeapWord* aoqi@0: OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size, aoqi@0: bool is_tlab, aoqi@0: bool parallel) { aoqi@0: assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation"); aoqi@0: if (parallel) { aoqi@0: MutexLocker x(ParGCRareEvent_lock); aoqi@0: HeapWord* result = NULL; aoqi@0: size_t byte_size = word_size * HeapWordSize; aoqi@0: while (true) { aoqi@0: expand(byte_size, _min_heap_delta_bytes); aoqi@0: if (GCExpandToAllocateDelayMillis > 0) { aoqi@0: os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false); aoqi@0: } aoqi@0: result = _the_space->par_allocate(word_size); aoqi@0: if ( result != NULL) { aoqi@0: return result; aoqi@0: } else { aoqi@0: // If there's not enough expansion space available, give up. 
aoqi@0: if (_virtual_space.uncommitted_size() < byte_size) { aoqi@0: return NULL; aoqi@0: } aoqi@0: // else try again aoqi@0: } aoqi@0: } aoqi@0: } else { aoqi@0: expand(word_size*HeapWordSize, _min_heap_delta_bytes); aoqi@0: return _the_space->allocate(word_size); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) { aoqi@0: GCMutexLocker x(ExpandHeap_lock); aoqi@0: return CardGeneration::expand(bytes, expand_bytes); aoqi@0: } aoqi@0: aoqi@0: aoqi@0: void OneContigSpaceCardGeneration::shrink(size_t bytes) { aoqi@0: assert_locked_or_safepoint(ExpandHeap_lock); aoqi@0: size_t size = ReservedSpace::page_align_size_down(bytes); aoqi@0: if (size > 0) { aoqi@0: shrink_by(size); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: aoqi@0: size_t OneContigSpaceCardGeneration::capacity() const { aoqi@0: return _the_space->capacity(); aoqi@0: } aoqi@0: aoqi@0: aoqi@0: size_t OneContigSpaceCardGeneration::used() const { aoqi@0: return _the_space->used(); aoqi@0: } aoqi@0: aoqi@0: aoqi@0: size_t OneContigSpaceCardGeneration::free() const { aoqi@0: return _the_space->free(); aoqi@0: } aoqi@0: aoqi@0: MemRegion OneContigSpaceCardGeneration::used_region() const { aoqi@0: return the_space()->used_region(); aoqi@0: } aoqi@0: aoqi@0: size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const { aoqi@0: return _the_space->free(); aoqi@0: } aoqi@0: aoqi@0: size_t OneContigSpaceCardGeneration::contiguous_available() const { aoqi@0: return _the_space->free() + _virtual_space.uncommitted_size(); aoqi@0: } aoqi@0: aoqi@0: bool OneContigSpaceCardGeneration::grow_by(size_t bytes) { aoqi@0: assert_locked_or_safepoint(ExpandHeap_lock); aoqi@0: bool result = _virtual_space.expand_by(bytes); aoqi@0: if (result) { aoqi@0: size_t new_word_size = aoqi@0: heap_word_size(_virtual_space.committed_size()); aoqi@0: MemRegion mr(_the_space->bottom(), new_word_size); aoqi@0: // Expand card table aoqi@0: 
Universe::heap()->barrier_set()->resize_covered_region(mr); aoqi@0: // Expand shared block offset array aoqi@0: _bts->resize(new_word_size); aoqi@0: aoqi@0: // Fix for bug #4668531 aoqi@0: if (ZapUnusedHeapArea) { aoqi@0: MemRegion mangle_region(_the_space->end(), aoqi@0: (HeapWord*)_virtual_space.high()); aoqi@0: SpaceMangler::mangle_region(mangle_region); aoqi@0: } aoqi@0: aoqi@0: // Expand space -- also expands space's BOT aoqi@0: // (which uses (part of) shared array above) aoqi@0: _the_space->set_end((HeapWord*)_virtual_space.high()); aoqi@0: aoqi@0: // update the space and generation capacity counters aoqi@0: update_counters(); aoqi@0: aoqi@0: if (Verbose && PrintGC) { aoqi@0: size_t new_mem_size = _virtual_space.committed_size(); aoqi@0: size_t old_mem_size = new_mem_size - bytes; aoqi@0: gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " aoqi@0: SIZE_FORMAT "K to " SIZE_FORMAT "K", aoqi@0: name(), old_mem_size/K, bytes/K, new_mem_size/K); aoqi@0: } aoqi@0: } aoqi@0: return result; aoqi@0: } aoqi@0: aoqi@0: aoqi@0: bool OneContigSpaceCardGeneration::grow_to_reserved() { aoqi@0: assert_locked_or_safepoint(ExpandHeap_lock); aoqi@0: bool success = true; aoqi@0: const size_t remaining_bytes = _virtual_space.uncommitted_size(); aoqi@0: if (remaining_bytes > 0) { aoqi@0: success = grow_by(remaining_bytes); aoqi@0: DEBUG_ONLY(if (!success) warning("grow to reserved failed");) aoqi@0: } aoqi@0: return success; aoqi@0: } aoqi@0: aoqi@0: void OneContigSpaceCardGeneration::shrink_by(size_t bytes) { aoqi@0: assert_locked_or_safepoint(ExpandHeap_lock); aoqi@0: // Shrink committed space aoqi@0: _virtual_space.shrink_by(bytes); aoqi@0: // Shrink space; this also shrinks the space's BOT aoqi@0: _the_space->set_end((HeapWord*) _virtual_space.high()); aoqi@0: size_t new_word_size = heap_word_size(_the_space->capacity()); aoqi@0: // Shrink the shared block offset array aoqi@0: _bts->resize(new_word_size); aoqi@0: MemRegion mr(_the_space->bottom(), new_word_size); 
aoqi@0: // Shrink the card table aoqi@0: Universe::heap()->barrier_set()->resize_covered_region(mr); aoqi@0: aoqi@0: if (Verbose && PrintGC) { aoqi@0: size_t new_mem_size = _virtual_space.committed_size(); aoqi@0: size_t old_mem_size = new_mem_size + bytes; aoqi@0: gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K", aoqi@0: name(), old_mem_size/K, new_mem_size/K); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // Currently nothing to do. aoqi@0: void OneContigSpaceCardGeneration::prepare_for_verify() {} aoqi@0: aoqi@0: aoqi@0: // Override for a card-table generation with one contiguous aoqi@0: // space. NOTE: For reasons that are lost in the fog of history, aoqi@0: // this code is used when you iterate over perm gen objects, aoqi@0: // even when one uses CDS, where the perm gen has a couple of aoqi@0: // other spaces; this is because CompactingPermGenGen derives aoqi@0: // from OneContigSpaceCardGeneration. This should be cleaned up, aoqi@0: // see CR 6897789.. aoqi@0: void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) { aoqi@0: _the_space->object_iterate(blk); aoqi@0: } aoqi@0: aoqi@0: void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk, aoqi@0: bool usedOnly) { aoqi@0: blk->do_space(_the_space); aoqi@0: } aoqi@0: aoqi@0: void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) { aoqi@0: blk->set_generation(this); aoqi@0: younger_refs_in_space_iterate(_the_space, blk); aoqi@0: blk->reset_generation(); aoqi@0: } aoqi@0: aoqi@0: void OneContigSpaceCardGeneration::save_marks() { aoqi@0: _the_space->set_saved_mark(); aoqi@0: } aoqi@0: aoqi@0: aoqi@0: void OneContigSpaceCardGeneration::reset_saved_marks() { aoqi@0: _the_space->reset_saved_mark(); aoqi@0: } aoqi@0: aoqi@0: aoqi@0: bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() { aoqi@0: return _the_space->saved_mark_at_top(); aoqi@0: } aoqi@0: aoqi@0: #define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \ 
aoqi@0: \ aoqi@0: void OneContigSpaceCardGeneration:: \ aoqi@0: oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \ aoqi@0: blk->set_generation(this); \ aoqi@0: _the_space->oop_since_save_marks_iterate##nv_suffix(blk); \ aoqi@0: blk->reset_generation(); \ aoqi@0: save_marks(); \ aoqi@0: } aoqi@0: aoqi@0: ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN) aoqi@0: aoqi@0: #undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN aoqi@0: aoqi@0: aoqi@0: void OneContigSpaceCardGeneration::gc_epilogue(bool full) { aoqi@0: _last_gc = WaterMark(the_space(), the_space()->top()); aoqi@0: aoqi@0: // update the generation and space performance counters aoqi@0: update_counters(); aoqi@0: if (ZapUnusedHeapArea) { aoqi@0: the_space()->check_mangled_unused_area_complete(); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: void OneContigSpaceCardGeneration::record_spaces_top() { aoqi@0: assert(ZapUnusedHeapArea, "Not mangling unused space"); aoqi@0: the_space()->set_top_for_allocations(); aoqi@0: } aoqi@0: aoqi@0: void OneContigSpaceCardGeneration::verify() { aoqi@0: the_space()->verify(); aoqi@0: } aoqi@0: aoqi@0: void OneContigSpaceCardGeneration::print_on(outputStream* st) const { aoqi@0: Generation::print_on(st); aoqi@0: st->print(" the"); aoqi@0: the_space()->print_on(st); aoqi@0: }