/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

inline const char* PSOldGen::select_name() {
  return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
}

PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
                   size_t initial_size, size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, alignment, perf_data_name, level);
}

PSOldGen::PSOldGen(size_t initial_size,
                   size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{}

void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, alignment);
  initialize_work(perf_data_name, level);

  // The old gen can grow to gen_size_limit().  _reserved reflects only
  // the current maximum that can be committed.
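  // In other words, the invariant chain is
  //   committed_size() <= _reserved.byte_size() <= gen_size_limit()
  // (the first inequality is checked by space_invariants() below).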
  assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");

  initialize_performance_counters(perf_data_name, level);
}

void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {

  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
                           heap_word_size(_max_gen_size));
  assert(limit_reserved.byte_size() == _max_gen_size,
         "word vs bytes confusion");

  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces.  Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  CardTableModRefBS* ct = (CardTableModRefBS*)Universe::heap()->barrier_set();
  assert(ct->kind() == BarrierSet::CardTableModRef, "Sanity");

  // Verify that the start and the end of this generation are each the start
  // of a card.  If this were not true, a single card could span more than
  // one generation, which would cause problems when we commit/uncommit
  // memory, and when we clear and dirty cards.
  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're
    // probing off the end if we try.
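    // (is_card_aligned() maps the address to its card, and the card table
    // only covers the heap itself, so probing one-past-the-end of the whole
    // heap would trip that covered-region assert.)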
    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace(virtual_space()->alignment());

  if (_object_space == NULL)
    vm_exit_during_initialization("Could not allocate an old gen space");

  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);

  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

  if (_object_mark_sweep == NULL)
    vm_exit_during_initialization("Could not complete allocation of old generation");

  // Update the start_array
  start_array()->set_covered_region(cmr);
}

void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1,
                                           virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}

void PSOldGen::precompact() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Reset start array first.
  start_array()->reset();

  object_mark_sweep()->precompact();

  // Now compact the young gen
  heap->young_gen()->precompact();
}

void PSOldGen::adjust_pointers() {
  object_mark_sweep()->adjust_pointers();
}

void PSOldGen::compact() {
  object_mark_sweep()->compact(ZapUnusedHeapArea);
}

size_t PSOldGen::contiguous_available() const {
  return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
}

// Allocation.  We report all successful allocations to the size policy.
// Note that the perm gen does not use this method, and should not!
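// The expected slow path is: allocate_noexpand() fails, expand() commits
// more of the reserved space (taking ExpandHeap_lock), and the allocation
// is retried once.  A sketch of a call site, assuming the caller already
// holds Heap_lock or is at a safepoint:
//
//   HeapWord* obj = old_gen->allocate(word_size);
//   if (obj == NULL) {
//     // Full even after expansion; a collection is needed.
//   }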
HeapWord* PSOldGen::allocate(size_t word_size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = allocate_noexpand(word_size);

  if (res == NULL) {
    res = expand_and_allocate(word_size);
  }

  // Allocations in the old generation need to be reported
  if (res != NULL) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    heap->size_policy()->tenured_allocation(word_size);
  }

  return res;
}

HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return allocate_noexpand(word_size);
}

HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return cas_allocate_noexpand(word_size);
}

void PSOldGen::expand(size_t bytes) {
  if (bytes == 0) {
    return;
  }
  MutexLocker x(ExpandHeap_lock);
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_size_up(bytes, alignment);
  size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);

  if (UseNUMA) {
    // With NUMA we use round-robin page allocation for the old gen.  Expand
    // by at least one page per lgroup.  Alignment is larger than or equal to
    // the page size.
    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
  }
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = align_size_down(bytes, alignment);
  }

  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active_and_needs_gc()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }
}

bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what virtual_space()->expand_by(0) would return
  }
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area.  The memregion spans
      // end -> new_end; we assume that top -> end is already mangled.
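      //
      //   bottom       top               end            new_end
      //     | objects... | already mangled | newly committed |
      //
      // where end is the object space's current end() and new_end is the
      // virtual space's new high(); only [end, new_end) is mangled here.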
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize().
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
             "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result && Verbose && PrintGC) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                           SIZE_FORMAT "K to "
                           SIZE_FORMAT "K",
                           name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = true;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) warning("grow to reserve failed"));
  }
  return result;
}

void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  // Shrink by the aligned-down size, not the raw request, so the committed
  // boundary stays aligned.
  size_t size = align_size_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    virtual_space()->shrink_by(size);
    post_resize();

    if (Verbose && PrintGC) {
      size_t new_mem_size = virtual_space()->committed_size();
      size_t old_mem_size = new_mem_size + size;
      gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by "
                             SIZE_FORMAT "K to "
                             SIZE_FORMAT "K",
                             name(), old_mem_size/K, size/K, new_mem_size/K);
    }
  }
}

void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
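    // (e.g. used_in_bytes() close to SIZE_MAX plus desired_free_space wraps
    // past zero.)  Fall back to the hard maximum, which is what the MIN2
    // clamp below would yield anyway.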
    new_size = gen_size_limit();
  }
  // Adjust according to our min and max
  new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());

  assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
  new_size = align_size_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
                           "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
                           " new size: " SIZE_FORMAT " current size: " SIZE_FORMAT
                           " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
                           desired_free_space, used_in_bytes(), new_size, current_size,
                           gen_size_limit(), min_gen_size());
  }

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // shrink doesn't grab this lock, expand does.  Is that right?
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
                           "collection: %d "
                           "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                           heap->total_collections(),
                           size_before, virtual_space()->committed_size());
  }
}

// NOTE! We need to be careful about resizing.  During a GC, multiple
// allocators may be active during heap expansion.  If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
                          (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  Universe::heap()->barrier_set()->resize_covered_region(new_memregion);

  // ALWAYS do this last!!
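  // Re-initializing the object space publishes the new end() to concurrent
  // allocators (see the NOTE above), so the start array and card table must
  // already cover the new region by this point; that is why they were
  // resized first.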
  object_space()->initialize(new_memregion,
                             SpaceDecorator::DontClear,
                             SpaceDecorator::DontMangle);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
         "Sanity");
}

size_t PSOldGen::gen_size_limit() {
  return _max_gen_size;
}

void PSOldGen::reset_after_change() {
  ShouldNotReachHere();
  return;
}

size_t PSOldGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}

size_t PSOldGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}

void PSOldGen::print() const { print_on(tty); }

void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  if (PrintGCDetails && Verbose) {
    st->print(" total " SIZE_FORMAT ", used " SIZE_FORMAT,
              capacity_in_bytes(), used_in_bytes());
  } else {
    st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
              capacity_in_bytes()/K, used_in_bytes()/K);
  }
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               virtual_space()->low_boundary(),
               virtual_space()->high(),
               virtual_space()->high_boundary());

  st->print(" object"); object_space()->print_on(st);
}

void PSOldGen::print_used_change(size_t prev_used) const {
  gclog_or_tty->print(" [%s:", name());
  gclog_or_tty->print(" " SIZE_FORMAT "K"
                      "->" SIZE_FORMAT "K"
                      "(" SIZE_FORMAT "K)",
                      prev_used / K, used_in_bytes() / K,
                      capacity_in_bytes() / K);
  gclog_or_tty->print("]");
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

#ifndef PRODUCT

void PSOldGen::space_invariants() {
  assert(object_space()->end() == (HeapWord*) virtual_space()->high(),
         "Space invariant");
  assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->low_boundary() <= virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->high_boundary() >= virtual_space()->high(),
         "Space invariant");
  assert(virtual_space()->low_boundary() == (char*) _reserved.start(),
         "Space invariant");
  assert(virtual_space()->high_boundary() == (char*) _reserved.end(),
         "Space invariant");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
         "Space invariant");
}
#endif

void PSOldGen::verify() {
  object_space()->verify();
}

class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* gen, ObjectStartArray* start_array) :
    _gen(gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = (HeapWord*)obj + 1;
    guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj,
              "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated((HeapWord*)obj),
              "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check(this, &_start_array);
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif