/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/asPSYoungGen.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcUtil.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

ASPSYoungGen::ASPSYoungGen(size_t init_byte_size,
                           size_t minimum_byte_size,
                           size_t byte_size_limit) :
  PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
  _gen_size_limit(byte_size_limit) {
}


ASPSYoungGen::ASPSYoungGen(PSVirtualSpace* vs,
                           size_t init_byte_size,
                           size_t minimum_byte_size,
                           size_t byte_size_limit) :
  //PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
  PSYoungGen(vs->committed_size(), minimum_byte_size, byte_size_limit),
  _gen_size_limit(byte_size_limit) {

  assert(vs->committed_size() == init_byte_size, "Cannot replace with");

  _virtual_space = vs;
}

void ASPSYoungGen::initialize_virtual_space(ReservedSpace rs,
                                            size_t alignment) {
  assert(_init_gen_size != 0, "Should have a finite size");
  _virtual_space = new PSVirtualSpaceHighToLow(rs, alignment);
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void ASPSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
  initialize_virtual_space(rs, alignment);
  initialize_work();
}

size_t ASPSYoungGen::available_for_expansion() {

  size_t current_committed_size = virtual_space()->committed_size();
  assert((gen_size_limit() >= current_committed_size),
    "generation size limit is wrong");
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  size_t result = gen_size_limit() - current_committed_size;
  size_t result_aligned = align_size_down(result, heap->young_gen_alignment());
  return result_aligned;
}

// Return the number of bytes the young gen is willing to give up.
//
// Future implementations could check the survivors and if to_space is in the
// right place (below from_space), take a chunk from to_space.
size_t ASPSYoungGen::available_for_contraction() {

  size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  if (uncommitted_bytes != 0) {
    return uncommitted_bytes;
  }

  if (eden_space()->is_empty()) {
    // Respect the minimum size for eden and for the young gen as a whole.
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    const size_t eden_alignment = heap->intra_heap_alignment();
    const size_t gen_alignment = heap->young_gen_alignment();

    assert(eden_space()->capacity_in_bytes() >= eden_alignment,
      "Alignment is wrong");
    size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
    eden_avail = align_size_down(eden_avail, gen_alignment);

    assert(virtual_space()->committed_size() >= min_gen_size(),
      "minimum gen size is wrong");
    size_t gen_avail = virtual_space()->committed_size() - min_gen_size();
    assert(virtual_space()->is_aligned(gen_avail), "not aligned");

    const size_t max_contraction = MIN2(eden_avail, gen_avail);
    // See comment for ASPSOldGen::available_for_contraction()
    // for reasons the "increment" fraction is used.
    PSAdaptiveSizePolicy* policy = heap->size_policy();
    size_t result = policy->eden_increment_aligned_down(max_contraction);
    size_t result_aligned = align_size_down(result, gen_alignment);
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("ASPSYoungGen::available_for_contraction: "
        SIZE_FORMAT " K", result_aligned/K);
      gclog_or_tty->print_cr("  max_contraction " SIZE_FORMAT " K",
        max_contraction/K);
      gclog_or_tty->print_cr("  eden_avail " SIZE_FORMAT " K", eden_avail/K);
      gclog_or_tty->print_cr("  gen_avail " SIZE_FORMAT " K", gen_avail/K);
    }
    return result_aligned;

  }

  return 0;
}

// The current implementation only considers space up to the end of eden.
// If to_space is below from_space, to_space is not considered,
// although it could be.
size_t ASPSYoungGen::available_to_live() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t alignment = heap->intra_heap_alignment();

  // Include any space that is committed but is not in eden.
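  // The result is the gap between the low end of the committed region and
  // eden's bottom, plus (if eden is empty) eden's capacity less one alignment
  // unit.  For example (figures are illustrative only): with a 64K alignment,
  // a 128K gap and an empty 512K eden give 128K + (512K - 64K) = 576K.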
  size_t available = pointer_delta(eden_space()->bottom(),
                                   virtual_space()->low(),
                                   sizeof(char));

  const size_t eden_capacity = eden_space()->capacity_in_bytes();
  if (eden_space()->is_empty() && eden_capacity > alignment) {
    available += eden_capacity - alignment;
  }
  return available;
}

// Similar to PSYoungGen::resize_generation() but
//  allows sum of eden_size and 2 * survivor_size to exceed _max_gen_size
//  expands at the low end of the virtual space
//  moves the boundary between the generations in order to expand
//  some additional diagnostics
// If no additional changes are required, this can be deleted
// and the changes factored back into PSYoungGen::resize_generation().
bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
  const size_t alignment = virtual_space()->alignment();
  size_t orig_size = virtual_space()->committed_size();
  bool size_changed = false;

  // There used to be a guarantee here that
  //   (eden_size + 2*survivor_size) <= _max_gen_size
  // This requirement is enforced by the calculation of desired_size
  // below.  It may not be true on entry since the eden_size is not
  // bounded by the generation size.

  assert(max_size() == reserved().byte_size(), "max gen size problem?");
  assert(min_gen_size() <= orig_size && orig_size <= max_size(),
    "just checking");

  // Adjust new generation size
  const size_t eden_plus_survivors =
    align_size_up(eden_size + 2 * survivor_size, alignment);
  size_t desired_size = MAX2(MIN2(eden_plus_survivors, gen_size_limit()),
                             min_gen_size());
  assert(desired_size <= gen_size_limit(), "just checking");

  if (desired_size > orig_size) {
    // Grow the generation
    size_t change = desired_size - orig_size;
    HeapWord* prev_low = (HeapWord*) virtual_space()->low();
    if (!virtual_space()->expand_by(change)) {
      return false;
    }
    if (ZapUnusedHeapArea) {
      // Mangle newly committed space immediately because it
      // can be done here more simply than after the new
      // spaces have been computed.
      HeapWord* new_low = (HeapWord*) virtual_space()->low();
      assert(new_low < prev_low, "Did not grow");

      MemRegion mangle_region(new_low, prev_low);
      SpaceMangler::mangle_region(mangle_region);
    }
    size_changed = true;
  } else if (desired_size < orig_size) {
    size_t desired_change = orig_size - desired_size;

    // How much is available for shrinking.
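    // limit_gen_shrink() (inherited from PSYoungGen) is expected to cap the
    // shrink so that the generation stays at or above its minimum size and
    // no committed space holding live data is given up.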
    size_t available_bytes = limit_gen_shrink(desired_change);
    size_t change = MIN2(desired_change, available_bytes);
    virtual_space()->shrink_by(change);
    size_changed = true;
  } else {
    if (Verbose && PrintGC) {
      if (orig_size == gen_size_limit()) {
        gclog_or_tty->print_cr("ASPSYoung generation size at maximum: "
          SIZE_FORMAT "K", orig_size/K);
      } else if (orig_size == min_gen_size()) {
        gclog_or_tty->print_cr("ASPSYoung generation size at minimum: "
          SIZE_FORMAT "K", orig_size/K);
      }
    }
  }

  if (size_changed) {
    reset_after_change();
    if (Verbose && PrintGC) {
      size_t current_size = virtual_space()->committed_size();
      gclog_or_tty->print_cr("ASPSYoung generation size changed: "
        SIZE_FORMAT "K->" SIZE_FORMAT "K",
        orig_size/K, current_size/K);
    }
  }

  guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
            virtual_space()->committed_size() == max_size(), "Sanity");

  return true;
}

// Similar to PSYoungGen::resize_spaces() but
//  eden always starts at the low end of the committed virtual space
//  current implementation does not allow holes between the spaces
//  _young_generation_boundary has to be reset because it changes,
//  so additional verification is done.

void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
                                 size_t requested_survivor_size) {
  assert(UseAdaptiveSizePolicy, "sanity check");
  assert(requested_eden_size > 0 && requested_survivor_size > 0,
         "just checking");

  space_invariants();

  // We require eden and to space to be empty
  if ((!eden_space()->is_empty()) || (!to_space()->is_empty())) {
    return;
  }

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("PSYoungGen::resize_spaces(requested_eden_size: "
                  SIZE_FORMAT
                  ", requested_survivor_size: " SIZE_FORMAT ")",
                  requested_eden_size, requested_survivor_size);
    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  eden_space()->bottom(),
                  eden_space()->end(),
                  pointer_delta(eden_space()->end(),
                                eden_space()->bottom(),
                                sizeof(char)));
    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  from_space()->bottom(),
                  from_space()->end(),
                  pointer_delta(from_space()->end(),
                                from_space()->bottom(),
                                sizeof(char)));
    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  to_space()->bottom(),
                  to_space()->end(),
                  pointer_delta(to_space()->end(),
                                to_space()->bottom(),
                                sizeof(char)));
  }

  // There's nothing to do if the new sizes are the same as the current sizes.
  if (requested_survivor_size == to_space()->capacity_in_bytes() &&
      requested_survivor_size == from_space()->capacity_in_bytes() &&
      requested_eden_size == eden_space()->capacity_in_bytes()) {
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("    capacities are the right sizes, returning");
    }
    return;
  }

  char* eden_start = (char*)virtual_space()->low();
  char* eden_end = (char*)eden_space()->end();
  char* from_start = (char*)from_space()->bottom();
  char* from_end = (char*)from_space()->end();
  char* to_start = (char*)to_space()->bottom();
  char* to_end = (char*)to_space()->end();

  assert(eden_start < from_start, "Cannot push into from_space");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t alignment = heap->intra_heap_alignment();
  const bool maintain_minimum =
    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();

  bool eden_from_to_order = from_start < to_start;
  // Check whether from space is below to space
  if (eden_from_to_order) {
    // Eden, from, to

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("  Eden, from, to:");
    }

    // Set eden
    // "requested_eden_size" is a goal for the size of eden
    // and may not be attainable.  "eden_size" below is
    // calculated based on the location of from-space and
    // the goal for the size of eden.  from-space is
    // fixed in place because it contains live data.
    // The calculation is done this way to avoid 32bit
    // overflow (i.e., eden_start + requested_eden_size
    // may be too large for representation in 32 bits).
    size_t eden_size;
    if (maintain_minimum) {
      // Only make eden larger than the requested size if
      // the minimum size of the generation has to be maintained.
      // This could be done in general but policy at a higher
      // level is determining a requested size for eden and that
      // should be honored unless there is a fundamental reason.
      eden_size = pointer_delta(from_start,
                                eden_start,
                                sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(from_start, eden_start, sizeof(char)));
    }

    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // To may resize into from space as long as it is clear of live data.
    // From space must remain page aligned, though, so we need to do some
    // extra calculations.

    // First calculate an optimal to-space
    to_end   = (char*)virtual_space()->high();
    to_start = (char*)pointer_delta(to_end,
                                    (char*)requested_survivor_size,
                                    sizeof(char));

    // Does the optimal to-space overlap from-space?
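    // If it does, push to-space up past the live portion of from-space:
    // from_end is recomputed as the aligned top of from-space, and to-space
    // then begins at or above that point.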
    if (to_start < (char*)from_space()->end()) {
      assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

      // Calculate the minimum offset possible for from_end
      size_t from_size =
        pointer_delta(from_space()->top(), from_start, sizeof(char));

      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
      if (from_size == 0) {
        from_size = alignment;
      } else {
        from_size = align_size_up(from_size, alignment);
      }

      from_end = from_start + from_size;
      assert(from_end > from_start, "addition overflow or from_size problem");

      guarantee(from_end <= (char*)from_space()->end(),
        "from_end moved to the right");

      // Now update to_start with the new from_end
      to_start = MAX2(from_end, to_start);
    }

    guarantee(to_start != to_end, "to space is zero sized");

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    eden_start,
                    eden_end,
                    pointer_delta(eden_end, eden_start, sizeof(char)));
      gclog_or_tty->print_cr("    [from_start .. from_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    from_start,
                    from_end,
                    pointer_delta(from_end, from_start, sizeof(char)));
      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    to_start,
                    to_end,
                    pointer_delta(to_end, to_start, sizeof(char)));
    }
  } else {
    // Eden, to, from
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("  Eden, to, from:");
    }

    // To space gets priority over eden resizing. Note that we position
    // to space as if we were able to resize from space, even though from
    // space is not modified.
    // Giving eden priority was tried and gave poorer performance.
    to_end   = (char*)pointer_delta(virtual_space()->high(),
                                    (char*)requested_survivor_size,
                                    sizeof(char));
    to_end   = MIN2(to_end, from_start);
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));
    // if the space sizes are to be increased by several times then
    // 'to_start' will point beyond the young generation. In this case
    // 'to_start' should be adjusted.
    to_start = MAX2(to_start, eden_start + alignment);

    // Compute how big eden can be, then adjust end.
    // See comments above on calculating eden_end.
    size_t eden_size;
    if (maintain_minimum) {
      eden_size = pointer_delta(to_start, eden_start, sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(to_start, eden_start, sizeof(char)));
    }
    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // Don't let eden shrink down to 0 or less.
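    // Keep at least one alignment unit for eden, then push to_start up so
    // that to-space begins at or above the adjusted eden end.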
    eden_end = MAX2(eden_end, eden_start + alignment);
    to_start = MAX2(to_start, eden_end);

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    eden_start,
                    eden_end,
                    pointer_delta(eden_end, eden_start, sizeof(char)));
      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    to_start,
                    to_end,
                    pointer_delta(to_end, to_start, sizeof(char)));
      gclog_or_tty->print_cr("    [from_start .. from_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    from_start,
                    from_end,
                    pointer_delta(from_end, from_start, sizeof(char)));
    }
  }


  guarantee((HeapWord*)from_start <= from_space()->bottom(),
            "from start moved to the right");
  guarantee((HeapWord*)from_end >= from_space()->top(),
            "from end moved into live data");
  assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
  assert(is_object_aligned((intptr_t)from_start), "checking alignment");
  assert(is_object_aligned((intptr_t)to_start), "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);

  // Let's make sure the call to initialize doesn't reset "top"!
  DEBUG_ONLY(HeapWord* old_from_top = from_space()->top();)

  // For the PrintAdaptiveSizePolicy block below
  size_t old_from = from_space()->capacity_in_bytes();
  size_t old_to   = to_space()->capacity_in_bytes();

  if (ZapUnusedHeapArea) {
    // NUMA is a special case because a NUMA space is not mangled,
    // so that its pages are not prematurely bound to the wrong
    // memory (i.e., we don't want the GC thread to be the first
    // to touch the memory).  The survivor spaces are not NUMA
    // spaces and are mangled.
    if (UseNUMA) {
      if (eden_from_to_order) {
        mangle_survivors(from_space(), fromMR, to_space(), toMR);
      } else {
        mangle_survivors(to_space(), toMR, from_space(), fromMR);
      }
    }

    // If not mangling the spaces, do some checking to verify that
    // the spaces are already mangled.
    // The spaces should be correctly mangled at this point so
    // do some checking here. Note that they are not being mangled
    // in the calls to initialize().
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into an area
    // covered by another space and a failure of the check may
    // not correctly indicate which space is not properly mangled.

    HeapWord* limit = (HeapWord*) virtual_space()->high();
    eden_space()->check_mangled_unused_area(limit);
    from_space()->check_mangled_unused_area(limit);
    to_space()->check_mangled_unused_area(limit);
  }
  // When an existing space is being initialized, it is not
  // mangled because the space has been previously mangled.
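  // Note that from-space is initialized with DontClear below: it still holds
  // the objects that survived the last scavenge, so its contents (and top)
  // must be preserved.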
  eden_space()->initialize(edenMR,
                           SpaceDecorator::Clear,
                           SpaceDecorator::DontMangle);
  to_space()->initialize(toMR,
                         SpaceDecorator::Clear,
                         SpaceDecorator::DontMangle);
  from_space()->initialize(fromMR,
                           SpaceDecorator::DontClear,
                           SpaceDecorator::DontMangle);

  PSScavenge::set_young_generation_boundary(eden_space()->bottom());

  assert(from_space()->top() == old_from_top, "from top changed!");

  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
                  "collection: %d "
                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
                  heap->total_collections(),
                  old_from, old_to,
                  from_space()->capacity_in_bytes(),
                  to_space()->capacity_in_bytes());
    gclog_or_tty->cr();
  }
  space_invariants();
}

void ASPSYoungGen::reset_after_change() {
  assert_locked_or_safepoint(Heap_lock);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());
  PSScavenge::reference_processor()->set_span(_reserved);

  HeapWord* new_eden_bottom = (HeapWord*)virtual_space()->low();
  HeapWord* eden_bottom = eden_space()->bottom();
  if (new_eden_bottom != eden_bottom) {
    MemRegion eden_mr(new_eden_bottom, eden_space()->end());
    eden_space()->initialize(eden_mr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);
    PSScavenge::set_young_generation_boundary(eden_space()->bottom());
  }
  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  space_invariants();
}