/*
 * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_psPermGen.cpp.incl"

PSPermGen::PSPermGen(ReservedSpace rs, size_t alignment,
                     size_t initial_size, size_t min_size, size_t max_size,
                     const char* gen_name, int level) :
  PSOldGen(rs, alignment, initial_size, min_size, max_size, gen_name, level),
  _last_used(0)
{
  assert(object_mark_sweep() != NULL, "Sanity");

  object_mark_sweep()->set_allowed_dead_ratio(PermMarkSweepDeadRatio);
  _avg_size = new AdaptivePaddedAverage(AdaptivePermSizeWeight,
                                        PermGenPadding);
}

// Allocate in the perm gen without expanding; if that fails, expand the
// generation and retry the allocation.
HeapWord* PSPermGen::allocate_permanent(size_t size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* obj = allocate_noexpand(size, false);

  if (obj == NULL) {
    obj = expand_and_allocate(size, false);
  }

  return obj;
}

void PSPermGen::compute_new_size(size_t used_before_collection) {
  // Update our padded average of objects allocated in perm
  // gen between collections.
  assert(used_before_collection >= _last_used,
         "negative allocation amount since last GC?");

  const size_t alloc_since_last_gc = used_before_collection - _last_used;
  _avg_size->sample(alloc_since_last_gc);

  const size_t current_live = used_in_bytes();
  // Stash away the current amount live for the next call to this method.
  _last_used = current_live;

  // We have different alignment constraints than the rest of the heap.
  const size_t alignment = MAX2(MinPermHeapExpansion,
                                virtual_space()->alignment());

  // Compute the desired size:
  //  The free space is the newly computed padded average,
  //  so the desired size is what's live + the free space.
  size_t desired_size = current_live + (size_t)_avg_size->padded_average();
  desired_size = align_size_up(desired_size, alignment);

  // ...and no larger or smaller than our max and min allowed.
  desired_size = MAX2(MIN2(desired_size, _max_gen_size), _min_gen_size);
  assert(desired_size <= _max_gen_size, "just checking");

  const size_t size_before = _virtual_space->committed_size();

  if (desired_size == size_before) {
    // No change; we're done.
    return;
  }

  {
    // We'll be growing or shrinking the heap: in either case,
    // we need to hold a lock.
    MutexLocker x(ExpandHeap_lock);
    if (desired_size > size_before) {
      // Growing
      const size_t change_bytes = desired_size - size_before;
      const size_t aligned_change_bytes =
        align_size_up(change_bytes, alignment);
      expand_by(aligned_change_bytes);
    } else {
      // Shrinking
      const size_t change_bytes = size_before - desired_size;
      const size_t aligned_change_bytes =
        align_size_down(change_bytes, alignment);
      shrink(aligned_change_bytes);
    }
  }

  // While this code isn't controlled by AdaptiveSizePolicy, it's
  // convenient to see all resizing decisions under the same flag.
  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    gclog_or_tty->print_cr("AdaptiveSizePolicy::perm generation size: "
                           "collection: %d "
                           "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                           heap->total_collections(),
                           size_before, _virtual_space->committed_size());
  }
}

// Delegate compaction of the perm space to the parallel compactor.
void PSPermGen::move_and_update(ParCompactionManager* cm) {
  PSParallelCompact::move_and_update(cm, PSParallelCompact::perm_space_id);
}

void PSPermGen::precompact() {
  // Reset start array first.
  _start_array.reset();
  object_mark_sweep()->precompact();
}