/*
 * Copyright 2001-2005 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_psPermGen.cpp.incl"

// Construct the permanent generation on top of PSOldGen.  The perm gen
// tolerates a higher dead ratio during mark-sweep (PermMarkSweepDeadRatio)
// and maintains a padded average of inter-collection allocation, which
// compute_new_size() uses to choose a new committed size after each GC.
PSPermGen::PSPermGen(ReservedSpace rs, size_t alignment,
                     size_t initial_size, size_t min_size, size_t max_size,
                     const char* gen_name, int level) :
  PSOldGen(rs, alignment, initial_size, min_size, max_size, gen_name, level),
  _last_used(0)
{
  assert(object_mark_sweep() != NULL, "Sanity");

  object_mark_sweep()->set_allowed_dead_ratio(PermMarkSweepDeadRatio);
  _avg_size = new AdaptivePaddedAverage(AdaptivePermSizeWeight,
                                        PermGenPadding);
}

// Allocate 'size' words in the perm gen.  First try without growing the
// committed size; only if that fails, expand the generation and retry.
// The result may still be NULL if the expansion path could not satisfy
// the request (failure behavior of expand_and_allocate is defined in the
// PSOldGen base class).  Caller must hold Heap_lock or be at a safepoint.
HeapWord* PSPermGen::allocate_permanent(size_t size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* obj = allocate_noexpand(size, false);

  if (obj == NULL) {
    obj = expand_and_allocate(size, false);
  }

  return obj;
}

// Recompute the desired committed size of the perm gen after a collection.
// The target is the currently live size plus the padded average of the
// amount allocated in the perm gen between collections, aligned and then
// clamped to [_min_gen_size, _max_gen_size].  The virtual space is grown
// or shrunk (under ExpandHeap_lock) to match the target.
void PSPermGen::compute_new_size(size_t used_before_collection) {
  // Update our padded average of objects allocated in perm
  // gen between collections.
  assert(used_before_collection >= _last_used,
         "negative allocation amount since last GC?");

  const size_t alloc_since_last_gc = used_before_collection - _last_used;
  _avg_size->sample(alloc_since_last_gc);

  const size_t current_live = used_in_bytes();
  // Stash away the current amount live for the next call to this method.
  _last_used = current_live;

  // We have different alignment constraints than the rest of the heap.
  const size_t alignment = MAX2(MinPermHeapExpansion,
                                virtual_space()->alignment());

  // Compute the desired size:
  // The free space is the newly computed padded average,
  // so the desired size is what's live + the free space.
  size_t desired_size = current_live + (size_t)_avg_size->padded_average();
  desired_size = align_size_up(desired_size, alignment);

  // ...and no larger or smaller than our max and min allowed.
  desired_size = MAX2(MIN2(desired_size, _max_gen_size), _min_gen_size);
  assert(desired_size <= _max_gen_size, "just checking");

  const size_t size_before = _virtual_space->committed_size();

  if (desired_size == size_before) {
    // no change, we're done
    return;
  }

  {
    // We'll be growing or shrinking the heap:  in either case,
    // we need to hold a lock.
    MutexLocker x(ExpandHeap_lock);
    if (desired_size > size_before) {
      const size_t change_bytes = desired_size - size_before;
      const size_t aligned_change_bytes =
        align_size_up(change_bytes, alignment);
      expand_by(aligned_change_bytes);
    } else {
      // Shrinking
      const size_t change_bytes =
        size_before - desired_size;
      const size_t aligned_change_bytes = align_size_down(change_bytes, alignment);
      shrink(aligned_change_bytes);
    }
  }

  // While this code isn't controlled by AdaptiveSizePolicy, it's
  // convenient to see all resizing decisions under the same flag.
  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    gclog_or_tty->print_cr("AdaptiveSizePolicy::perm generation size: "
                           "collection: %d "
                           "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                           heap->total_collections(),
                           size_before, _virtual_space->committed_size());
  }
}



// Parallel old compaction: move and update the objects in the perm space
// region; delegates to PSParallelCompact with this generation's space id.
void PSPermGen::move_and_update(ParCompactionManager* cm) {
  PSParallelCompact::move_and_update(cm, PSParallelCompact::perm_space_id);
}

void PSPermGen::precompact() {
  // Reset start array first.
  // NOTE the debug_only() brace trick: in product builds the 'if' vanishes
  // and the reset is unconditional; in debug builds the reset is skipped
  // when both UseParallelOldGC and VerifyParallelOldWithMarkSweep are set.
  debug_only(if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {)
  _start_array.reset();
  debug_only(})
  object_mark_sweep()->precompact();
}