src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp

Thu, 22 Sep 2011 10:57:37 -0700

author
johnc
date
Thu, 22 Sep 2011 10:57:37 -0700
changeset 3175
4dfb2df418f2
parent 2971
c9ca3f51cf41
permissions
-rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp

duke@435 1 /*
jcoomes@2783 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
stefank@2314 27 #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
stefank@2314 28 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
stefank@2314 29 #include "gc_implementation/parallelScavenge/psPermGen.hpp"
stefank@2314 30 #include "gc_implementation/shared/gcUtil.hpp"
stefank@2314 31 #include "gc_implementation/shared/markSweep.inline.hpp"
stefank@2314 32 #include "oops/markOop.inline.hpp"
duke@435 33
// Construct the permanent generation. Delegates all space/virtual-space
// setup to PSOldGen and then configures perm-specific policy:
//  - the mark-sweep decorator's allowed dead ratio comes from
//    PermMarkSweepDeadRatio (perm objects are rarely reclaimed, so more
//    dead wood is tolerated before compaction moves things), and
//  - _avg_size is the padded average of bytes allocated in perm gen
//    between collections, consumed by compute_new_size() below.
// _last_used starts at 0 so the first compute_new_size() call samples the
// entire live size as "allocated since last GC".
PSPermGen::PSPermGen(ReservedSpace rs, size_t alignment,
                     size_t initial_size, size_t min_size, size_t max_size,
                     const char* gen_name, int level) :
  PSOldGen(rs, alignment, initial_size, min_size, max_size, gen_name, level),
  _last_used(0)
{
  assert(object_mark_sweep() != NULL, "Sanity");

  object_mark_sweep()->set_allowed_dead_ratio(PermMarkSweepDeadRatio);
  _avg_size = new AdaptivePaddedAverage(AdaptivePermSizeWeight,
                                        PermGenPadding);
}
duke@435 46
duke@435 47 HeapWord* PSPermGen::allocate_permanent(size_t size) {
duke@435 48 assert_locked_or_safepoint(Heap_lock);
tonyp@2971 49 HeapWord* obj = allocate_noexpand(size);
duke@435 50
duke@435 51 if (obj == NULL) {
tonyp@2971 52 obj = expand_and_allocate(size);
duke@435 53 }
duke@435 54
duke@435 55 return obj;
duke@435 56 }
duke@435 57
// Resize the permanent generation after a collection. The desired
// committed size is (current live bytes) + (padded average of bytes
// allocated between collections), clamped to [_min_gen_size,
// _max_gen_size]. Grows or shrinks the virtual space accordingly.
// 'used_before_collection' is the perm gen occupancy sampled before the
// collection ran; current_live (used_in_bytes()) is the post-GC occupancy.
void PSPermGen::compute_new_size(size_t used_before_collection) {
  // Update our padded average of objects allocated in perm
  // gen between collections.
  assert(used_before_collection >= _last_used,
         "negative allocation amount since last GC?");

  // Allocation since the last GC: pre-collection occupancy minus the
  // post-collection occupancy stashed at the end of the previous call.
  const size_t alloc_since_last_gc = used_before_collection - _last_used;
  _avg_size->sample(alloc_since_last_gc);

  const size_t current_live = used_in_bytes();
  // Stash away the current amount live for the next call to this method.
  _last_used = current_live;

  // We have different alignment constraints than the rest of the heap.
  const size_t alignment = MAX2(MinPermHeapExpansion,
                                virtual_space()->alignment());

  // Compute the desired size:
  // The free space is the newly computed padded average,
  // so the desired size is what's live + the free space.
  size_t desired_size = current_live + (size_t)_avg_size->padded_average();
  desired_size = align_size_up(desired_size, alignment);

  // ...and no larger or smaller than our max and min allowed.
  desired_size = MAX2(MIN2(desired_size, _max_gen_size), _min_gen_size);
  assert(desired_size <= _max_gen_size, "just checking");

  const size_t size_before = _virtual_space->committed_size();

  if (desired_size == size_before) {
    // no change, we're done
    return;
  }

  {
    // We'll be growing or shrinking the heap: in either case,
    // we need to hold a lock.
    MutexLocker x(ExpandHeap_lock);
    if (desired_size > size_before) {
      // Growing: expand by the delta, rounded up so we commit at least
      // as much as requested.
      const size_t change_bytes = desired_size - size_before;
      const size_t aligned_change_bytes =
        align_size_up(change_bytes, alignment);
      expand_by(aligned_change_bytes);
    } else {
      // Shrinking: round the delta down so we never uncommit below the
      // desired size.
      const size_t change_bytes =
        size_before - desired_size;
      const size_t aligned_change_bytes = align_size_down(change_bytes, alignment);
      shrink(aligned_change_bytes);
    }
  }

  // While this code isn't controlled by AdaptiveSizePolicy, it's
  // convenient to see all resizing decisions under the same flag.
  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    gclog_or_tty->print_cr("AdaptiveSizePolicy::perm generation size: "
                           "collection: %d "
                           "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                           heap->total_collections(),
                           size_before, _virtual_space->committed_size());
  }
}
duke@435 123
// Prepare the permanent generation for the compaction phase of a full GC.
// The start array must be reset before the mark-sweep precompact pass,
// since that pass will repopulate it while computing new object locations.
void PSPermGen::precompact() {
  // Reset start array first.
  _start_array.reset();
  object_mark_sweep()->precompact();
}

mercurial