Fri, 01 Oct 2010 16:12:54 -0700
6794422: Perm gen expansion policy for concurrent collectors
Summary: Concurrent collectors should expand the perm gen without a full STW GC, but possibly by triggering a concurrent collection. Temporary band-aid for G1 where no concurrent collection is kicked off since the perm gen is not collected concurrently.
Reviewed-by: johnc
1 /*
2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_permGen.cpp.incl"
28 HeapWord* PermGen::request_expand_and_allocate(Generation* gen, size_t size,
29 GCCause::Cause prev_cause) {
30 if (gen->capacity() < _capacity_expansion_limit ||
31 prev_cause != GCCause::_no_gc || UseG1GC) { // last disjunct is a temporary hack for G1
32 return gen->expand_and_allocate(size, false);
33 }
34 // We have reached the limit of capacity expansion where
35 // we will not expand further until a GC is done; request denied.
36 return NULL;
37 }
// Allocate "size" words in "gen" on behalf of the perm gen, escalating
// through GC attempts as needed: first a plain allocation, then a policy-
// controlled expansion, then a _permanent_generation_full collection, and
// finally a _last_ditch_collection.  Returns NULL only after the last-ditch
// collection has also failed to free enough space (or when allocation is
// impossible inside a JNI critical section with CheckJNICalls disabled).
HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
  GCCause::Cause next_cause = GCCause::_permanent_generation_full;
  GCCause::Cause prev_cause = GCCause::_no_gc;  // cause of the GC already tried, if any
  unsigned int gc_count_before, full_gc_count_before;
  HeapWord* obj;

  for (;;) {
    {
      MutexLocker ml(Heap_lock);
      // Fast path: allocate out of the generation's existing free space.
      if ((obj = gen->allocate(size, false)) != NULL) {
        return obj;
      }
      // Attempt to expand and allocate the requested space:
      // specific subtypes may use specific policy to either expand
      // or not. The default policy (see above) is to expand until
      // _capacity_expansion_limit, and no further unless a GC is done.
      // Concurrent collectors may decide to kick off a concurrent
      // collection under appropriate conditions.
      obj = request_expand_and_allocate(gen, size, prev_cause);

      // Stop when expansion succeeded, or when even the last-ditch
      // collection has already been tried (obj may be NULL here).
      if (obj != NULL || prev_cause == GCCause::_last_ditch_collection) {
        return obj;
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
      // Read the GC count while holding the Heap_lock
      gc_count_before = SharedHeap::heap()->total_collections();
      full_gc_count_before = SharedHeap::heap()->total_full_collections();
    }

    // Give up heap lock above, VMThread::execute below gets it back.
    // The recorded GC counts let the VM operation detect (and skip) a
    // collection that another thread completed in the meantime.
    VM_GenCollectForPermanentAllocation op(size, gc_count_before, full_gc_count_before,
                                           next_cause);
    VMThread::execute(&op);
    if (!op.prologue_succeeded() || op.gc_locked()) {
      assert(op.result() == NULL, "must be NULL if gc_locked() is true");
      continue;  // retry and/or stall as necessary
    }
    obj = op.result();
    assert(obj == NULL || SharedHeap::heap()->is_in_reserved(obj),
           "result not in heap");
    if (obj != NULL) {
      return obj;
    }
    // The GC ran but the subsequent allocation failed: escalate so the
    // next (final) iteration performs a last-ditch collection.
    prev_cause = next_cause;
    next_cause = GCCause::_last_ditch_collection;
  }
}
107 CompactingPermGen::CompactingPermGen(ReservedSpace rs,
108 ReservedSpace shared_rs,
109 size_t initial_byte_size,
110 GenRemSet* remset,
111 PermanentGenerationSpec* perm_spec)
112 {
113 CompactingPermGenGen* g =
114 new CompactingPermGenGen(rs, shared_rs, initial_byte_size, -1, remset,
115 NULL, perm_spec);
116 if (g == NULL)
117 vm_exit_during_initialization("Could not allocate a CompactingPermGen");
118 _gen = g;
120 g->initialize_performance_counters();
122 _capacity_expansion_limit = g->capacity() + MaxPermHeapExpansion;
123 }
// Allocate "size" words in the perm gen, delegating to the shared
// allocate/expand/GC retry protocol in PermGen::mem_allocate_in_gen.
HeapWord* CompactingPermGen::mem_allocate(size_t size) {
  return mem_allocate_in_gen(size, _gen);
}
129 void CompactingPermGen::compute_new_size() {
130 size_t desired_capacity = align_size_up(_gen->used(), MinPermHeapExpansion);
131 if (desired_capacity < PermSize) {
132 desired_capacity = PermSize;
133 }
134 if (_gen->capacity() > desired_capacity) {
135 _gen->shrink(_gen->capacity() - desired_capacity);
136 }
137 set_capacity_expansion_limit(_gen->capacity() + MaxPermHeapExpansion);
138 }