src/share/vm/memory/permGen.cpp

Fri, 15 Apr 2011 09:36:28 -0400

author
coleenp
date
Fri, 15 Apr 2011 09:36:28 -0400
changeset 2777
8ce625481709
parent 2314
f95d63e2154a
permissions
-rw-r--r--

7032407: Crash in LinkResolver::runtime_resolve_virtual_method()
Summary: Make CDS reorder vtables so that dump time vtables match run time order, so when redefine classes reinitializes them, they aren't in the wrong order.
Reviewed-by: dcubed, acorn

duke@435 1 /*
ysr@2194 2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/shared/cSpaceCounters.hpp"
stefank@2314 27 #include "gc_implementation/shared/vmGCOperations.hpp"
stefank@2314 28 #include "gc_interface/collectedHeap.inline.hpp"
stefank@2314 29 #include "memory/blockOffsetTable.inline.hpp"
stefank@2314 30 #include "memory/compactPermGen.hpp"
stefank@2314 31 #include "memory/gcLocker.hpp"
stefank@2314 32 #include "memory/gcLocker.inline.hpp"
stefank@2314 33 #include "memory/genCollectedHeap.hpp"
stefank@2314 34 #include "memory/generation.inline.hpp"
stefank@2314 35 #include "memory/permGen.hpp"
stefank@2314 36 #include "memory/universe.hpp"
stefank@2314 37 #include "oops/oop.inline.hpp"
stefank@2314 38 #include "runtime/java.hpp"
stefank@2314 39 #include "runtime/vmThread.hpp"
duke@435 40
ysr@2194 41 HeapWord* PermGen::request_expand_and_allocate(Generation* gen, size_t size,
ysr@2194 42 GCCause::Cause prev_cause) {
ysr@2194 43 if (gen->capacity() < _capacity_expansion_limit ||
ysr@2194 44 prev_cause != GCCause::_no_gc || UseG1GC) { // last disjunct is a temporary hack for G1
ysr@2194 45 return gen->expand_and_allocate(size, false);
ysr@2194 46 }
ysr@2194 47 // We have reached the limit of capacity expansion where
ysr@2194 48 // we will not expand further until a GC is done; request denied.
ysr@2194 49 return NULL;
ysr@2194 50 }
ysr@2194 51
// Allocate "size" words in perm gen "gen", applying measures of increasing
// aggressiveness on each retry:
//   1. allocate from existing free space in the generation;
//   2. expand the generation, subject to subclass policy
//      (see request_expand_and_allocate());
//   3. run a collection with cause _permanent_generation_full;
//   4. run one final collection with cause _last_ditch_collection.
// Returns NULL only once a last-ditch collection has failed to free space,
// or when allocating would risk deadlock because the caller is inside a
// JNI critical section.  Steps 1-2 and the GC-count snapshots are done
// under the Heap_lock; the lock is released across the VM operation.
HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
  GCCause::Cause next_cause = GCCause::_permanent_generation_full;
  GCCause::Cause prev_cause = GCCause::_no_gc;
  unsigned int gc_count_before, full_gc_count_before;
  HeapWord* obj;

  for (;;) {
    {
      MutexLocker ml(Heap_lock);
      // Fast path: room already available in the generation.
      if ((obj = gen->allocate(size, false)) != NULL) {
        return obj;
      }
      // Attempt to expand and allocate the requested space:
      // specific subtypes may use specific policy to either expand
      // or not. The default policy (see above) is to expand until
      // _capacity_expansion_limit, and no further unless a GC is done.
      // Concurrent collectors may decide to kick off a concurrent
      // collection under appropriate conditions.
      obj = request_expand_and_allocate(gen, size, prev_cause);

      // Success, or give up entirely once even the last-ditch
      // collection (tried on the previous iteration) did not help.
      if (obj != NULL || prev_cause == GCCause::_last_ditch_collection) {
        return obj;
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
      // Read the GC count while holding the Heap_lock
      gc_count_before = SharedHeap::heap()->total_collections();
      full_gc_count_before = SharedHeap::heap()->total_full_collections();
    }

    // Give up heap lock above, VMThread::execute below gets it back
    VM_GenCollectForPermanentAllocation op(size, gc_count_before, full_gc_count_before,
                                           next_cause);
    VMThread::execute(&op);
    if (!op.prologue_succeeded() || op.gc_locked()) {
      // The collection was skipped (prologue failed, e.g. because
      // another GC intervened since the counts were snapshotted) or
      // was blocked by the GC locker.
      assert(op.result() == NULL, "must be NULL if gc_locked() is true");
      continue; // retry and/or stall as necessary
    }
    obj = op.result();
    assert(obj == NULL || SharedHeap::heap()->is_in_reserved(obj),
           "result not in heap");
    if (obj != NULL) {
      return obj;
    }
    // The collection ran but did not yield space; escalate to a
    // last-ditch collection on the next iteration.
    prev_cause = next_cause;
    next_cause = GCCause::_last_ditch_collection;
  }
}
apetrusenko@574 119
duke@435 120 CompactingPermGen::CompactingPermGen(ReservedSpace rs,
duke@435 121 ReservedSpace shared_rs,
duke@435 122 size_t initial_byte_size,
duke@435 123 GenRemSet* remset,
duke@435 124 PermanentGenerationSpec* perm_spec)
duke@435 125 {
duke@435 126 CompactingPermGenGen* g =
duke@435 127 new CompactingPermGenGen(rs, shared_rs, initial_byte_size, -1, remset,
duke@435 128 NULL, perm_spec);
duke@435 129 if (g == NULL)
duke@435 130 vm_exit_during_initialization("Could not allocate a CompactingPermGen");
duke@435 131 _gen = g;
duke@435 132
duke@435 133 g->initialize_performance_counters();
duke@435 134
duke@435 135 _capacity_expansion_limit = g->capacity() + MaxPermHeapExpansion;
duke@435 136 }
duke@435 137
// Allocate "size" words in the permanent generation, delegating to the
// shared retry/GC logic in PermGen::mem_allocate_in_gen().
HeapWord* CompactingPermGen::mem_allocate(size_t size) {
  return mem_allocate_in_gen(size, _gen);
}
duke@435 141
duke@435 142 void CompactingPermGen::compute_new_size() {
duke@435 143 size_t desired_capacity = align_size_up(_gen->used(), MinPermHeapExpansion);
duke@435 144 if (desired_capacity < PermSize) {
duke@435 145 desired_capacity = PermSize;
duke@435 146 }
duke@435 147 if (_gen->capacity() > desired_capacity) {
duke@435 148 _gen->shrink(_gen->capacity() - desired_capacity);
duke@435 149 }
ysr@2194 150 set_capacity_expansion_limit(_gen->capacity() + MaxPermHeapExpansion);
duke@435 151 }

mercurial