/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/cSpaceCounters.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/compactPermGen.hpp"
#include "memory/gcLocker.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generation.inline.hpp"
#include "memory/permGen.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"

HeapWord* PermGen::request_expand_and_allocate(Generation* gen, size_t size,
                                               GCCause::Cause prev_cause) {
  if (gen->capacity() < _capacity_expansion_limit ||
      prev_cause != GCCause::_no_gc || UseG1GC) {  // last disjunct is a temporary hack for G1
    return gen->expand_and_allocate(size, false);
  }
  // We have reached the limit of capacity expansion where
  // we will not expand further until a GC is done; request denied.
  return NULL;
}

HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
  GCCause::Cause next_cause = GCCause::_permanent_generation_full;
  GCCause::Cause prev_cause = GCCause::_no_gc;
  unsigned int gc_count_before, full_gc_count_before;
  HeapWord* obj;

  for (;;) {
    {
      MutexLocker ml(Heap_lock);
      if ((obj = gen->allocate(size, false)) != NULL) {
        return obj;
      }
      // Attempt to expand and allocate the requested space:
      // specific subtypes may use specific policy to either expand
      // or not. The default policy (see above) is to expand until
      // _capacity_expansion_limit, and no further unless a GC is done.
      // Concurrent collectors may decide to kick off a concurrent
      // collection under appropriate conditions.
      obj = request_expand_and_allocate(gen, size, prev_cause);

      if (obj != NULL || prev_cause == GCCause::_last_ditch_collection) {
        return obj;
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
      // Read the GC count while holding the Heap_lock
      gc_count_before = SharedHeap::heap()->total_collections();
      full_gc_count_before = SharedHeap::heap()->total_full_collections();
    }

    // Give up heap lock above, VMThread::execute below gets it back
    VM_GenCollectForPermanentAllocation op(size, gc_count_before, full_gc_count_before,
                                           next_cause);
    VMThread::execute(&op);
    if (!op.prologue_succeeded() || op.gc_locked()) {
      assert(op.result() == NULL, "must be NULL if gc_locked() is true");
      continue;  // retry and/or stall as necessary
    }
    obj = op.result();
    assert(obj == NULL || SharedHeap::heap()->is_in_reserved(obj),
           "result not in heap");
    if (obj != NULL) {
      return obj;
    }
    prev_cause = next_cause;
    next_cause = GCCause::_last_ditch_collection;
  }
}

CompactingPermGen::CompactingPermGen(ReservedSpace rs,
                                     ReservedSpace shared_rs,
                                     size_t initial_byte_size,
                                     GenRemSet* remset,
                                     PermanentGenerationSpec* perm_spec)
{
  CompactingPermGenGen* g =
    new CompactingPermGenGen(rs, shared_rs, initial_byte_size, -1, remset,
                             NULL, perm_spec);
  if (g == NULL)
    vm_exit_during_initialization("Could not allocate a CompactingPermGen");
  _gen = g;

  g->initialize_performance_counters();

  _capacity_expansion_limit = g->capacity() + MaxPermHeapExpansion;
}

HeapWord* CompactingPermGen::mem_allocate(size_t size) {
  return mem_allocate_in_gen(size, _gen);
}

// Shrink the generation towards its current usage (aligned up to
// MinPermHeapExpansion, but never below PermSize) and reset the
// expansion limit relative to the resulting capacity.
void CompactingPermGen::compute_new_size() {
  size_t desired_capacity = align_size_up(_gen->used(), MinPermHeapExpansion);
  if (desired_capacity < PermSize) {
    desired_capacity = PermSize;
  }
  if (_gen->capacity() > desired_capacity) {
    _gen->shrink(_gen->capacity() - desired_capacity);
  }
  set_capacity_expansion_limit(_gen->capacity() + MaxPermHeapExpansion);
}