Thu, 11 Dec 2008 12:05:08 -0800
6578152: fill_region_with_object has usability and safety issues
Reviewed-by: apetrusenko, ysr
duke@435 | 1 | /* |
xdono@631 | 2 | * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | #include "incls/_precompiled.incl" |
duke@435 | 26 | #include "incls/_permGen.cpp.incl" |
duke@435 | 27 | |
// Slow-path allocation of "size" words from permanent generation "gen".
// Policy, in order, per loop iteration (all under the Heap_lock):
//   1. try a plain allocation;
//   2. if capacity is below the soft expansion limit (or a GC has already
//      been tried), try expanding the generation and allocating;
//   3. if a JNI critical section is blocking GC, either stall until it
//      clears (and retry) or give up with NULL if this thread itself is
//      in the critical section;
//   4. otherwise schedule a safepoint collection (first a "perm gen full"
//      GC, then a last-ditch collection) and retry.
// Returns NULL only when allocation is impossible (last-ditch GC failed,
// or we would deadlock on our own JNI critical section).
HeapWord* PermGen::mem_allocate_in_gen(size_t size, Generation* gen) {
  GCCause::Cause next_cause = GCCause::_permanent_generation_full;
  GCCause::Cause prev_cause = GCCause::_no_gc;
  unsigned int gc_count_before, full_gc_count_before;
  HeapWord* obj;

  for (;;) {
    {
      MutexLocker ml(Heap_lock);
      if ((obj = gen->allocate(size, false)) != NULL) {
        return obj;
      }
      // Expand eagerly only while under the soft limit; once a GC has been
      // attempted (prev_cause != _no_gc) expansion is allowed regardless,
      // as a last resort before giving up.
      if (gen->capacity() < _capacity_expansion_limit ||
          prev_cause != GCCause::_no_gc) {
        obj = gen->expand_and_allocate(size, false);
      }
      if (obj != NULL || prev_cause == GCCause::_last_ditch_collection) {
        return obj;
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          continue;
        } else {
          // We are inside our own JNI critical section; waiting for it to
          // clear would deadlock, so fail the allocation instead.
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
      // Read the GC count while holding the Heap_lock
      gc_count_before = SharedHeap::heap()->total_collections();
      full_gc_count_before = SharedHeap::heap()->total_full_collections();
    }

    // Give up heap lock above, VMThread::execute below gets it back
    VM_GenCollectForPermanentAllocation op(size, gc_count_before, full_gc_count_before,
                                           next_cause);
    VMThread::execute(&op);
    if (!op.prologue_succeeded() || op.gc_locked()) {
      // Another GC already satisfied/invalidated our request, or the GC
      // locker blocked the collection: loop around to stall/retry.
      assert(op.result() == NULL, "must be NULL if gc_locked() is true");
      continue; // retry and/or stall as necessary
    }
    obj = op.result();
    assert(obj == NULL || SharedHeap::heap()->is_in_reserved(obj),
           "result not in heap");
    if (obj != NULL) {
      return obj;
    }
    // The GC ran but could not satisfy the request: escalate to a
    // last-ditch collection on the next iteration.
    prev_cause = next_cause;
    next_cause = GCCause::_last_ditch_collection;
  }
}
apetrusenko@574 | 91 | |
duke@435 | 92 | CompactingPermGen::CompactingPermGen(ReservedSpace rs, |
duke@435 | 93 | ReservedSpace shared_rs, |
duke@435 | 94 | size_t initial_byte_size, |
duke@435 | 95 | GenRemSet* remset, |
duke@435 | 96 | PermanentGenerationSpec* perm_spec) |
duke@435 | 97 | { |
duke@435 | 98 | CompactingPermGenGen* g = |
duke@435 | 99 | new CompactingPermGenGen(rs, shared_rs, initial_byte_size, -1, remset, |
duke@435 | 100 | NULL, perm_spec); |
duke@435 | 101 | if (g == NULL) |
duke@435 | 102 | vm_exit_during_initialization("Could not allocate a CompactingPermGen"); |
duke@435 | 103 | _gen = g; |
duke@435 | 104 | |
duke@435 | 105 | g->initialize_performance_counters(); |
duke@435 | 106 | |
duke@435 | 107 | _capacity_expansion_limit = g->capacity() + MaxPermHeapExpansion; |
duke@435 | 108 | } |
duke@435 | 109 | |
// Allocate "size" words in the perm gen, delegating to the shared
// PermGen slow path (which handles expansion, GC and JNI-critical stalls).
HeapWord* CompactingPermGen::mem_allocate(size_t size) {
  return mem_allocate_in_gen(size, _gen);
}
duke@435 | 113 | |
duke@435 | 114 | void CompactingPermGen::compute_new_size() { |
duke@435 | 115 | size_t desired_capacity = align_size_up(_gen->used(), MinPermHeapExpansion); |
duke@435 | 116 | if (desired_capacity < PermSize) { |
duke@435 | 117 | desired_capacity = PermSize; |
duke@435 | 118 | } |
duke@435 | 119 | if (_gen->capacity() > desired_capacity) { |
duke@435 | 120 | _gen->shrink(_gen->capacity() - desired_capacity); |
duke@435 | 121 | } |
duke@435 | 122 | _capacity_expansion_limit = _gen->capacity() + MaxPermHeapExpansion; |
duke@435 | 123 | } |