duke@435: /* xdono@631: * Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved. duke@435: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. duke@435: * duke@435: * This code is free software; you can redistribute it and/or modify it duke@435: * under the terms of the GNU General Public License version 2 only, as duke@435: * published by the Free Software Foundation. duke@435: * duke@435: * This code is distributed in the hope that it will be useful, but WITHOUT duke@435: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or duke@435: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License duke@435: * version 2 for more details (a copy is included in the LICENSE file that duke@435: * accompanied this code). duke@435: * duke@435: * You should have received a copy of the GNU General Public License version duke@435: * 2 along with this work; if not, write to the Free Software Foundation, duke@435: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. duke@435: * duke@435: * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, duke@435: * CA 95054 USA or visit www.sun.com if you need additional information or duke@435: * have any questions. duke@435: * duke@435: */ duke@435: # include "incls/_precompiled.incl" duke@435: # include "incls/_vmGCOperations.cpp.incl" duke@435: duke@435: HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool); duke@435: HS_DTRACE_PROBE_DECL(hotspot, gc__end); duke@435: duke@435: // The same dtrace probe can't be inserted in two different files, so we duke@435: // have to call it here, so it's only in one file. Can't create new probes duke@435: // for the other file anymore. The dtrace probes have to remain stable. 
// Fire the DTrace hotspot:::gc-begin probe; `full` distinguishes a full
// collection from a minor one (see probe declaration at top of file).
void VM_GC_Operation::notify_gc_begin(bool full) {
  HS_DTRACE_PROBE1(hotspot, gc__begin, full);
}

// Fire the DTrace hotspot:::gc-end probe, pairing notify_gc_begin().
void VM_GC_Operation::notify_gc_end() {
  HS_DTRACE_PROBE(hotspot, gc__end);
}

// Acquire the java.lang.ref pending-list lock on behalf of this operation.
// Must be taken BEFORE Heap_lock (see doit_prologue) to keep a consistent
// lock ordering with the reference handler.
void VM_GC_Operation::acquire_pending_list_lock() {
  // we may enter this with pending exception set
  instanceRefKlass::acquire_pending_list_lock(&_pending_list_basic_lock);
}


// Release the pending-list lock taken in acquire_pending_list_lock() and
// notify waiters (e.g. the reference handler thread).
void VM_GC_Operation::release_and_notify_pending_list_lock() {

  instanceRefKlass::release_and_notify_pending_list_lock(&_pending_list_basic_lock);
}

// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests. We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signalled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
bool VM_GC_Operation::skip_operation() const {
  // Another thread's GC between our snapshot and now means our request
  // has already been satisfied.
  bool skip = (_gc_count_before != Universe::heap()->total_collections());
  if (_full && skip) {
    // For a full-GC request, only a *full* collection in the interim counts.
    skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
  }
  if (!skip && GC_locker::is_active_and_needs_gc()) {
    // GC locker holds off GC: skip only if the heap cannot expand any
    // further (otherwise the operation may still be able to allocate).
    skip = Universe::heap()->is_maximal_no_gc();
    assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
           "GC_locker cannot be active when initiating GC");
  }
  return skip;
}

// Runs in the requesting Java thread before the safepoint. Takes the
// pending-list lock, then Heap_lock; backs out (releasing both, in reverse
// order) if skip_operation() says another thread already did the work.
// Returns _prologue_succeeded; doit()/doit_epilogue() run only on success.
bool VM_GC_Operation::doit_prologue() {
  assert(Thread::current()->is_Java_thread(), "just checking");

  acquire_pending_list_lock();
  // If the GC count has changed someone beat us to the collection
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();

  // Check invocations
  if (skip_operation()) {
    // skip collection
    Heap_lock->unlock();
    release_and_notify_pending_list_lock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
    // Record that this (Java) thread holds Heap_lock for a GC, so the VM
    // thread's heap code can recognize the lock as held on its behalf.
    SharedHeap* sh = SharedHeap::heap();
    if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = true;
  }
  return _prologue_succeeded;
}


// Runs in the requesting Java thread after the safepoint operation.
// Clears the holds-Heap_lock-for-GC flag set in doit_prologue(), then
// releases Heap_lock and the pending-list lock (reverse acquisition order).
void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
  SharedHeap* sh = SharedHeap::heap();
  if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false;
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}

// Heap inspection is only attempted on heaps that support it; otherwise
// the whole operation is vetoed before any locks are taken.
bool VM_GC_HeapInspection::doit_prologue() {
  if (Universe::heap()->supports_heap_inspection()) {
    return VM_GC_Operation::doit_prologue();
  } else {
    return false;
  }
}

// Never skip: an inspection is explicitly requested and is not made
// redundant by an intervening collection (unlike an allocation-driven GC).
bool VM_GC_HeapInspection::skip_operation() const {
  assert(Universe::heap()->supports_heap_inspection(), "huh?");
  return false;
}

// Perform the heap inspection at the safepoint: optionally force a full GC
// first (_full_gc), otherwise just make the heap walkable, then dump the
// histogram to _out.
void VM_GC_HeapInspection::doit() {
  HandleMark hm;
  CollectedHeap* ch = Universe::heap();
  if (_full_gc) {
    ch->collect_as_vm_thread(GCCause::_heap_inspection);
  } else {
    // make the heap parsable (no need to retire TLABs)
    ch->ensure_parsability(false);
  }
  HeapInspection::heap_inspection(_out, _need_prologue /* need_prologue */);
}


// Safepoint body for a GC triggered by a failed heap allocation: try to
// satisfy the allocation (possibly via collection); result goes in _res.
// A NULL result with an active GC locker is flagged so the caller can
// retry once the locker clears rather than throwing OOM.
void VM_GenCollectForAllocation::doit() {
  JvmtiGCForAllocationMarker jgcm;
  notify_gc_begin(false);  // minor-GC probe: this path is not a full GC

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);  // RAII: restores previous cause on exit
  _res = gch->satisfy_failed_allocation(_size, _tlab);
  assert(gch->is_in_reserved_or_null(_res), "result not in heap");

  if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
  notify_gc_end();
}

// Safepoint body for an explicitly requested full collection of all
// generations up to _max_level.
void VM_GenCollectFull::doit() {
  JvmtiGCFullMarker jgcm;
  notify_gc_begin(true);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
  notify_gc_end();
}

// Safepoint body for a GC triggered by a failed permanent-generation
// allocation: run a full collection appropriate to the heap kind
// (generational or G1), then retry the perm-gen allocation into _res.
void VM_GenCollectForPermanentAllocation::doit() {
  JvmtiGCForAllocationMarker jgcm;
  notify_gc_begin(true);
  SharedHeap* heap = (SharedHeap*)Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);
  switch (heap->kind()) {
    case (CollectedHeap::GenCollectedHeap): {
      GenCollectedHeap* gch = (GenCollectedHeap*)heap;
      // Collect all generations (levels 0 .. n_gens()-1).
      gch->do_full_collection(gch->must_clear_all_soft_refs(),
                              gch->n_gens() - 1);
      break;
    }
#ifndef SERIALGC
    case (CollectedHeap::G1CollectedHeap): {
      G1CollectedHeap* g1h = (G1CollectedHeap*)heap;
      // Last-ditch collections additionally clear soft references
      // (presumably — semantics live in do_full_collection's parameter).
      g1h->do_full_collection(_gc_cause == GCCause::_last_ditch_collection);
      break;
    }
#endif // SERIALGC
    default:
      ShouldNotReachHere();
  }
  // Retry the perm-gen allocation now that a full GC has run.
  _res = heap->perm_gen()->allocate(_size, false);
  assert(heap->is_in_reserved_or_null(_res), "result not in heap");
  if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
    // Allocation still failed while GC locker is active: let the caller
    // retry after the locker-induced GC instead of reporting failure.
    set_gc_locked();
  }
  notify_gc_end();
}