src/share/vm/gc_implementation/shared/vmGCOperations.cpp

Tue, 13 Apr 2010 13:52:10 -0700

author
jmasa
date
Tue, 13 Apr 2010 13:52:10 -0700
changeset 1822
0bfd3fb24150
parent 1050
c6c601a0f2d6
child 1827
bdb5361c461c
permissions
-rw-r--r--

6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
Summary: Ensure that a full GC which clears all SoftReferences is performed before throwing an out-of-memory error.
Reviewed-by: ysr, jcoomes

     1 /*
     2  * Copyright 2005-2008 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    24 # include "incls/_precompiled.incl"
    25 # include "incls/_vmGCOperations.cpp.incl"
    27 HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
    28 HS_DTRACE_PROBE_DECL(hotspot, gc__end);
// The same dtrace probe can't be inserted in two different files, so we
// have to call it here, so it's only in one file.  Can't create new probes
// for the other file anymore.   The dtrace probes have to remain stable.

// Fire the dtrace hotspot gc__begin probe.  'full' tells observers whether
// this collection was requested as a full collection.
void VM_GC_Operation::notify_gc_begin(bool full) {
  HS_DTRACE_PROBE1(hotspot, gc__begin, full);
}
// Fire the dtrace hotspot gc__end probe, marking the end of the collection
// started by notify_gc_begin().
void VM_GC_Operation::notify_gc_end() {
  HS_DTRACE_PROBE(hotspot, gc__end);
}
// Acquire the reference pending list lock; the lock state is kept in
// _pending_list_basic_lock for the matching release in
// release_and_notify_pending_list_lock().
void VM_GC_Operation::acquire_pending_list_lock() {
  // we may enter this with pending exception set
  instanceRefKlass::acquire_pending_list_lock(&_pending_list_basic_lock);
}
// Release the pending list lock acquired by acquire_pending_list_lock()
// and notify any waiters on it.
void VM_GC_Operation::release_and_notify_pending_list_lock() {
  instanceRefKlass::release_and_notify_pending_list_lock(&_pending_list_basic_lock);
}
    52 // Allocations may fail in several threads at about the same time,
    53 // resulting in multiple gc requests.  We only want to do one of them.
    54 // In case a GC locker is active and the need for a GC is already signalled,
    55 // we want to skip this GC attempt altogether, without doing a futile
    56 // safepoint operation.
    57 bool VM_GC_Operation::skip_operation() const {
    58   bool skip = (_gc_count_before != Universe::heap()->total_collections());
    59   if (_full && skip) {
    60     skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
    61   }
    62   if (!skip && GC_locker::is_active_and_needs_gc()) {
    63     skip = Universe::heap()->is_maximal_no_gc();
    64     assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
    65            "GC_locker cannot be active when initiating GC");
    66   }
    67   return skip;
    68 }
// Prologue run in the requesting Java thread before the VM operation is
// scheduled.  Takes the pending list lock and then the Heap_lock (this
// acquisition order must be the reverse of the release order in
// doit_epilogue()).  Returns false, after releasing both locks, if the
// collection has become redundant (see skip_operation()).
bool VM_GC_Operation::doit_prologue() {
  assert(Thread::current()->is_Java_thread(), "just checking");

  acquire_pending_list_lock();
  // If the GC count has changed someone beat us to the collection
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();

  // Check invocations
  if (skip_operation()) {
    // skip collection
    Heap_lock->unlock();
    release_and_notify_pending_list_lock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
    SharedHeap* sh = SharedHeap::heap();
    // Record that this thread is holding the Heap_lock on behalf of the
    // upcoming GC; cleared again in doit_epilogue().
    if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = true;
  }
  return _prologue_succeeded;
}
// Epilogue run in the requesting Java thread after the VM operation
// completes.  Undoes doit_prologue(): releases the Heap_lock and then the
// pending list lock, in the reverse of the acquisition order.
void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
  SharedHeap* sh = SharedHeap::heap();
  if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false;
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}
   102 bool VM_GC_HeapInspection::doit_prologue() {
   103   if (Universe::heap()->supports_heap_inspection()) {
   104     return VM_GC_Operation::doit_prologue();
   105   } else {
   106     return false;
   107   }
   108 }
// A heap inspection request is never redundant, so it is never skipped.
bool VM_GC_HeapInspection::skip_operation() const {
  assert(Universe::heap()->supports_heap_inspection(), "huh?");
  return false;
}
   115 void VM_GC_HeapInspection::doit() {
   116   HandleMark hm;
   117   CollectedHeap* ch = Universe::heap();
   118   if (_full_gc) {
   119     ch->collect_as_vm_thread(GCCause::_heap_inspection);
   120   } else {
   121     // make the heap parsable (no need to retire TLABs)
   122     ch->ensure_parsability(false);
   123   }
   124   HeapInspection::heap_inspection(_out, _need_prologue /* need_prologue */);
   125 }
   128 void VM_GenCollectForAllocation::doit() {
   129   JvmtiGCForAllocationMarker jgcm;
   130   notify_gc_begin(false);
   132   GenCollectedHeap* gch = GenCollectedHeap::heap();
   133   GCCauseSetter gccs(gch, _gc_cause);
   134   _res = gch->satisfy_failed_allocation(_size, _tlab);
   135   assert(gch->is_in_reserved_or_null(_res), "result not in heap");
   137   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
   138     set_gc_locked();
   139   }
   140   notify_gc_end();
   141 }
   143 void VM_GenCollectFull::doit() {
   144   JvmtiGCFullMarker jgcm;
   145   notify_gc_begin(true);
   147   GenCollectedHeap* gch = GenCollectedHeap::heap();
   148   GCCauseSetter gccs(gch, _gc_cause);
   149   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
   150   notify_gc_end();
   151 }
   153 void VM_GenCollectForPermanentAllocation::doit() {
   154   JvmtiGCForAllocationMarker jgcm;
   155   notify_gc_begin(true);
   156   SharedHeap* heap = (SharedHeap*)Universe::heap();
   157   GCCauseSetter gccs(heap, _gc_cause);
   158   switch (heap->kind()) {
   159     case (CollectedHeap::GenCollectedHeap): {
   160       GenCollectedHeap* gch = (GenCollectedHeap*)heap;
   161       gch->do_full_collection(gch->must_clear_all_soft_refs(),
   162                               gch->n_gens() - 1);
   163       break;
   164     }
   165 #ifndef SERIALGC
   166     case (CollectedHeap::G1CollectedHeap): {
   167       G1CollectedHeap* g1h = (G1CollectedHeap*)heap;
   168       g1h->do_full_collection(_gc_cause == GCCause::_last_ditch_collection);
   169       break;
   170     }
   171 #endif // SERIALGC
   172     default:
   173       ShouldNotReachHere();
   174   }
   175   _res = heap->perm_gen()->allocate(_size, false);
   176   assert(heap->is_in_reserved_or_null(_res), "result not in heap");
   177   if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
   178     set_gc_locked();
   179   }
   180   notify_gc_end();
   181 }

mercurial