src/share/vm/gc_implementation/shared/vmGCOperations.cpp

Mon, 09 Jun 2008 07:18:59 -0700

author
iveresov
date
Mon, 09 Jun 2008 07:18:59 -0700
changeset 625
d1635bf93939
parent 574
c0492d52d55b
child 631
d1605aabd0a1
child 777
37f87013dfd8
permissions
-rw-r--r--

6711930: NUMA allocator: ParOld can create a hole less than minimal object size in the lgrp chunk
Summary: The fix takes care of three issues that can create a hole smaller than the minimal object size in the lgrp chunk
Reviewed-by: ysr, apetrusenko

duke@435 1 /*
duke@435 2 * Copyright 2005-2006 Sun Microsystems, Inc. All Rights Reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
duke@435 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@435 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@435 21 * have any questions.
duke@435 22 *
duke@435 23 */
duke@435 24 # include "incls/_precompiled.incl"
duke@435 25 # include "incls/_vmGCOperations.cpp.incl"
duke@435 26
duke@435 27 HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
duke@435 28 HS_DTRACE_PROBE_DECL(hotspot, gc__end);
duke@435 29
duke@435 30 // The same dtrace probe can't be inserted in two different files, so we
duke@435 31 // have to call it here, so it's only in one file. Can't create new probes
duke@435 32 // for the other file anymore. The dtrace probes have to remain stable.
// Fire the DTrace gc__begin probe; `full` distinguishes a full collection
// from a minor one. Kept here so the probe exists in exactly one file
// (see the comment above: probes must remain stable and unique).
void VM_GC_Operation::notify_gc_begin(bool full) {
  HS_DTRACE_PROBE1(hotspot, gc__begin, full);
}
duke@435 36
// Fire the DTrace gc__end probe, pairing with notify_gc_begin().
void VM_GC_Operation::notify_gc_end() {
  HS_DTRACE_PROBE(hotspot, gc__end);
}
duke@435 40
// Acquire the reference-pending-list lock via instanceRefKlass, stashing
// it in _pending_list_basic_lock for the matching release call.
void VM_GC_Operation::acquire_pending_list_lock() {
  // we may enter this with pending exception set
  instanceRefKlass::acquire_pending_list_lock(&_pending_list_basic_lock);
}
duke@435 45
duke@435 46
// Release the pending-list lock acquired in acquire_pending_list_lock()
// and notify any waiters (e.g. the reference handler thread).
void VM_GC_Operation::release_and_notify_pending_list_lock() {

  instanceRefKlass::release_and_notify_pending_list_lock(&_pending_list_basic_lock);
}
duke@435 51
duke@435 52 // Allocations may fail in several threads at about the same time,
duke@435 53 // resulting in multiple gc requests. We only want to do one of them.
duke@435 54 // In case a GC locker is active and the need for a GC is already signalled,
duke@435 55 // we want to skip this GC attempt altogether, without doing a futile
duke@435 56 // safepoint operation.
duke@435 57 bool VM_GC_Operation::skip_operation() const {
duke@435 58 bool skip = (_gc_count_before != Universe::heap()->total_collections());
duke@435 59 if (_full && skip) {
duke@435 60 skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
duke@435 61 }
duke@435 62 if (!skip && GC_locker::is_active_and_needs_gc()) {
duke@435 63 skip = Universe::heap()->is_maximal_no_gc();
duke@435 64 assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
duke@435 65 "GC_locker cannot be active when initiating GC");
duke@435 66 }
duke@435 67 return skip;
duke@435 68 }
duke@435 69
// Runs in the requesting Java thread before the VM operation is queued.
// Takes the pending-list lock, then Heap_lock (that order is the locking
// protocol — do not reorder), and bails out if the GC we want has already
// happened or would be futile. Returns whether doit() should proceed;
// on false both locks are released here, on true they are held until
// doit_epilogue().
bool VM_GC_Operation::doit_prologue() {
  assert(Thread::current()->is_Java_thread(), "just checking");

  acquire_pending_list_lock();
  // If the GC count has changed someone beat us to the collection
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();
  // Check invocations
  if (skip_operation()) {
    // skip collection: undo both lock acquisitions in reverse order
    Heap_lock->unlock();
    release_and_notify_pending_list_lock();
    _prologue_succeeded = false;
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}
duke@435 88
duke@435 89
// Runs in the requesting Java thread after doit() completes. Releases
// the two locks taken in doit_prologue() in reverse acquisition order:
// Heap_lock first, then the pending-list lock (with notification).
void VM_GC_Operation::doit_epilogue() {
  assert(Thread::current()->is_Java_thread(), "just checking");
  // Release the Heap_lock first.
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}
duke@435 96
duke@435 97 bool VM_GC_HeapInspection::doit_prologue() {
duke@435 98 if (Universe::heap()->supports_heap_inspection()) {
duke@435 99 return VM_GC_Operation::doit_prologue();
duke@435 100 } else {
duke@435 101 return false;
duke@435 102 }
duke@435 103 }
duke@435 104
// A heap inspection is never redundant — it must always run, regardless
// of intervening collections (the prologue already verified support).
bool VM_GC_HeapInspection::skip_operation() const {
  assert(Universe::heap()->supports_heap_inspection(), "huh?");
  return false;
}
duke@435 109
duke@435 110 void VM_GC_HeapInspection::doit() {
duke@435 111 HandleMark hm;
duke@435 112 CollectedHeap* ch = Universe::heap();
duke@435 113 if (_full_gc) {
duke@435 114 ch->collect_as_vm_thread(GCCause::_heap_inspection);
duke@435 115 } else {
duke@435 116 // make the heap parsable (no need to retire TLABs)
duke@435 117 ch->ensure_parsability(false);
duke@435 118 }
duke@435 119 HeapInspection::heap_inspection(_out);
duke@435 120 }
duke@435 121
duke@435 122
// VM-thread body for a collection triggered by a failed allocation.
// Attempts to satisfy the allocation of _size words (TLAB if _tlab),
// storing the result in _res. A NULL result while the GC locker is
// active and needs a GC marks the operation gc-locked so the caller
// can retry after the pending locker-induced collection.
void VM_GenCollectForAllocation::doit() {
  JvmtiGCForAllocationMarker jgcm;
  // not a full collection from the probe's point of view
  notify_gc_begin(false);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _res = gch->satisfy_failed_allocation(_size, _tlab);
  assert(gch->is_in_reserved_or_null(_res), "result not in heap");

  if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
  notify_gc_end();
}
duke@435 137
// VM-thread body for an explicit full collection of generations up to
// and including _max_level, bracketed by JVMTI and DTrace notifications.
void VM_GenCollectFull::doit() {
  JvmtiGCFullMarker jgcm;
  notify_gc_begin(true);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
  notify_gc_end();
}
apetrusenko@574 147
// VM-thread body for a collection triggered by a failed perm-gen
// allocation: run a full collection of all generations, then retry the
// allocation of _size words in the perm gen (result in _res). As with
// VM_GenCollectForAllocation, a NULL result while the GC locker is
// active marks the operation gc-locked for a retry by the caller.
void VM_GenCollectForPermanentAllocation::doit() {
  JvmtiGCForAllocationMarker jgcm;
  notify_gc_begin(true);
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  // collect every generation (n_gens() - 1 is the oldest level)
  gch->do_full_collection(gch->must_clear_all_soft_refs(),
                          gch->n_gens() - 1);
  _res = gch->perm_gen()->allocate(_size, false);
  assert(gch->is_in_reserved_or_null(_res), "result not in heap");
  if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
  notify_gc_end();
}

mercurial