src/share/vm/gc_implementation/g1/vm_operations_g1.cpp

author      brutisso
date        Fri, 13 Apr 2012 01:59:38 +0200
changeset   3710:5c86f8211d1e
parent      3666:64bf7c8270cb
child       3765:1096fc5a52eb
permissions -rw-r--r--

7160728: Introduce an extra logging level for G1 logging
Summary: Added log levels "fine", "finer" and "finest". Let PrintGC map to "fine" and PrintGCDetails map to "finer". Separated out the per-worker information in the G1 logging to the "finest" level.
Reviewed-by: stefank, jwilhelm, tonyp, johnc
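
As a rough illustration of the level mapping described in the summary, the sketch below is an assumption, not the actual g1Log.hpp introduced by this change; the names G1LogLevel and G1LogSketch and the init() parameters are illustrative only. It models G1Log::fine()/finer()/finest() as threshold checks against a single level derived from the existing PrintGC and PrintGCDetails flags, which matches how G1Log::fine() and G1Log::finer() are queried in VM_CGC_Operation::doit() further down in this file.

// Illustrative sketch only -- not the g1Log.hpp interface added by this changeset.
enum G1LogLevel {
  LevelNone,
  LevelFine,    // what -XX:+PrintGC is mapped to
  LevelFiner,   // what -XX:+PrintGCDetails is mapped to
  LevelFinest   // per-worker detail
};

class G1LogSketch {
  static G1LogLevel _level;
public:
  // Derive a single level from the existing flags; the highest requested level wins.
  static void init(bool print_gc, bool print_gc_details, bool finest_requested) {
    if (finest_requested) {
      _level = LevelFinest;
    } else if (print_gc_details) {
      _level = LevelFiner;
    } else if (print_gc) {
      _level = LevelFine;
    } else {
      _level = LevelNone;
    }
  }
  // Threshold checks: "finer" implies "fine", and "finest" implies both.
  static bool fine()   { return _level >= LevelFine; }
  static bool finer()  { return _level >= LevelFiner; }
  static bool finest() { return _level >= LevelFinest; }
};

G1LogLevel G1LogSketch::_level = LevelNone;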

ysr@777 1 /*
brutisso@3456 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
johnc@3218 26 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
stefank@2314 27 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 28 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
brutisso@3710 29 #include "gc_implementation/g1/g1Log.hpp"
stefank@2314 30 #include "gc_implementation/g1/vm_operations_g1.hpp"
stefank@2314 31 #include "gc_implementation/shared/isGCActiveMark.hpp"
stefank@2314 33 #include "runtime/interfaceSupport.hpp"
ysr@777 34
tonyp@2315 35 VM_G1CollectForAllocation::VM_G1CollectForAllocation(
tonyp@2315 36 unsigned int gc_count_before,
tonyp@2315 37 size_t word_size)
johnc@3666 38 : VM_G1OperationWithAllocRequest(gc_count_before, word_size,
johnc@3666 39 GCCause::_allocation_failure) {
tonyp@2315 40 guarantee(word_size > 0, "an allocation should always be requested");
tonyp@2315 41 }
tonyp@2315 42
ysr@777 43 void VM_G1CollectForAllocation::doit() {
ysr@777 44 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@2315 45 _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
tonyp@2315 46 assert(_result == NULL || _pause_succeeded,
tonyp@2315 47 "if we get back a result, the pause should have succeeded");
ysr@777 48 }
ysr@777 49
ysr@777 50 void VM_G1CollectFull::doit() {
ysr@777 51 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 52 GCCauseSetter x(g1h, _gc_cause);
ysr@777 53 g1h->do_full_collection(false /* clear_all_soft_refs */);
ysr@777 54 }
ysr@777 55
tonyp@2315 56 VM_G1IncCollectionPause::VM_G1IncCollectionPause(
tonyp@2315 57 unsigned int gc_count_before,
tonyp@2315 58 size_t word_size,
tonyp@2315 59 bool should_initiate_conc_mark,
tonyp@2315 60 double target_pause_time_ms,
tonyp@2315 61 GCCause::Cause gc_cause)
johnc@3666 62 : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
tonyp@2315 63 _should_initiate_conc_mark(should_initiate_conc_mark),
tonyp@2315 64 _target_pause_time_ms(target_pause_time_ms),
johnc@3666 65 _should_retry_gc(false),
tonyp@2315 66 _full_collections_completed_before(0) {
tonyp@2315 67 guarantee(target_pause_time_ms > 0.0,
tonyp@2315 68 err_msg("target_pause_time_ms = %1.6lf should be positive",
tonyp@2315 69 target_pause_time_ms));
tonyp@2315 70 guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
tonyp@2315 71 "we can only request an allocation if the GC cause is for "
tonyp@2315 72 "an incremental GC pause");
tonyp@2315 73 _gc_cause = gc_cause;
tonyp@2315 74 }
tonyp@2315 75
johnc@3666 76 bool VM_G1IncCollectionPause::doit_prologue() {
johnc@3666 77 bool res = VM_GC_Operation::doit_prologue();
johnc@3666 78 if (!res) {
johnc@3666 79 if (_should_initiate_conc_mark) {
johnc@3666 80 // The prologue can fail for a couple of reasons. The first is that another GC
johnc@3666 81 // got scheduled and prevented the scheduling of the initial mark GC. The
johnc@3666 82 // second is that the GC locker may be active and the heap can't be expanded.
johnc@3666 83 // In both cases we want to retry the GC so that the initial mark pause is
johnc@3666 84 // actually scheduled. In the second case, however, we should stall
johnc@3666 85 // until the GC locker is no longer active and then retry the initial mark GC.
johnc@3666 86 _should_retry_gc = true;
johnc@3666 87 }
johnc@3666 88 }
johnc@3666 89 return res;
johnc@3666 90 }
johnc@3666 91
ysr@777 92 void VM_G1IncCollectionPause::doit() {
ysr@777 93 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@2011 94 assert(!_should_initiate_conc_mark ||
tonyp@2011 95 ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
brutisso@3456 96 (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
brutisso@3456 97 _gc_cause == GCCause::_g1_humongous_allocation),
brutisso@3456 98 "only a GC locker, a System.gc() or a hum allocation induced GC should start a cycle");
tonyp@2011 99
tonyp@2315 100 if (_word_size > 0) {
tonyp@2315 101 // An allocation has been requested. So, try to do that first.
tonyp@2315 102 _result = g1h->attempt_allocation_at_safepoint(_word_size,
tonyp@2315 103 false /* expect_null_cur_alloc_region */);
tonyp@2315 104 if (_result != NULL) {
tonyp@2315 105 // If we can successfully allocate before we actually do the
tonyp@2315 106 // pause then we will consider this pause successful.
tonyp@2315 107 _pause_succeeded = true;
tonyp@2315 108 return;
tonyp@2315 109 }
tonyp@2315 110 }
tonyp@2315 111
ysr@1523 112 GCCauseSetter x(g1h, _gc_cause);
tonyp@2011 113 if (_should_initiate_conc_mark) {
tonyp@2011 114 // It's safer to read full_collections_completed() here, given
tonyp@2011 115 // that no one else will be updating it concurrently. Since we'll
tonyp@2011 116 // only need it if we're initiating a marking cycle, no point in
tonyp@2011 117 // setting it earlier.
tonyp@2011 118 _full_collections_completed_before = g1h->full_collections_completed();
tonyp@2011 119
tonyp@2011 120 // At this point we are supposed to start a concurrent cycle. We
tonyp@2011 121 // will do so if one is not already in progress.
tonyp@3114 122 bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
johnc@2970 123
johnc@2970 124 // The above routine returns true if we were able to force the
johnc@2970 125 // next GC pause to be an initial mark; it returns false if a
johnc@2970 126 // marking cycle is already in progress.
johnc@2970 127 //
johnc@3666 128 // If a marking cycle is already in progress just return and skip the
johnc@3666 129 // pause below - if the reason for requesting this initial mark pause
johnc@3666 130 // was due to a System.gc() then the requesting thread should block in
johnc@3666 131 // doit_epilogue() until the marking cycle is complete.
johnc@3666 132 //
johnc@3666 133 // If this initial mark pause was requested as part of a humongous
johnc@3666 134 // allocation then we know that the marking cycle must just have
johnc@3666 135 // been started by another thread (possibly also allocating a humongous
johnc@3666 136 // object) as there was no active marking cycle when the requesting
johnc@3666 137 // thread checked before calling collect() in
johnc@3666 138 // attempt_allocation_humongous(). Retrying the GC, in this case,
johnc@3666 139 // will cause the requesting thread to spin inside collect() until the
johnc@3666 140 // just started marking cycle is complete - which may be a while. So
johnc@3666 141 // we do NOT retry the GC.
johnc@2970 142 if (!res) {
johnc@3666 143 assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
johnc@3666 144 if (_gc_cause != GCCause::_g1_humongous_allocation) {
johnc@3666 145 _should_retry_gc = true;
johnc@3666 146 }
johnc@2970 147 return;
johnc@2970 148 }
tonyp@2011 149 }
tonyp@2315 150
tonyp@2315 151 _pause_succeeded =
tonyp@2315 152 g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
tonyp@2315 153 if (_pause_succeeded && _word_size > 0) {
tonyp@2315 154 // An allocation had been requested.
tonyp@2315 155 _result = g1h->attempt_allocation_at_safepoint(_word_size,
tonyp@2315 156 true /* expect_null_cur_alloc_region */);
tonyp@2315 157 } else {
tonyp@2315 158 assert(_result == NULL, "invariant");
johnc@3666 159 if (!_pause_succeeded) {
johnc@3666 160 // Another possible reason for the pause to not be successful
johnc@3666 161 // is that, again, the GC locker is active (and has become active
johnc@3666 162 // since the prologue was executed). In this case we should retry
johnc@3666 163 // the pause after waiting for the GC locker to become inactive.
johnc@3666 164 _should_retry_gc = true;
johnc@3666 165 }
tonyp@2315 166 }
tonyp@2011 167 }
tonyp@2011 168
tonyp@2011 169 void VM_G1IncCollectionPause::doit_epilogue() {
tonyp@2011 170 VM_GC_Operation::doit_epilogue();
tonyp@2011 171
tonyp@2011 172 // If the pause was initiated by a System.gc() and
tonyp@2011 173 // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
tonyp@2011 174 // that just started (or maybe one that was already in progress) to
tonyp@2011 175 // finish.
tonyp@2011 176 if (_gc_cause == GCCause::_java_lang_system_gc &&
tonyp@2011 177 _should_initiate_conc_mark) {
tonyp@2011 178 assert(ExplicitGCInvokesConcurrent,
tonyp@2011 179 "the only way to be here is if ExplicitGCInvokesConcurrent is set");
tonyp@2011 180
tonyp@2011 181 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tonyp@2011 182
tonyp@2011 183 // In the doit() method we saved g1h->full_collections_completed()
tonyp@2011 184 // in the _full_collections_completed_before field. We have to
tonyp@2011 185 // wait until we observe that g1h->full_collections_completed()
tonyp@2011 186 // has increased by at least one. This can happen if a) we started
tonyp@2011 187 // a cycle and it completes, b) a cycle already in progress
tonyp@2011 188 // completes, or c) a Full GC happens.
tonyp@2011 189
tonyp@2011 190 // If the condition has already been reached, there's no point in
tonyp@2011 191 // actually taking the lock and doing the wait.
tonyp@2011 192 if (g1h->full_collections_completed() <=
tonyp@2011 193 _full_collections_completed_before) {
tonyp@2011 194 // The following is largely copied from CMS
tonyp@2011 195
tonyp@2011 196 Thread* thr = Thread::current();
tonyp@2011 197 assert(thr->is_Java_thread(), "invariant");
tonyp@2011 198 JavaThread* jt = (JavaThread*)thr;
tonyp@2011 199 ThreadToNativeFromVM native(jt);
tonyp@2011 200
tonyp@2011 201 MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
tonyp@2011 202 while (g1h->full_collections_completed() <=
tonyp@2011 203 _full_collections_completed_before) {
tonyp@2011 204 FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
tonyp@2011 205 }
tonyp@2011 206 }
tonyp@2011 207 }
ysr@777 208 }
ysr@777 209
johnc@3218 210 void VM_CGC_Operation::acquire_pending_list_lock() {
johnc@3666 211 assert(_needs_pll, "don't call this otherwise");
johnc@3218 212 // The caller may block while communicating
johnc@3218 213 // with the SLT thread in order to acquire/release the PLL.
johnc@3218 214 ConcurrentMarkThread::slt()->
johnc@3218 215 manipulatePLL(SurrogateLockerThread::acquirePLL);
johnc@3218 216 }
johnc@3218 217
johnc@3218 218 void VM_CGC_Operation::release_and_notify_pending_list_lock() {
johnc@3666 219 assert(_needs_pll, "don't call this otherwise");
johnc@3218 220 // The caller may block while communicating
johnc@3218 221 // with the SLT thread in order to acquire/release the PLL.
johnc@3218 222 ConcurrentMarkThread::slt()->
johnc@3218 223 manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
johnc@3218 224 }
johnc@3218 225
ysr@777 226 void VM_CGC_Operation::doit() {
brutisso@3710 227 gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
brutisso@3710 228 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
brutisso@3710 229 TraceTime t(_printGCMessage, G1Log::fine(), true, gclog_or_tty);
ysr@777 230 SharedHeap* sh = SharedHeap::heap();
ysr@777 231 // This could go away if CollectedHeap gave access to _gc_is_active...
ysr@777 232 if (sh != NULL) {
ysr@777 233 IsGCActiveMark x;
ysr@777 234 _cl->do_void();
ysr@777 235 } else {
ysr@777 236 _cl->do_void();
ysr@777 237 }
ysr@777 238 }
ysr@777 239
ysr@777 240 bool VM_CGC_Operation::doit_prologue() {
johnc@3218 241 // Note the relative order of the locks must match that in
johnc@3218 242 // VM_GC_Operation::doit_prologue() or deadlocks can occur
johnc@3666 243 if (_needs_pll) {
johnc@3666 244 acquire_pending_list_lock();
johnc@3666 245 }
johnc@3218 246
ysr@777 247 Heap_lock->lock();
ysr@777 248 SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
ysr@777 249 return true;
ysr@777 250 }
ysr@777 251
ysr@777 252 void VM_CGC_Operation::doit_epilogue() {
johnc@3218 253 // Note the relative order of the unlocks must match that in
johnc@3218 254 // VM_GC_Operation::doit_epilogue()
ysr@777 255 SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
ysr@777 256 Heap_lock->unlock();
johnc@3666 257 if (_needs_pll) {
johnc@3666 258 release_and_notify_pending_list_lock();
johnc@3666 259 }
ysr@777 260 }
