src/share/vm/gc_implementation/g1/vm_operations_g1.cpp

Sat, 06 Oct 2012 01:17:44 -0700

author
johnc
date
Sat, 06 Oct 2012 01:17:44 -0700
changeset 4173
8a5ea0a9ccc4
parent 3823
37552638d24a
child 5237
f2110083203d
permissions
-rw-r--r--

7127708: G1: change task num types from int to uint in concurrent mark
Summary: Change the type of various task num fields, parameters etc to unsigned and rename them to be more consistent with the other collectors. Code changes were also reviewed by Vitaly Davidovich.
Reviewed-by: johnc
Contributed-by: Kaushik Srenevasan <kaushik@twitter.com>

ysr@777 1 /*
brutisso@3456 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
johnc@3218 26 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
stefank@2314 27 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 28 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
brutisso@3710 29 #include "gc_implementation/g1/g1Log.hpp"
stefank@2314 30 #include "gc_implementation/g1/vm_operations_g1.hpp"
stefank@2314 31 #include "gc_implementation/shared/isGCActiveMark.hpp"
tonyp@2315 32 #include "gc_implementation/g1/vm_operations_g1.hpp"
stefank@2314 33 #include "runtime/interfaceSupport.hpp"
ysr@777 34
tonyp@2315 35 VM_G1CollectForAllocation::VM_G1CollectForAllocation(
tonyp@2315 36 unsigned int gc_count_before,
tonyp@2315 37 size_t word_size)
johnc@3666 38 : VM_G1OperationWithAllocRequest(gc_count_before, word_size,
johnc@3666 39 GCCause::_allocation_failure) {
tonyp@2315 40 guarantee(word_size > 0, "an allocation should always be requested");
tonyp@2315 41 }
tonyp@2315 42
ysr@777 43 void VM_G1CollectForAllocation::doit() {
ysr@777 44 G1CollectedHeap* g1h = G1CollectedHeap::heap();
johnc@3765 45 GCCauseSetter x(g1h, _gc_cause);
tonyp@2315 46 _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
tonyp@2315 47 assert(_result == NULL || _pause_succeeded,
tonyp@2315 48 "if we get back a result, the pause should have succeeded");
ysr@777 49 }
ysr@777 50
ysr@777 51 void VM_G1CollectFull::doit() {
ysr@777 52 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 53 GCCauseSetter x(g1h, _gc_cause);
ysr@777 54 g1h->do_full_collection(false /* clear_all_soft_refs */);
ysr@777 55 }
ysr@777 56
tonyp@2315 57 VM_G1IncCollectionPause::VM_G1IncCollectionPause(
tonyp@2315 58 unsigned int gc_count_before,
tonyp@2315 59 size_t word_size,
tonyp@2315 60 bool should_initiate_conc_mark,
tonyp@2315 61 double target_pause_time_ms,
tonyp@2315 62 GCCause::Cause gc_cause)
johnc@3666 63 : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
tonyp@2315 64 _should_initiate_conc_mark(should_initiate_conc_mark),
tonyp@2315 65 _target_pause_time_ms(target_pause_time_ms),
johnc@3666 66 _should_retry_gc(false),
brutisso@3823 67 _old_marking_cycles_completed_before(0) {
tonyp@2315 68 guarantee(target_pause_time_ms > 0.0,
tonyp@2315 69 err_msg("target_pause_time_ms = %1.6lf should be positive",
tonyp@2315 70 target_pause_time_ms));
tonyp@2315 71 guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
tonyp@2315 72 "we can only request an allocation if the GC cause is for "
tonyp@2315 73 "an incremental GC pause");
tonyp@2315 74 _gc_cause = gc_cause;
tonyp@2315 75 }
tonyp@2315 76
johnc@3666 77 bool VM_G1IncCollectionPause::doit_prologue() {
johnc@3666 78 bool res = VM_GC_Operation::doit_prologue();
johnc@3666 79 if (!res) {
johnc@3666 80 if (_should_initiate_conc_mark) {
johnc@3666 81 // The prologue can fail for a couple of reasons. The first is that another GC
johnc@3666 82 // got scheduled and prevented the scheduling of the initial mark GC. The
johnc@3666 83 // second is that the GC locker may be active and the heap can't be expanded.
johnc@3666 84 // In both cases we want to retry the GC so that the initial mark pause is
johnc@3666 85 // actually scheduled. In the second case, however, we should stall until
johnc@3666 86 // until the GC locker is no longer active and then retry the initial mark GC.
johnc@3666 87 _should_retry_gc = true;
johnc@3666 88 }
johnc@3666 89 }
johnc@3666 90 return res;
johnc@3666 91 }
johnc@3666 92
// Executed at a safepoint: optionally satisfy the pending allocation
// before pausing, optionally force this pause to be an initial mark
// (start of a concurrent cycle), then run the incremental pause itself
// and retry the allocation afterwards if one was requested.
void VM_G1IncCollectionPause::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(!_should_initiate_conc_mark ||
      ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
       (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
        _gc_cause == GCCause::_g1_humongous_allocation),
         "only a GC locker, a System.gc() or a hum allocation induced GC should start a cycle");

  if (_word_size > 0) {
    // An allocation has been requested. So, try to do that first.
    // If it succeeds we can skip the pause entirely.
    _result = g1h->attempt_allocation_at_safepoint(_word_size,
                                     false /* expect_null_cur_alloc_region */);
    if (_result != NULL) {
      // If we can successfully allocate before we actually do the
      // pause then we will consider this pause successful.
      _pause_succeeded = true;
      return;
    }
  }

  GCCauseSetter x(g1h, _gc_cause);
  if (_should_initiate_conc_mark) {
    // It's safer to read old_marking_cycles_completed() here, given
    // that no one else will be updating it concurrently. Since we'll
    // only need it if we're initiating a marking cycle, no point in
    // setting it earlier. (doit_epilogue() waits on this snapshot.)
    _old_marking_cycles_completed_before = g1h->old_marking_cycles_completed();

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    // The above routine returns true if we were able to force the
    // next GC pause to be an initial mark; it returns false if a
    // marking cycle is already in progress.
    //
    // If a marking cycle is already in progress just return and skip the
    // pause below - if the reason for requesting this initial mark pause
    // was due to a System.gc() then the requesting thread should block in
    // doit_epilogue() until the marking cycle is complete.
    //
    // If this initial mark pause was requested as part of a humongous
    // allocation then we know that the marking cycle must just have
    // been started by another thread (possibly also allocating a humongous
    // object) as there was no active marking cycle when the requesting
    // thread checked before calling collect() in
    // attempt_allocation_humongous(). Retrying the GC, in this case,
    // will cause the requesting thread to spin inside collect() until the
    // just started marking cycle is complete - which may be a while. So
    // we do NOT retry the GC.
    if (!res) {
      assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
      if (_gc_cause != GCCause::_g1_humongous_allocation) {
        _should_retry_gc = true;
      }
      return;
    }
  }

  // Do the actual incremental pause; on success, retry the requested
  // allocation (the current alloc region is expected to be NULL after
  // a pause).
  _pause_succeeded =
    g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
  if (_pause_succeeded && _word_size > 0) {
    // An allocation had been requested.
    _result = g1h->attempt_allocation_at_safepoint(_word_size,
                                      true /* expect_null_cur_alloc_region */);
  } else {
    assert(_result == NULL, "invariant");
    if (!_pause_succeeded) {
      // Another possible reason for the pause to not be successful
      // is that, again, the GC locker is active (and has become active
      // since the prologue was executed). In this case we should retry
      // the pause after waiting for the GC locker to become inactive.
      _should_retry_gc = true;
    }
  }
}
tonyp@2011 169
// Run on the requesting thread after the safepoint operation. For an
// explicit System.gc() that initiated a concurrent cycle (i.e. with
// +ExplicitGCInvokesConcurrent), block here until an old-marking cycle
// completes, so System.gc() keeps its "collection finished" semantics.
void VM_G1IncCollectionPause::doit_epilogue() {
  VM_GC_Operation::doit_epilogue();

  // If the pause was initiated by a System.gc() and
  // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
  // that just started (or maybe one that was already in progress) to
  // finish.
  if (_gc_cause == GCCause::_java_lang_system_gc &&
      _should_initiate_conc_mark) {
    assert(ExplicitGCInvokesConcurrent,
           "the only way to be here is if ExplicitGCInvokesConcurrent is set");

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    // In the doit() method we saved g1h->old_marking_cycles_completed()
    // in the _old_marking_cycles_completed_before field. We have to
    // wait until we observe that g1h->old_marking_cycles_completed()
    // has increased by at least one. This can happen if a) we started
    // a cycle and it completes, b) a cycle already in progress
    // completes, or c) a Full GC happens.

    // If the condition has already been reached, there's no point in
    // actually taking the lock and doing the wait.
    if (g1h->old_marking_cycles_completed() <=
        _old_marking_cycles_completed_before) {
      // The following is largely copied from CMS

      // Transition to native before blocking so we don't hold up
      // safepoints while we wait.
      Thread* thr = Thread::current();
      assert(thr->is_Java_thread(), "invariant");
      JavaThread* jt = (JavaThread*)thr;
      ThreadToNativeFromVM native(jt);

      // Wait on FullGCCount_lock; presumably it is notified when the
      // completed-cycle count is bumped (see G1CollectedHeap) - re-check
      // the condition after every wakeup to tolerate spurious notifies.
      MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
      while (g1h->old_marking_cycles_completed() <=
          _old_marking_cycles_completed_before) {
        FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
      }
    }
  }
}
ysr@777 210
johnc@3218 211 void VM_CGC_Operation::acquire_pending_list_lock() {
johnc@3666 212 assert(_needs_pll, "don't call this otherwise");
johnc@3218 213 // The caller may block while communicating
johnc@3218 214 // with the SLT thread in order to acquire/release the PLL.
johnc@3218 215 ConcurrentMarkThread::slt()->
johnc@3218 216 manipulatePLL(SurrogateLockerThread::acquirePLL);
johnc@3218 217 }
johnc@3218 218
johnc@3218 219 void VM_CGC_Operation::release_and_notify_pending_list_lock() {
johnc@3666 220 assert(_needs_pll, "don't call this otherwise");
johnc@3218 221 // The caller may block while communicating
johnc@3218 222 // with the SLT thread in order to acquire/release the PLL.
johnc@3218 223 ConcurrentMarkThread::slt()->
johnc@3218 224 manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
johnc@3218 225 }
johnc@3218 226
ysr@777 227 void VM_CGC_Operation::doit() {
brutisso@3710 228 gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
brutisso@3710 229 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
brutisso@3710 230 TraceTime t(_printGCMessage, G1Log::fine(), true, gclog_or_tty);
ysr@777 231 SharedHeap* sh = SharedHeap::heap();
ysr@777 232 // This could go away if CollectedHeap gave access to _gc_is_active...
ysr@777 233 if (sh != NULL) {
ysr@777 234 IsGCActiveMark x;
ysr@777 235 _cl->do_void();
ysr@777 236 } else {
ysr@777 237 _cl->do_void();
ysr@777 238 }
ysr@777 239 }
ysr@777 240
// Acquire the locks needed before the VM operation: the pending list
// lock (via the SLT, when needed) and then the Heap_lock. Always
// reports success.
bool VM_CGC_Operation::doit_prologue() {
  // Note the relative order of the locks must match that in
  // VM_GC_Operation::doit_prologue() or deadlocks can occur
  if (_needs_pll) {
    acquire_pending_list_lock();
  }

  Heap_lock->lock();
  // Record that this (requesting) thread holds the Heap_lock for GC, so
  // it is released correctly in doit_epilogue().
  SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
  return true;
}
ysr@777 252
// Release the locks taken in doit_prologue(), in the reverse order:
// Heap_lock first, then the pending list lock (when it was taken).
void VM_CGC_Operation::doit_epilogue() {
  // Note the relative order of the unlocks must match that in
  // VM_GC_Operation::doit_epilogue()
  SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
  Heap_lock->unlock();
  if (_needs_pll) {
    release_and_notify_pending_list_lock();
  }
}

mercurial