Mon, 16 Jan 2012 22:10:05 +0100
6976060: G1: humongous object allocations should initiate marking cycles when necessary
Reviewed-by: tonyp, johnc
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "runtime/interfaceSupport.hpp"

VM_G1CollectForAllocation::VM_G1CollectForAllocation(
                                              unsigned int gc_count_before,
                                              size_t word_size)
  : VM_G1OperationWithAllocRequest(gc_count_before, word_size) {
  guarantee(word_size > 0, "an allocation should always be requested");
}

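// Executed by the VM thread at a safepoint, on behalf of a mutator thread
// whose allocation of _word_size words has already failed.
// satisfy_failed_allocation() will typically do a collection and then retry
// the allocation; _pause_succeeded records whether the pause itself ran.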
void VM_G1CollectForAllocation::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
  assert(_result == NULL || _pause_succeeded,
         "if we get back a result, the pause should have succeeded");
}

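// Performs a full, stop-the-world collection of the entire heap, with the
// requested GC cause set for the duration of the collection.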
void VM_G1CollectFull::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  GCCauseSetter x(g1h, _gc_cause);
  g1h->do_full_collection(false /* clear_all_soft_refs */);
}

VM_G1IncCollectionPause::VM_G1IncCollectionPause(
                                      unsigned int   gc_count_before,
                                      size_t         word_size,
                                      bool           should_initiate_conc_mark,
                                      double         target_pause_time_ms,
                                      GCCause::Cause gc_cause)
  : VM_G1OperationWithAllocRequest(gc_count_before, word_size),
    _should_initiate_conc_mark(should_initiate_conc_mark),
    _target_pause_time_ms(target_pause_time_ms),
    _full_collections_completed_before(0) {
  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
            "we can only request an allocation if the GC cause is for "
            "an incremental GC pause");
  _gc_cause = gc_cause;
}

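// Illustrative sketch (not part of this file): a hypothetical caller that
// wants a humongous object allocation to kick off a marking cycle could
// submit this operation roughly as follows. `gc_count_before` is assumed to
// have been read earlier under the Heap_lock; the zero word size is required
// by the guarantee above for any cause other than _g1_inc_collection_pause.
//
//   VM_G1IncCollectionPause op(gc_count_before,
//                              0     /* word_size */,
//                              true  /* should_initiate_conc_mark */,
//                              (double) MaxGCPauseMillis,
//                              GCCause::_g1_humongous_allocation);
//   VMThread::execute(&op);
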
void VM_G1IncCollectionPause::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(!_should_initiate_conc_mark ||
         ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
          _gc_cause == GCCause::_g1_humongous_allocation),
         "only a GC locker, a System.gc(), or a humongous allocation "
         "induced GC should start a concurrent marking cycle");

  if (_word_size > 0) {
    // An allocation has been requested. So, try to do that first.
    _result = g1h->attempt_allocation_at_safepoint(_word_size,
                                     false /* expect_null_cur_alloc_region */);
    if (_result != NULL) {
      // If we can successfully allocate before we actually do the
      // pause then we will consider this pause successful.
      _pause_succeeded = true;
      return;
    }
  }

  GCCauseSetter x(g1h, _gc_cause);
  if (_should_initiate_conc_mark) {
    // It's safer to read full_collections_completed() here, given
    // that no one else will be updating it concurrently. Since we'll
    // only need it if we're initiating a marking cycle, there is no
    // point in setting it earlier.
    _full_collections_completed_before = g1h->full_collections_completed();

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    // The above routine returns true if we were able to force the
    // next GC pause to be an initial mark; it returns false if a
    // marking cycle is already in progress.
    //
    // If a marking cycle is already in progress just return and skip
    // the pause - the requesting thread should block in doit_epilogue
    // until the marking cycle is complete.
    if (!res) {
      assert(_word_size == 0, "ExplicitGCInvokesConcurrent shouldn't be allocating");
      return;
    }
  }

  _pause_succeeded =
    g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
  if (_pause_succeeded && _word_size > 0) {
    // An allocation had been requested.
    _result = g1h->attempt_allocation_at_safepoint(_word_size,
                                      true /* expect_null_cur_alloc_region */);
  } else {
    assert(_result == NULL, "invariant");
  }
}

void VM_G1IncCollectionPause::doit_epilogue() {
  VM_GC_Operation::doit_epilogue();

  // If the pause was initiated by a System.gc() and
  // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
  // that just started (or maybe one that was already in progress) to
  // finish.
  if (_gc_cause == GCCause::_java_lang_system_gc &&
      _should_initiate_conc_mark) {
    assert(ExplicitGCInvokesConcurrent,
           "the only way to be here is if ExplicitGCInvokesConcurrent is set");

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    // In the doit() method we saved g1h->full_collections_completed()
    // in the _full_collections_completed_before field. We have to
    // wait until we observe that g1h->full_collections_completed()
    // has increased by at least one. This can happen if a) we started
    // a cycle and it completes, b) a cycle already in progress
    // completes, or c) a Full GC happens.

    // If the condition has already been reached, there's no point in
    // actually taking the lock and doing the wait.
    if (g1h->full_collections_completed() <=
                                        _full_collections_completed_before) {
      // The following is largely copied from CMS

      Thread* thr = Thread::current();
      assert(thr->is_Java_thread(), "invariant");
      JavaThread* jt = (JavaThread*)thr;
      ThreadToNativeFromVM native(jt);

      MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
      while (g1h->full_collections_completed() <=
                                        _full_collections_completed_before) {
        FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
      }
    }
  }
}

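// The java.lang.ref pending list lock (PLL) is a Java-level lock and must be
// taken by a Java thread. The concurrent mark thread is not a Java thread,
// so the SurrogateLockerThread (SLT) acquires and releases the lock on its
// behalf; the two helpers below simply forward the request to it.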
void VM_CGC_Operation::acquire_pending_list_lock() {
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  ConcurrentMarkThread::slt()->
    manipulatePLL(SurrogateLockerThread::acquirePLL);
}

void VM_CGC_Operation::release_and_notify_pending_list_lock() {
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  ConcurrentMarkThread::slt()->
    manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
}

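// Runs the void closure handed in by the concurrent mark thread (e.g. the
// remark or cleanup step of the cycle) in the VM thread at a safepoint,
// with the heap flagged as GC-active while the closure executes.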
void VM_CGC_Operation::doit() {
  gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  TraceTime t(_printGCMessage, PrintGC, true, gclog_or_tty);
  SharedHeap* sh = SharedHeap::heap();
  // This could go away if CollectedHeap gave access to _gc_is_active...
  if (sh != NULL) {
    IsGCActiveMark x;
    _cl->do_void();
  } else {
    _cl->do_void();
  }
}

bool VM_CGC_Operation::doit_prologue() {
  // Note the relative order of the locks must match that in
  // VM_GC_Operation::doit_prologue() or deadlocks can occur.
  acquire_pending_list_lock();

  Heap_lock->lock();
  SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
  return true;
}

void VM_CGC_Operation::doit_epilogue() {
  // Note the relative order of the unlocks must match that in
  // VM_GC_Operation::doit_epilogue().
  SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();
}