Fri, 29 Apr 2011 14:59:04 -0400
7035144: G1: nightly failure: Non-dirty cards in region that should be dirty (failures still exist...)
Summary: We should only undirty cards after we decide that they are not on a young region, not before. The fix also includes improvements to the verify_dirty_region() method, which now prints out which cards were not found dirty.
Reviewed-by: johnc, brutisso
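
The summary describes two things: cards must only be undirtied after we have decided they are not on a young region, and verify_dirty_region() should print out which cards were not found dirty. As a rough illustration of the latter, a card-table walk over a region that is expected to be fully dirty might look like the sketch below; the helper name verify_region_cards_dirty(), its signature, and the log format are assumptions for illustration, not code from this changeset.

// Illustrative sketch only (not the actual patch): walk the cards spanning
// [hr->bottom(), hr->top()) and print every card that should be dirty but
// is not.
static void verify_region_cards_dirty(HeapRegion* hr, CardTableModRefBS* ct_bs) {
  for (HeapWord* p = hr->bottom(); p < hr->top();
       p += CardTableModRefBS::card_size_in_words) {
    jbyte* card = ct_bs->byte_for(p);
    if (*card != CardTableModRefBS::dirty_card_val()) {
      gclog_or_tty->print_cr("card " PTR_FORMAT " covering " PTR_FORMAT
                             " in region [" PTR_FORMAT ", " PTR_FORMAT ") "
                             "is not dirty",
                             card, p, hr->bottom(), hr->top());
    }
  }
}
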
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "runtime/interfaceSupport.hpp"

VM_G1CollectForAllocation::VM_G1CollectForAllocation(
                             unsigned int gc_count_before,
                             size_t word_size)
  : VM_G1OperationWithAllocRequest(gc_count_before, word_size) {
  guarantee(word_size > 0, "an allocation should always be requested");
}

void VM_G1CollectForAllocation::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
  assert(_result == NULL || _pause_succeeded,
         "if we get back a result, the pause should have succeeded");
}

void VM_G1CollectFull::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  GCCauseSetter x(g1h, _gc_cause);
  g1h->do_full_collection(false /* clear_all_soft_refs */);
}

VM_G1IncCollectionPause::VM_G1IncCollectionPause(
                             unsigned int gc_count_before,
                             size_t word_size,
                             bool should_initiate_conc_mark,
                             double target_pause_time_ms,
                             GCCause::Cause gc_cause)
  : VM_G1OperationWithAllocRequest(gc_count_before, word_size),
    _should_initiate_conc_mark(should_initiate_conc_mark),
    _target_pause_time_ms(target_pause_time_ms),
    _full_collections_completed_before(0) {
  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
            "we can only request an allocation if the GC cause is for "
            "an incremental GC pause");
  _gc_cause = gc_cause;
}

void VM_G1IncCollectionPause::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(!_should_initiate_conc_mark ||
  ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
   (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
         "only a GC locker or a System.gc() induced GC should start a cycle");

  if (_word_size > 0) {
    // An allocation has been requested. So, try to do that first.
    _result = g1h->attempt_allocation_at_safepoint(_word_size,
                                     false /* expect_null_cur_alloc_region */);
    if (_result != NULL) {
      // If we can successfully allocate before we actually do the
      // pause then we will consider this pause successful.
      _pause_succeeded = true;
      return;
    }
  }

  GCCauseSetter x(g1h, _gc_cause);
  if (_should_initiate_conc_mark) {
    // It's safer to read full_collections_completed() here, given
    // that no one else will be updating it concurrently. Since we'll
    // only need it if we're initiating a marking cycle, no point in
    // setting it earlier.
    _full_collections_completed_before = g1h->full_collections_completed();

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
  }

  _pause_succeeded =
    g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
  if (_pause_succeeded && _word_size > 0) {
    // An allocation had been requested.
    _result = g1h->attempt_allocation_at_safepoint(_word_size,
                                      true /* expect_null_cur_alloc_region */);
  } else {
    assert(_result == NULL, "invariant");
  }
}

void VM_G1IncCollectionPause::doit_epilogue() {
  VM_GC_Operation::doit_epilogue();

  // If the pause was initiated by a System.gc() and
  // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
  // that just started (or maybe one that was already in progress) to
  // finish.
  if (_gc_cause == GCCause::_java_lang_system_gc &&
      _should_initiate_conc_mark) {
    assert(ExplicitGCInvokesConcurrent,
           "the only way to be here is if ExplicitGCInvokesConcurrent is set");

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    // In the doit() method we saved g1h->full_collections_completed()
    // in the _full_collections_completed_before field. We have to
    // wait until we observe that g1h->full_collections_completed()
    // has increased by at least one. This can happen if a) we started
    // a cycle and it completes, b) a cycle already in progress
    // completes, or c) a Full GC happens.

    // If the condition has already been reached, there's no point in
    // actually taking the lock and doing the wait.
    if (g1h->full_collections_completed() <=
        _full_collections_completed_before) {
      // The following is largely copied from CMS

      Thread* thr = Thread::current();
      assert(thr->is_Java_thread(), "invariant");
      JavaThread* jt = (JavaThread*)thr;
      ThreadToNativeFromVM native(jt);

      MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
      while (g1h->full_collections_completed() <=
             _full_collections_completed_before) {
        FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
      }
    }
  }
}

void VM_CGC_Operation::doit() {
  gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  TraceTime t(_printGCMessage, PrintGC, true, gclog_or_tty);
  SharedHeap* sh = SharedHeap::heap();
  // This could go away if CollectedHeap gave access to _gc_is_active...
  if (sh != NULL) {
    IsGCActiveMark x;
    _cl->do_void();
  } else {
    _cl->do_void();
  }
}

bool VM_CGC_Operation::doit_prologue() {
  // Take the Heap_lock for the duration of the concurrent-GC VM operation
  // and note in the SharedHeap that this thread holds it on behalf of GC.
  Heap_lock->lock();
  SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
  return true;
}

void VM_CGC_Operation::doit_epilogue() {
  SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
  Heap_lock->unlock();
}
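
For context on how these operations are typically used, the sketch below shows a caller handing an incremental pause request to the VM thread, which runs doit() at a safepoint and doit_epilogue() afterwards. The wrapper function itself is hypothetical; the constructor arguments match the ones defined above, and the prologue_succeeded()/pause_succeeded()/result() accessors are assumed to be the ones declared in the accompanying headers.

// Hypothetical caller-side sketch, not part of this file: request an
// incremental pause for an allocation of 'word_size' words.
HeapWord* request_g1_inc_collection_pause(unsigned int gc_count_before,
                                          size_t word_size,
                                          double target_pause_time_ms) {
  VM_G1IncCollectionPause op(gc_count_before,
                             word_size,
                             false /* should_initiate_conc_mark */,
                             target_pause_time_ms,
                             GCCause::_g1_inc_collection_pause);
  VMThread::execute(&op);  // blocks until doit() and doit_epilogue() return
  if (op.prologue_succeeded() && op.pause_succeeded()) {
    // Either the pause ran, or the allocation was satisfied before the
    // pause was needed; result() is the allocated block, possibly NULL.
    return op.result();
  }
  return NULL;
}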