Mon, 02 Aug 2010 12:51:43 -0700
6814437: G1: remove the _new_refs array
Summary: The per-worker _new_refs array is used to hold references that point into the collection set. It is populated during RSet updating and subsequently processed. In the event of an evacuation failure it is processed again to recreate the RSets of regions in the collection set. Remove the per-worker _new_refs array by processing the references directly. Use a DirtyCardQueue to hold the cards containing the references so that the RSets of regions in the collection set can be recreated when handling an evacuation failure.
Reviewed-by: iveresov, jmasa, tonyp
1 /*
2 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_vm_operations_g1.cpp.incl"
28 void VM_G1CollectForAllocation::doit() {
29 JvmtiGCForAllocationMarker jgcm;
30 G1CollectedHeap* g1h = G1CollectedHeap::heap();
31 _res = g1h->satisfy_failed_allocation(_size);
32 assert(g1h->is_in_or_null(_res), "result not in heap");
33 }
35 void VM_G1CollectFull::doit() {
36 JvmtiGCFullMarker jgcm;
37 G1CollectedHeap* g1h = G1CollectedHeap::heap();
38 GCCauseSetter x(g1h, _gc_cause);
39 g1h->do_full_collection(false /* clear_all_soft_refs */);
40 }
42 void VM_G1IncCollectionPause::doit() {
43 JvmtiGCForAllocationMarker jgcm;
44 G1CollectedHeap* g1h = G1CollectedHeap::heap();
45 assert(!_should_initiate_conc_mark ||
46 ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
47 (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
48 "only a GC locker or a System.gc() induced GC should start a cycle");
50 GCCauseSetter x(g1h, _gc_cause);
51 if (_should_initiate_conc_mark) {
52 // It's safer to read full_collections_completed() here, given
53 // that noone else will be updating it concurrently. Since we'll
54 // only need it if we're initiating a marking cycle, no point in
55 // setting it earlier.
56 _full_collections_completed_before = g1h->full_collections_completed();
58 // At this point we are supposed to start a concurrent cycle. We
59 // will do so if one is not already in progress.
60 bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
61 }
62 g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
63 }
// Epilogue for an incremental pause. If the pause was requested by
// System.gc() with +ExplicitGCInvokesConcurrent and initiated a
// concurrent cycle, block the calling Java thread until a full
// collection cycle has been observed to complete.
void VM_G1IncCollectionPause::doit_epilogue() {
  VM_GC_Operation::doit_epilogue();

  // If the pause was initiated by a System.gc() and
  // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
  // that just started (or maybe one that was already in progress) to
  // finish.
  if (_gc_cause == GCCause::_java_lang_system_gc &&
      _should_initiate_conc_mark) {
    assert(ExplicitGCInvokesConcurrent,
           "the only way to be here is if ExplicitGCInvokesConcurrent is set");

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    // In the doit() method we saved g1h->full_collections_completed()
    // in the _full_collections_completed_before field. We have to
    // wait until we observe that g1h->full_collections_completed()
    // has increased by at least one. This can happen if a) we started
    // a cycle and it completes, b) a cycle already in progress
    // completes, or c) a Full GC happens.

    // If the condition has already been reached, there's no point in
    // actually taking the lock and doing the wait.
    if (g1h->full_collections_completed() <=
        _full_collections_completed_before) {
      // The following is largely copied from CMS

      Thread* thr = Thread::current();
      assert(thr->is_Java_thread(), "invariant");
      JavaThread* jt = (JavaThread*)thr;
      // Transition this thread to the native state before blocking so
      // it does not hold up safepoints while it waits.
      ThreadToNativeFromVM native(jt);

      MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
      // Re-check the condition under the lock and wait until the
      // completed-collection counter advances past the value recorded
      // in doit(); the loop guards against spurious wakeups.
      while (g1h->full_collections_completed() <=
             _full_collections_completed_before) {
        FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
      }
    }
  }
}
106 void VM_CGC_Operation::doit() {
107 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
108 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
109 TraceTime t(_printGCMessage, PrintGC, true, gclog_or_tty);
110 SharedHeap* sh = SharedHeap::heap();
111 // This could go away if CollectedHeap gave access to _gc_is_active...
112 if (sh != NULL) {
113 IsGCActiveMark x;
114 _cl->do_void();
115 } else {
116 _cl->do_void();
117 }
118 }
120 bool VM_CGC_Operation::doit_prologue() {
121 Heap_lock->lock();
122 SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
123 return true;
124 }
126 void VM_CGC_Operation::doit_epilogue() {
127 SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
128 Heap_lock->unlock();
129 }