Fri, 06 Aug 2010 10:17:21 -0700
6930581: G1: assert(ParallelGCThreads > 1 || n_yielded() == _hrrs->occupied(),"Should have yielded all the ..
Summary: During RSet updating, when ParallelGCThreads is zero, references that point into the collection set are added directly to the referenced region's RSet. This can cause the sparse table in the RSet to expand. RSet scanning and the "occupied" routine will then operate on different instances of the sparse table causing the assert to trip. This may also cause some cards added post expansion to be missed during RSet scanning. When ParallelGCThreads is non-zero such references are recorded on the "references to be scanned" queue and the card containing the reference is recorded in a dirty card queue for use in the event of an evacuation failure. Employ the parallel code in the serial case to avoid expanding the RSets of regions in the collection set.
Reviewed-by: iveresov, ysr, tonyp
1 /*
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 // A G1RemSet provides ways of iterating over pointers into a selected
26 // collection set.
28 class G1CollectedHeap;
29 class CardTableModRefBarrierSet;
30 class HRInto_G1RemSet;
31 class ConcurrentG1Refine;
33 class G1RemSet: public CHeapObj {
34 protected:
35 G1CollectedHeap* _g1;
36 unsigned _conc_refine_cards;
37 size_t n_workers();
39 public:
40 G1RemSet(G1CollectedHeap* g1) :
41 _g1(g1), _conc_refine_cards(0)
42 {}
44 // Invoke "blk->do_oop" on all pointers into the CS in object in regions
45 // outside the CS (having invoked "blk->set_region" to set the "from"
46 // region correctly beforehand.) The "worker_i" param is for the
47 // parallel case where the number of the worker thread calling this
48 // function can be helpful in partitioning the work to be done. It
49 // should be the same as the "i" passed to the calling thread's
50 // work(i) function. In the sequential case this param will be ingored.
51 virtual void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
52 int worker_i) = 0;
54 // Prepare for and cleanup after an oops_into_collection_set_do
55 // call. Must call each of these once before and after (in sequential
56 // code) any threads call oops into collection set do. (This offers an
57 // opportunity to sequential setup and teardown of structures needed by a
58 // parallel iteration over the CS's RS.)
59 virtual void prepare_for_oops_into_collection_set_do() = 0;
60 virtual void cleanup_after_oops_into_collection_set_do() = 0;
62 // If "this" is of the given subtype, return "this", else "NULL".
63 virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; }
65 // Record, if necessary, the fact that *p (where "p" is in region "from",
66 // and is, a fortiori, required to be non-NULL) has changed to its new value.
67 virtual void write_ref(HeapRegion* from, oop* p) = 0;
68 virtual void write_ref(HeapRegion* from, narrowOop* p) = 0;
69 virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0;
70 virtual void par_write_ref(HeapRegion* from, narrowOop* p, int tid) = 0;
72 // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
73 // or card, respectively, such that a region or card with a corresponding
74 // 0 bit contains no part of any live object. Eliminates any remembered
75 // set entries that correspond to dead heap ranges.
76 virtual void scrub(BitMap* region_bm, BitMap* card_bm) = 0;
77 // Like the above, but assumes is called in parallel: "worker_num" is the
78 // parallel thread id of the current thread, and "claim_val" is the
79 // value that should be used to claim heap regions.
80 virtual void scrub_par(BitMap* region_bm, BitMap* card_bm,
81 int worker_num, int claim_val) = 0;
83 // Refine the card corresponding to "card_ptr". If "sts" is non-NULL,
84 // join and leave around parts that must be atomic wrt GC. (NULL means
85 // being done at a safepoint.)
86 // With some implementations of this routine, when check_for_refs_into_cset
87 // is true, a true result may be returned if the given card contains oops
88 // that have references into the current collection set.
89 virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
90 bool check_for_refs_into_cset) {
91 return false;
92 }
94 // Print any relevant summary info.
95 virtual void print_summary_info() {}
97 // Prepare remebered set for verification.
98 virtual void prepare_for_verify() {};
99 };
102 // The simplest possible G1RemSet: iterates over all objects in non-CS
103 // regions, searching for pointers into the CS.
104 class StupidG1RemSet: public G1RemSet {
105 public:
106 StupidG1RemSet(G1CollectedHeap* g1) : G1RemSet(g1) {}
108 void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
109 int worker_i);
111 void prepare_for_oops_into_collection_set_do() {}
112 void cleanup_after_oops_into_collection_set_do() {}
114 // Nothing is necessary in the version below.
115 void write_ref(HeapRegion* from, oop* p) {}
116 void write_ref(HeapRegion* from, narrowOop* p) {}
117 void par_write_ref(HeapRegion* from, oop* p, int tid) {}
118 void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {}
120 void scrub(BitMap* region_bm, BitMap* card_bm) {}
121 void scrub_par(BitMap* region_bm, BitMap* card_bm,
122 int worker_num, int claim_val) {}
124 };
126 // A G1RemSet in which each heap region has a rem set that records the
127 // external heap references into it. Uses a mod ref bs to track updates,
128 // so that they can be used to update the individual region remsets.
class HRInto_G1RemSet: public G1RemSet {
protected:
  // Synchronization points / task ids used with _seq_task when
  // coordinating the phases of RSet processing.
  enum SomePrivateConstants {
    UpdateRStoMergeSync = 0,
    MergeRStoDoDirtySync = 1,
    DoDirtySync = 2,
    LastSync = 3,

    SeqTask = 0,
    NumSeqTasks = 1
  };

  CardTableModRefBS* _ct_bs;       // the heap's card-table barrier set
  SubTasksDone* _seq_task;         // presumably claims the single sequential task — TODO confirm
  G1CollectorPolicy* _g1p;

  ConcurrentG1Refine* _cg1r;

  size_t* _cards_scanned;          // per-worker scanned-card counts
  size_t _total_cards_scanned;     // aggregate; returned by cardsScanned()

  // _traversal_in_progress is "true" iff a traversal is in progress.

  bool _traversal_in_progress;
  void set_traversal(bool b) { _traversal_in_progress = b; }

  // Used for caching the closure that is responsible for scanning
  // references into the collection set.
  OopsInHeapRegionClosure** _cset_rs_update_cl;

  // The routine that performs the actual work of refining a dirty
  // card.
  // If check_for_refs_into_cset is true then a true result is returned
  // if the card contains oops that have references into the current
  // collection set.
  bool concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                    bool check_for_refs_into_cset);

protected:
  // Non-virtual workhorses behind the virtual write_ref/par_write_ref
  // entry points, templatified for narrow vs wide oop.
  template <class T> void write_ref_nv(HeapRegion* from, T* p);
  template <class T> void par_write_ref_nv(HeapRegion* from, T* p, int tid);

public:
  // This is called to reset dual hash tables after the gc pause
  // is finished and the initial hash table is no longer being
  // scanned.
  void cleanupHRRS();

  HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
  ~HRInto_G1RemSet();

  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                   int worker_i);

  void prepare_for_oops_into_collection_set_do();
  void cleanup_after_oops_into_collection_set_do();
  // Scan the remembered sets of the CS regions, applying "oc".
  void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
  template <class T> void scanNewRefsRS_work(OopsInHeapRegionClosure* oc, int worker_i);
  // Dispatches to the narrow- or wide-oop workhorse depending on
  // whether compressed oops are in use.
  void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i) {
    if (UseCompressedOops) {
      scanNewRefsRS_work<narrowOop>(oc, worker_i);
    } else {
      scanNewRefsRS_work<oop>(oc, worker_i);
    }
  }
  void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
  // Start region for worker "i"'s partition of the RSet scan.
  HeapRegion* calculateStartRegion(int i);

  // Checked downcast support (see G1RemSet::as_HRInto_G1RemSet).
  HRInto_G1RemSet* as_HRInto_G1RemSet() { return this; }

  CardTableModRefBS* ct_bs() { return _ct_bs; }
  // Total cards scanned over all workers in the last scan.
  size_t cardsScanned() { return _total_cards_scanned; }

  // Record, if necessary, the fact that *p (where "p" is in region "from",
  // which is required to be non-NULL) has changed to a new non-NULL value.
  // [Below the virtual version calls a non-virtual protected
  // workhorse that is templatified for narrow vs wide oop.]
  inline void write_ref(HeapRegion* from, oop* p) {
    write_ref_nv(from, p);
  }
  inline void write_ref(HeapRegion* from, narrowOop* p) {
    write_ref_nv(from, p);
  }
  inline void par_write_ref(HeapRegion* from, oop* p, int tid) {
    par_write_ref_nv(from, p, tid);
  }
  inline void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {
    par_write_ref_nv(from, p, tid);
  }

  // Returns whether "obj" was forwarded to itself (evacuation failure).
  bool self_forwarded(oop obj);

  void scrub(BitMap* region_bm, BitMap* card_bm);
  void scrub_par(BitMap* region_bm, BitMap* card_bm,
                 int worker_num, int claim_val);

  // If check_for_refs_into_cset is true then a true result is returned
  // if the card contains oops that have references into the current
  // collection set.
  virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                       bool check_for_refs_into_cset);

  virtual void print_summary_info();
  virtual void prepare_for_verify();
};
236 #define G1_REM_SET_LOGGING 0
238 class CountNonCleanMemRegionClosure: public MemRegionClosure {
239 G1CollectedHeap* _g1;
240 int _n;
241 HeapWord* _start_first;
242 public:
243 CountNonCleanMemRegionClosure(G1CollectedHeap* g1) :
244 _g1(g1), _n(0), _start_first(NULL)
245 {}
246 void do_MemRegion(MemRegion mr);
247 int n() { return _n; };
248 HeapWord* start_first() { return _start_first; }
249 };
251 class UpdateRSOopClosure: public OopClosure {
252 HeapRegion* _from;
253 HRInto_G1RemSet* _rs;
254 int _worker_i;
256 template <class T> void do_oop_work(T* p);
258 public:
259 UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
260 _from(NULL), _rs(rs), _worker_i(worker_i) {
261 guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
262 }
264 void set_from(HeapRegion* from) {
265 assert(from != NULL, "from region must be non-NULL");
266 _from = from;
267 }
269 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
270 virtual void do_oop(oop* p) { do_oop_work(p); }
272 // Override: this closure is idempotent.
273 // bool idempotent() { return true; }
274 bool apply_to_weak_ref_discovered_field() { return true; }
275 };
277 class UpdateRSetImmediate: public OopsInHeapRegionClosure {
278 private:
279 G1RemSet* _g1_rem_set;
281 template <class T> void do_oop_work(T* p);
282 public:
283 UpdateRSetImmediate(G1RemSet* rs) :
284 _g1_rem_set(rs) {}
286 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
287 virtual void do_oop( oop* p) { do_oop_work(p); }
288 };