Wed, 02 Nov 2011 08:04:23 +0100
7106751: G1: gc/gctests/nativeGC03 crashes VM with SIGSEGV
Summary: _cset_rs_update_cl[] was indexed with values beyond what it is set up to handle.
Reviewed-by: ysr, jmasa, johnc
1 /*
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
28 // A G1RemSet provides ways of iterating over pointers into a selected
29 // collection set.
31 class G1CollectedHeap;
32 class CardTableModRefBarrierSet;
33 class ConcurrentG1Refine;
35 // A G1RemSet in which each heap region has a rem set that records the
36 // external heap references into it. Uses a mod ref bs to track updates,
37 // so that they can be used to update the individual region remsets.
class G1RemSet: public CHeapObj {
protected:
  G1CollectedHeap* _g1;
  // Count of cards refined concurrently (outside a pause); presumably for
  // the summary statistics printed by print_summary_info() — confirm in .cpp.
  unsigned _conc_refine_cards;
  // Number of worker threads used for the parallel phases below.
  size_t n_workers();

protected:
  // Sub-task / synchronization indices used with the _seq_task claim
  // mechanism below.
  enum SomePrivateConstants {
    UpdateRStoMergeSync = 0,
    MergeRStoDoDirtySync = 1,
    DoDirtySync = 2,
    LastSync = 3,

    SeqTask = 0,
    NumSeqTasks = 1
  };

  CardTableModRefBS* _ct_bs;     // the heap's card table (see ct_bs() below)
  SubTasksDone* _seq_task;       // claims the sub-tasks enumerated above
  G1CollectorPolicy* _g1p;

  ConcurrentG1Refine* _cg1r;

  size_t* _cards_scanned;        // per-worker card counts...
  size_t _total_cards_scanned;   // ...and their total (see cardsScanned())

  // Used for caching the closure that is responsible for scanning
  // references into the collection set.
  OopsInHeapRegionClosure** _cset_rs_update_cl;

  // The routine that performs the actual work of refining a dirty
  // card.
  // If check_for_refs_into_cset is true then a true result is returned
  // if the card contains oops that have references into the current
  // collection set.
  bool concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                    bool check_for_refs_into_cset);

public:
  // This is called to reset dual hash tables after the gc pause
  // is finished and the initial hash table is no longer being
  // scanned.
  void cleanupHRRS();

  G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
  ~G1RemSet();

  // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
  // outside the CS (having invoked "blk->set_region" to set the "from"
  // region correctly beforehand.) The "worker_i" param is for the
  // parallel case where the number of the worker thread calling this
  // function can be helpful in partitioning the work to be done. It
  // should be the same as the "i" passed to the calling thread's
  // work(i) function. In the sequential case this param will be ignored.
  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                   int worker_i);

  // Prepare for and cleanup after an oops_into_collection_set_do
  // call. Must call each of these once before and after (in sequential
  // code) any threads call oops_into_collection_set_do. (This offers an
  // opportunity to sequential setup and teardown of structures needed by a
  // parallel iteration over the CS's RS.)
  void prepare_for_oops_into_collection_set_do();
  void cleanup_after_oops_into_collection_set_do();

  // Worker phases of oops_into_collection_set_do: scan the collection
  // set's rem sets, and apply buffered rem set updates (cards with
  // references into the cset go into "into_cset_dcq").
  void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
  void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);

  // The collection-set region at which worker "i" should start scanning
  // (partitions the scanRS work among the workers).
  HeapRegion* calculateStartRegion(int i);

  CardTableModRefBS* ct_bs() { return _ct_bs; }
  size_t cardsScanned() { return _total_cards_scanned; }

  // Record, if necessary, the fact that *p (where "p" is in region "from",
  // which is required to be non-NULL) has changed to a new non-NULL value.
  template <class T> void write_ref(HeapRegion* from, T* p);
  template <class T> void par_write_ref(HeapRegion* from, T* p, int tid);

  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
  // or card, respectively, such that a region or card with a corresponding
  // 0 bit contains no part of any live object. Eliminates any remembered
  // set entries that correspond to dead heap ranges.
  void scrub(BitMap* region_bm, BitMap* card_bm);

  // Like the above, but assumes is called in parallel: "worker_num" is the
  // parallel thread id of the current thread, and "claim_val" is the
  // value that should be used to claim heap regions.
  void scrub_par(BitMap* region_bm, BitMap* card_bm,
                 int worker_num, int claim_val);

  // Refine the card corresponding to "card_ptr". If "sts" is non-NULL,
  // join and leave around parts that must be atomic wrt GC. (NULL means
  // being done at a safepoint.)
  // If check_for_refs_into_cset is true, a true result is returned
  // if the given card contains oops that have references into the
  // current collection set.
  virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                       bool check_for_refs_into_cset);

  // Print any relevant summary info.
  virtual void print_summary_info();

  // Prepare remembered set for verification.
  virtual void prepare_for_verify();
};
145 class CountNonCleanMemRegionClosure: public MemRegionClosure {
146 G1CollectedHeap* _g1;
147 int _n;
148 HeapWord* _start_first;
149 public:
150 CountNonCleanMemRegionClosure(G1CollectedHeap* g1) :
151 _g1(g1), _n(0), _start_first(NULL)
152 {}
153 void do_MemRegion(MemRegion mr);
154 int n() { return _n; };
155 HeapWord* start_first() { return _start_first; }
156 };
// OopClosure that forwards each oop it visits, together with the region
// the reference came from and a worker id, to the rem set's update path
// (do_oop_work is defined in the .cpp/.inline.hpp).
class UpdateRSOopClosure: public OopClosure {
  HeapRegion* _from;   // region containing the references being visited
  G1RemSet* _rs;
  int _worker_i;

  template <class T> void do_oop_work(T* p);

public:
  UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
    _from(NULL), _rs(rs), _worker_i(worker_i)
  {}

  // Must be called before applying the closure; the "from" region is
  // required to be non-NULL.
  void set_from(HeapRegion* from) {
    assert(from != NULL, "from region must be non-NULL");
    _from = from;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  // Override: this closure is idempotent.
  // bool idempotent() { return true; }
  bool apply_to_weak_ref_discovered_field() { return true; }
};
// OopsInHeapRegionClosure that hands each visited oop straight to the
// rem set (do_oop_work is defined elsewhere; "Immediate" suggests updates
// bypass the deferred/buffered path — confirm against the .cpp).
class UpdateRSetImmediate: public OopsInHeapRegionClosure {
private:
  G1RemSet* _g1_rem_set;

  template <class T> void do_oop_work(T* p);
public:
  UpdateRSetImmediate(G1RemSet* rs) :
    _g1_rem_set(rs) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop( oop* p) { do_oop_work(p); }
};
196 class UpdateRSOrPushRefOopClosure: public OopClosure {
197 G1CollectedHeap* _g1;
198 G1RemSet* _g1_rem_set;
199 HeapRegion* _from;
200 OopsInHeapRegionClosure* _push_ref_cl;
201 bool _record_refs_into_cset;
202 int _worker_i;
204 template <class T> void do_oop_work(T* p);
206 public:
207 UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
208 G1RemSet* rs,
209 OopsInHeapRegionClosure* push_ref_cl,
210 bool record_refs_into_cset,
211 int worker_i = 0) :
212 _g1(g1h),
213 _g1_rem_set(rs),
214 _from(NULL),
215 _record_refs_into_cset(record_refs_into_cset),
216 _push_ref_cl(push_ref_cl),
217 _worker_i(worker_i) { }
219 void set_from(HeapRegion* from) {
220 assert(from != NULL, "from region must be non-NULL");
221 _from = from;
222 }
224 bool self_forwarded(oop obj) {
225 bool result = (obj->is_forwarded() && (obj->forwardee()== obj));
226 return result;
227 }
229 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
230 virtual void do_oop(oop* p) { do_oop_work(p); }
232 bool apply_to_weak_ref_discovered_field() { return true; }
233 };
236 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP