Mon, 03 Aug 2009 12:59:30 -0700
6865703: G1: Parallelize hot card cache cleanup
Summary: Have the GC worker threads clear the hot card cache in parallel by having each worker thread claim a chunk of the card cache and process the cards in that chunk. The size of the chunks that each thread will claim is determined at VM initialization from the size of the card cache and the number of worker threads.
Reviewed-by: jmasa, tonyp
1 /*
2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
// Forward declarations for the closure hierarchy declared below.
// (The duplicate forward declaration of G1RemSet has been removed.)
class HeapRegion;
class G1CollectedHeap;
class G1RemSet;
class HRInto_G1RemSet;
class ConcurrentMark;
class DirtyCardToOopClosure;
class CMBitMap;
class CMMarkStack;
class G1ParScanThreadState;
// A class that scans oops in a given heap region (much as OopsInGenClosure
// scans oops in a generation.)
class OopsInHeapRegionClosure: public OopsInGenClosure {
protected:
  HeapRegion* _from;  // The region whose oops are currently being scanned.
public:
  // Record the region the subsequently-applied oops come from.
  virtual void set_region(HeapRegion* from) { _from = from; }
};
// Common base for the per-worker-thread G1 evacuation closures.
// Caches pointers to the heap, remembered set, concurrent marker and the
// worker's scan state so subclasses don't have to look them up repeatedly.
class G1ParClosureSuper : public OopsInHeapRegionClosure {
protected:
  G1CollectedHeap* _g1;
  G1RemSet* _g1_rem;
  ConcurrentMark* _cm;
  G1ParScanThreadState* _par_scan_state;
public:
  // Defined out of line (in the corresponding .cpp file).
  G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
  // These closures are also applied to the discovered field of weak refs.
  bool apply_to_weak_ref_discovered_field() { return true; }
};
// Closure applied to the oops of an evacuated object; the actual scanning
// work is in do_oop_nv, which is defined elsewhere.  The virtual do_oop
// overloads simply forward to the non-virtual template version.
class G1ParScanClosure : public G1ParClosureSuper {
public:
  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
    G1ParClosureSuper(g1, par_scan_state) { }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
// Bit used to tag a task-queue entry as a "partial array" entry rather
// than a plain oop pointer.
#define G1_PARTIAL_ARRAY_MASK 0x2

// Returns true iff the given queue entry carries the partial-array tag.
// (The mask is a single bit, so testing for non-zero is equivalent to
// testing for equality with the mask.)
template <class T> inline bool has_partial_array_mask(T* ref) {
  return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != 0;
}
71 template <class T> inline T* set_partial_array_mask(T obj) {
72 assert(((uintptr_t)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
73 return (T*) ((uintptr_t)obj | G1_PARTIAL_ARRAY_MASK);
74 }
76 template <class T> inline oop clear_partial_array_mask(T* ref) {
77 return oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
78 }
// Closure that processes partial-array task-queue entries (entries tagged
// with G1_PARTIAL_ARRAY_MASK above); owns its own G1ParScanClosure for
// scanning the claimed portion of the array.  do_oop_nv is defined
// elsewhere.
class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
  G1ParScanClosure _scanner;  // used to scan the elements of the claimed chunk
public:
  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state) { }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
// Adds forwarding-mark and copy-to-survivor-space support on top of
// G1ParClosureSuper.  The scan closure is supplied (and owned) by the
// subclass -- see G1ParCopyClosure, which passes the address of its own
// member.  mark_forwardee and copy_to_survivor_space are defined
// elsewhere.
class G1ParCopyHelper : public G1ParClosureSuper {
  G1ParScanClosure *_scanner;  // non-owning; points into the subclass
protected:
  template <class T> void mark_forwardee(T* p);
  oop copy_to_survivor_space(oop obj);
public:
  G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
                  G1ParScanClosure *scanner) :
    G1ParClosureSuper(g1, par_scan_state), _scanner(scanner) { }
};
102 template<bool do_gen_barrier, G1Barrier barrier,
103 bool do_mark_forwardee, bool skip_cset_test>
104 class G1ParCopyClosure : public G1ParCopyHelper {
105 G1ParScanClosure _scanner;
106 template <class T> void do_oop_work(T* p);
107 public:
108 G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
109 _scanner(g1, par_scan_state), G1ParCopyHelper(g1, par_scan_state, &_scanner) { }
110 template <class T> void do_oop_nv(T* p) {
111 do_oop_work(p);
112 if (do_mark_forwardee)
113 mark_forwardee(p);
114 }
115 virtual void do_oop(oop* p) { do_oop_nv(p); }
116 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
117 };
// Concrete instantiations of G1ParCopyClosure for the various root- and
// remembered-set-scanning cases.
typedef G1ParCopyClosure<false, G1BarrierNone, false, false> G1ParScanExtRootClosure;
typedef G1ParCopyClosure<true, G1BarrierNone, false, false> G1ParScanPermClosure;
typedef G1ParCopyClosure<false, G1BarrierRS, false, false> G1ParScanHeapRSClosure;
typedef G1ParCopyClosure<false, G1BarrierNone, true, false> G1ParScanAndMarkExtRootClosure;
typedef G1ParCopyClosure<true, G1BarrierNone, true, false> G1ParScanAndMarkPermClosure;
typedef G1ParCopyClosure<false, G1BarrierRS, true, false> G1ParScanAndMarkHeapRSClosure;
// This is the only case when we set skip_cset_test. Basically, this
// closure is (should?) only called directly while we're draining
// the overflow and task queues. In that case we know that the
// reference in question points into the collection set, otherwise we
// would not have pushed it on the queue. The following is defined in
// g1_specialized_oop_closures.hpp.
// typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
// We need a separate closure to handle references during evacuation
// failure processing, as we cannot assume that the reference already
// points into the collection set (like G1ParScanHeapEvacClosure does).
typedef G1ParCopyClosure<false, G1BarrierEvac, false, false> G1ParScanHeapEvacFailureClosure;
137 class FilterIntoCSClosure: public OopClosure {
138 G1CollectedHeap* _g1;
139 OopClosure* _oc;
140 DirtyCardToOopClosure* _dcto_cl;
141 public:
142 FilterIntoCSClosure( DirtyCardToOopClosure* dcto_cl,
143 G1CollectedHeap* g1, OopClosure* oc) :
144 _dcto_cl(dcto_cl), _g1(g1), _oc(oc)
145 {}
146 template <class T> void do_oop_nv(T* p);
147 virtual void do_oop(oop* p) { do_oop_nv(p); }
148 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
149 bool apply_to_weak_ref_discovered_field() { return true; }
150 bool do_header() { return false; }
151 };
// Wraps an OopsInHeapRegionClosure; the filtering itself is in
// do_oop_nv, defined elsewhere (presumably forwarding only references
// that point into the collection set -- verify against the .cpp file).
class FilterInHeapRegionAndIntoCSClosure : public OopsInHeapRegionClosure {
  G1CollectedHeap* _g1;
  OopsInHeapRegionClosure* _oc;
public:
  FilterInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1,
                                     OopsInHeapRegionClosure* oc) :
    _g1(g1), _oc(oc)
  {}
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool do_header() { return false; }
  // Propagate the source region to the wrapped closure.
  void set_region(HeapRegion* from) {
    _oc->set_region(from);
  }
};
171 class FilterAndMarkInHeapRegionAndIntoCSClosure : public OopsInHeapRegionClosure {
172 G1CollectedHeap* _g1;
173 ConcurrentMark* _cm;
174 OopsInHeapRegionClosure* _oc;
175 public:
176 FilterAndMarkInHeapRegionAndIntoCSClosure(G1CollectedHeap* g1,
177 OopsInHeapRegionClosure* oc,
178 ConcurrentMark* cm)
179 : _g1(g1), _oc(oc), _cm(cm) { }
181 template <class T> void do_oop_nv(T* p);
182 virtual void do_oop(oop* p) { do_oop_nv(p); }
183 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
184 bool apply_to_weak_ref_discovered_field() { return true; }
185 bool do_header() { return false; }
186 void set_region(HeapRegion* from) {
187 _oc->set_region(from);
188 }
189 };
// Applies the wrapped closure only to references within the region
// bounded by [_r_bottom, _r_end); the filtering is in do_oop_nv,
// defined elsewhere.  _out_of_region appears to record whether/how many
// references fell outside the region -- confirm against the .cpp file.
class FilterOutOfRegionClosure: public OopClosure {
  HeapWord* _r_bottom;
  HeapWord* _r_end;
  OopClosure* _oc;
  int _out_of_region;
public:
  // Defined out of line; captures the region's bounds.
  FilterOutOfRegionClosure(HeapRegion* r, OopClosure* oc);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool do_header() { return false; }
  int out_of_region() { return _out_of_region; }
};