src/share/vm/gc_implementation/g1/g1OopClosures.hpp

Mon, 24 Mar 2014 15:30:14 +0100

author
tschatzl
date
Mon, 24 Mar 2014 15:30:14 +0100
changeset 6402
191174b49bec
parent 6331
5d492d192cbf
child 6408
bc22cbb8b45a
permissions
-rw-r--r--

8035406: Improve data structure for Code Cache remembered sets
Summary: Change the code cache remembered sets data structure from a GrowableArray to a chunked list of nmethods. This makes the data structure more amenable to parallelization, and decreases freeing time.
Reviewed-by: mgerdin, brutisso

ysr@777 1 /*
tschatzl@6269 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
stefank@2314 27
ysr@777 28 class HeapRegion;
ysr@777 29 class G1CollectedHeap;
ysr@777 30 class G1RemSet;
ysr@777 31 class ConcurrentMark;
ysr@777 32 class DirtyCardToOopClosure;
ysr@777 33 class CMBitMap;
ysr@777 34 class CMMarkStack;
ysr@777 35 class G1ParScanThreadState;
tonyp@2968 36 class CMTask;
johnc@3175 37 class ReferenceProcessor;
ysr@777 38
// A class that scans oops in a given heap region (much as OopsInGenClosure
// scans oops in a generation.)
class OopsInHeapRegionClosure: public ExtendedOopClosure {
protected:
  HeapRegion* _from;  // Region the scanned oops come from; set by the caller before use.
public:
  // Install the region the next batch of oops originates from.
  void set_region(HeapRegion* from) { _from = from; }
};
ysr@777 47
// Common base for the G1 parallel (per-worker) scan closures: bundles the
// heap, the per-worker scan state and the worker id for use by subclasses.
class G1ParClosureSuper : public OopsInHeapRegionClosure {
protected:
  G1CollectedHeap* _g1;                   // The heap being collected.
  G1ParScanThreadState* _par_scan_state;  // Per-GC-worker scratch/scan state.
  uint _worker_id;                        // Id of the worker owning this closure.
public:
  // Initializes the fields above (defined outside this header).
  G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
  // These closures are also applied to discovered-reference fields.
  bool apply_to_weak_ref_discovered_field() { return true; }
};
ysr@777 57
// Closure used while scanning the heap remembered sets; presumably pushes
// interesting references onto the per-worker scan state (the name suggests
// so -- the actual work is in do_oop_nv(), defined in the .inline.hpp file).
class G1ParPushHeapRSClosure : public G1ParClosureSuper {
public:
  G1ParPushHeapRSClosure(G1CollectedHeap* g1,
                         G1ParScanThreadState* par_scan_state):
    G1ParClosureSuper(g1, par_scan_state) { }

  // Non-virtual worker; the virtual do_oop()s forward to it so specialized
  // (devirtualized) iteration can call do_oop_nv() directly.
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
iveresov@1696 68
// Scan closure that carries a reference processor: the constructor installs
// 'rp' into the inherited _ref_processor slot. Scanning logic is in
// do_oop_nv(), defined in the .inline.hpp file.
class G1ParScanClosure : public G1ParClosureSuper {
public:
  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
    G1ParClosureSuper(g1, par_scan_state)
  {
    // The base class must not have installed a reference processor already;
    // this closure takes ownership of the one passed in.
    assert(_ref_processor == NULL, "sanity");
    _ref_processor = rp;
  }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
ysr@777 82
ysr@1280 83 #define G1_PARTIAL_ARRAY_MASK 0x2
ysr@777 84
tschatzl@6269 85 inline bool has_partial_array_mask(oop* ref) {
ysr@1280 86 return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
tonyp@961 87 }
tonyp@961 88
// We never encode partial array oops as narrowOop*, so return false immediately.
// This allows the compiler to create optimized code when popping references from
// the work queue.
inline bool has_partial_array_mask(narrowOop* ref) {
  // Debug-build check that no tagged pointer ever arrives here as narrowOop*.
  assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
  return false;
}
tschatzl@6269 96
// Only implement set_partial_array_mask() for regular oops, not for narrowOops.
// We always encode partial arrays as regular oop, to allow the
// specialization for has_partial_array_mask() for narrowOops above.
// This means that unintentional use of this method with narrowOops are caught
// by the compiler.
inline oop* set_partial_array_mask(oop obj) {
  // The tag bit must be clear in the original pointer; otherwise or-ing in
  // the mask would destroy address information.
  assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
  return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
}
tonyp@961 106
// Strips the partial-array tag bit from an encoded reference and returns the
// underlying oop (inverse of set_partial_array_mask()).
template <class T> inline oop clear_partial_array_mask(T* ref) {
  return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
}
tonyp@961 110
// Closure for handling partially-scanned (chunked) object arrays -- see the
// partial-array mask helpers above. Element scanning is delegated to the
// embedded G1ParScanClosure; the chunking logic itself is in do_oop_nv()
// (defined elsewhere).
class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
  G1ParScanClosure _scanner;  // Applied to the array elements.

public:
  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state, rp)
  {
    // The embedded scanner holds the reference processor; this closure
    // itself must not have one installed.
    assert(_ref_processor == NULL, "sanity");
  }

  // Accessor for the element-scanning closure.
  G1ParScanClosure* scanner() {
    return &_scanner;
  }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
ysr@777 129
// Add back base class for metadata
// Shared state and marking helpers for the copying closures below
// (G1ParCopyClosure instantiations).
class G1ParCopyHelper : public G1ParClosureSuper {
protected:
  Klass* _scanned_klass;  // Klass whose fields are currently being scanned.
  ConcurrentMark* _cm;    // Concurrent-marking state used by the mark_* helpers.

  // Mark the object if it's not already marked. This is used to mark
  // objects pointed to by roots that are guaranteed not to move
  // during the GC (i.e., non-CSet objects). It is MT-safe.
  void mark_object(oop obj);

  // Mark the object if it's not already marked. This is used to mark
  // objects pointed to by roots that have been forwarded during a
  // GC. It is MT-safe.
  void mark_forwarded_object(oop from_obj, oop to_obj);
public:
  G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);

  // Record the klass being scanned; presumably consumed by
  // do_klass_barrier() below (its body is defined elsewhere -- confirm).
  void set_scanned_klass(Klass* k) { _scanned_klass = k; }
  template <class T> void do_klass_barrier(T* p, oop new_obj);
};
coleenp@4037 151
// Copying closure parameterized over the barrier kind to apply and whether
// visited objects should also be marked (do_mark_object). The per-reference
// work is in do_oop_work() (defined elsewhere).
template <G1Barrier barrier, bool do_mark_object>
class G1ParCopyClosure : public G1ParCopyHelper {
private:
  template <class T> void do_oop_work(T* p);

public:
  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
                   ReferenceProcessor* rp) :
    G1ParCopyHelper(g1, par_scan_state) {
    // NOTE(review): unlike G1ParScanClosure, 'rp' is only asserted against
    // and never stored -- _ref_processor stays NULL here. Looks deliberate,
    // but confirm callers do not expect reference discovery via this closure.
    assert(_ref_processor == NULL, "sanity");
  }

  template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
ysr@777 168
// Concrete G1ParCopyClosure instantiations, non-marking variants.
typedef G1ParCopyClosure<G1BarrierNone, false> G1ParScanExtRootClosure;
typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;

// Variants with do_mark_object enabled.
typedef G1ParCopyClosure<G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;

// The following closure type is defined in g1_specialized_oop_closures.hpp:
//
// typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;

// We use a separate closure to handle references during evacuation
// failure processing.
// We could have used another instance of G1ParScanHeapEvacClosure
// (since that closure no longer assumes that the references it
// handles point into the collection set).

typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
ysr@777 187
coleenp@4037 188 class FilterIntoCSClosure: public ExtendedOopClosure {
ysr@777 189 G1CollectedHeap* _g1;
ysr@777 190 OopClosure* _oc;
ysr@777 191 DirtyCardToOopClosure* _dcto_cl;
ysr@777 192 public:
ysr@777 193 FilterIntoCSClosure( DirtyCardToOopClosure* dcto_cl,
johnc@3175 194 G1CollectedHeap* g1,
johnc@3179 195 OopClosure* oc) :
johnc@3179 196 _dcto_cl(dcto_cl), _g1(g1), _oc(oc) { }
johnc@3175 197
ysr@1280 198 template <class T> void do_oop_nv(T* p);
ysr@1280 199 virtual void do_oop(oop* p) { do_oop_nv(p); }
ysr@1280 200 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
ysr@777 201 bool apply_to_weak_ref_discovered_field() { return true; }
ysr@777 202 };
ysr@777 203
// Filtering wrapper that, judging by the name, applies the wrapped closure
// only to references outside a region's bounds (do_oop_nv() is defined
// elsewhere).
class FilterOutOfRegionClosure: public ExtendedOopClosure {
  HeapWord* _r_bottom;  // Presumably r's bottom -- ctor is defined elsewhere; confirm.
  HeapWord* _r_end;     // Presumably r's end.
  OopClosure* _oc;      // Closure applied to references that pass the filter.
public:
  FilterOutOfRegionClosure(HeapRegion* r, OopClosure* oc);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  // Also applied to discovered-reference fields.
  bool apply_to_weak_ref_discovered_field() { return true; }
};
stefank@2314 215
// Closure for iterating over object fields during concurrent marking
class G1CMOopClosure : public ExtendedOopClosure {
private:
  G1CollectedHeap* _g1h;  // The heap being marked.
  ConcurrentMark* _cm;    // Global concurrent-marking state.
  CMTask* _task;          // Marking task on whose behalf fields are scanned.
public:
  G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop( oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
tonyp@2968 228
// Closure to scan the root regions during concurrent marking
class G1RootRegionScanClosure : public ExtendedOopClosure {
private:
  G1CollectedHeap* _g1h;  // The heap being marked.
  ConcurrentMark* _cm;    // Global concurrent-marking state.
  uint _worker_id;        // Id of the worker running this closure.
public:
  G1RootRegionScanClosure(G1CollectedHeap* g1h, ConcurrentMark* cm,
                          uint worker_id) :
    _g1h(g1h), _cm(cm), _worker_id(worker_id) { }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop( oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
tonyp@3464 243
// Closure that applies the given two closures in sequence.
// Used by the RSet refinement code (when updating RSets
// during an evacuation pause) to record cards containing
// pointers into the collection set.

class G1Mux2Closure : public ExtendedOopClosure {
  OopClosure* _c1;  // Applied first (per the class comment; body is elsewhere).
  OopClosure* _c2;  // Applied second.
public:
  G1Mux2Closure(OopClosure *c1, OopClosure *c2);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
johnc@3466 258
// A closure that returns true if it is actually applied
// to a reference

class G1TriggerClosure : public ExtendedOopClosure {
  bool _triggered;  // Presumably set by do_oop_nv(); ctor and body are defined elsewhere.
public:
  G1TriggerClosure();
  // Whether this closure has been applied to at least one reference.
  bool triggered() const { return _triggered; }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
johnc@3466 271
// A closure which uses a triggering closure to determine
// whether to apply an oop closure.

class G1InvokeIfNotTriggeredClosure: public ExtendedOopClosure {
  G1TriggerClosure* _trigger_cl;  // Consulted to decide whether to invoke _oop_cl.
  OopClosure* _oop_cl;            // Applied when the trigger has not fired.
public:
  G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t, OopClosure* oc);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
johnc@3466 284
// Closure used while updating remembered sets: judging by the name, it
// either updates the RSet directly or pushes the reference onto
// _push_ref_cl (do_oop_nv() is defined elsewhere -- confirm there).
class G1UpdateRSOrPushRefOopClosure: public ExtendedOopClosure {
  G1CollectedHeap* _g1;
  G1RemSet* _g1_rem_set;                  // Remembered-set machinery to update.
  HeapRegion* _from;                      // Region the scanned references come from.
  OopsInHeapRegionClosure* _push_ref_cl;  // Closure references may be pushed to.
  bool _record_refs_into_cset;            // Presumably: record cards with refs into the CSet.
  int _worker_i;                          // Worker id; defaults to 0.

public:
  G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
                                G1RemSet* rs,
                                OopsInHeapRegionClosure* push_ref_cl,
                                bool record_refs_into_cset,
                                int worker_i = 0);

  // Install the from-region before applying the closure; NULL is invalid.
  void set_from(HeapRegion* from) {
    assert(from != NULL, "from region must be non-NULL");
    _from = from;
  }

  // True iff the object's forwarding pointer points at the object itself
  // (in G1 this is how evacuation failure is recorded -- confirm callers).
  bool self_forwarded(oop obj) {
    bool result = (obj->is_forwarded() && (obj->forwardee()== obj));
    return result;
  }

  // Also applied to discovered-reference fields.
  bool apply_to_weak_ref_discovered_field() { return true; }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  virtual void do_oop(oop* p) { do_oop_nv(p); }
};
johnc@3466 316
stefank@2314 317 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP

mercurial