src/share/vm/gc_implementation/g1/g1OopClosures.hpp

author       johnc
date         Mon, 12 Mar 2012 14:59:00 -0700
changeset    3666   64bf7c8270cb
parent       3466   b4ebad3520bb
child        3690   748051fd24ce
permissions  -rw-r--r--

7147724: G1: hang in SurrogateLockerThread::manipulatePLL
Summary: Attempting to initiate a marking cycle while allocating a humongous object can, if another thread successfully initiates the marking cycle first, leave the allocating thread spinning until that cycle completes. Eliminate a deadlock between the main ConcurrentMarkThread, the SurrogateLocker thread, the VM thread, and a mutator thread waiting on the SecondaryFreeList_lock (until free regions become available) by not manipulating the pending list lock during the prologue and epilogue of the cleanup pause.
Reviewed-by: brutisso, jcoomes, tonyp
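At its core, the hang described above is a lock-ordering cycle: one party holds a lock and waits for a second lock that another party holds while waiting for the first. The standalone sketch below is a deliberately compressed two-thread reduction of that hazard class, not the HotSpot code; the mutex names are invented stand-ins, and the actual fix (skipping the pending list lock in the cleanup pause prologue and epilogue) involves the VM thread, the SurrogateLocker thread, the concurrent mark thread, and a mutator rather than two plain threads.

#include <mutex>
#include <thread>

std::mutex pending_list_lock;      // invented stand-in for the reference pending list lock
std::mutex secondary_free_list;    // invented stand-in for SecondaryFreeList_lock

// Hazardous shape: hold one lock, then block on another that a different thread
// holds while it waits for this one -- the cycle that produces the hang.
void pause_prologue_hazardous() {
  std::lock_guard<std::mutex> hold(secondary_free_list);
  std::lock_guard<std::mutex> wait(pending_list_lock);   // may never be acquired
}

// Shape of the fix: do not touch the second lock in this phase at all, so no
// cycle through it can form.
void pause_prologue_fixed() {
  std::lock_guard<std::mutex> hold(secondary_free_list);
  // ... work that needs only this lock ...
}

int main() {
  std::thread t(pause_prologue_fixed);
  t.join();
  return 0;
}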

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP

class HeapRegion;
class G1CollectedHeap;
class G1RemSet;
class ConcurrentMark;
class DirtyCardToOopClosure;
class CMBitMap;
class CMMarkStack;
class G1ParScanThreadState;
class CMTask;
class ReferenceProcessor;

// A class that scans oops in a given heap region (much as OopsInGenClosure
// scans oops in a generation.)
class OopsInHeapRegionClosure: public OopsInGenClosure {
protected:
  HeapRegion* _from;
public:
  void set_region(HeapRegion* from) { _from = from; }
};
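For readers new to the closure idiom used throughout this header, the standalone sketch below shows its general shape with invented, simplified types (it does not reproduce the real OopClosure/OopsInGenClosure hierarchy): a closure is an object whose callback a scanner applies to every reference field it visits, and set_region tells the closure which region the fields currently being scanned belong to.

#include <cstddef>
#include <cstdio>
#include <vector>

struct Region { int id; };
struct Obj;
typedef Obj* Ref;                          // stands in for a reference slot
struct Obj { std::vector<Ref> fields; };   // a heap object with reference fields

struct RefClosure {                        // plays the role of OopClosure
  virtual void do_ref(Ref* slot) = 0;
  virtual ~RefClosure() { }
};

struct RefsInRegionClosure : RefClosure {  // plays the role of OopsInHeapRegionClosure
  Region* _from;
  RefsInRegionClosure() : _from(0) { }
  void set_region(Region* from) { _from = from; }
};

struct PrintClosure : RefsInRegionClosure {
  virtual void do_ref(Ref* slot) {
    std::printf("slot %p in region %d -> %p\n",
                (void*)slot, _from ? _from->id : -1, (void*)*slot);
  }
};

// A scanner sets the region once and then applies the closure to every field.
void scan_region(Region* r, std::vector<Obj*>& objs, RefsInRegionClosure* cl) {
  cl->set_region(r);
  for (size_t i = 0; i < objs.size(); i++)
    for (size_t j = 0; j < objs[i]->fields.size(); j++)
      cl->do_ref(&objs[i]->fields[j]);
}

int main() {
  Region r = { 7 };
  Obj a, b;
  a.fields.push_back(&b);                  // 'a' has one reference field, pointing at 'b'
  std::vector<Obj*> objs;
  objs.push_back(&a);
  objs.push_back(&b);
  PrintClosure cl;
  scan_region(&r, objs, &cl);
  return 0;
}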

class G1ParClosureSuper : public OopsInHeapRegionClosure {
protected:
  G1CollectedHeap* _g1;
  G1RemSet* _g1_rem;
  ConcurrentMark* _cm;
  G1ParScanThreadState* _par_scan_state;
  uint _worker_id;
  bool _during_initial_mark;
  bool _mark_in_progress;
public:
  G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
  bool apply_to_weak_ref_discovered_field() { return true; }
};

class G1ParPushHeapRSClosure : public G1ParClosureSuper {
public:
  G1ParPushHeapRSClosure(G1CollectedHeap* g1,
                         G1ParScanThreadState* par_scan_state):
    G1ParClosureSuper(g1, par_scan_state) { }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

class G1ParScanClosure : public G1ParClosureSuper {
public:
  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
    G1ParClosureSuper(g1, par_scan_state)
  {
    assert(_ref_processor == NULL, "sanity");
    _ref_processor = rp;
  }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
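The do_oop_nv / do_oop pairing in G1ParPushHeapRSClosure and G1ParScanClosure is the "non-virtual" specialization idiom: the template do_oop_nv does the work and the virtual do_oop overloads simply forward to it, so code that is templated on the concrete closure type can call do_oop_nv directly and have it inlined, while generic code still goes through the virtual interface. A standalone sketch of the idea, with invented stand-in types (the real specialization machinery lives in the specialized-oop-closures headers):

#include <cstddef>
#include <cstdint>
#include <cstdio>

typedef uint32_t  narrowRef;   // stands in for narrowOop
typedef uintptr_t wideRef;     // stands in for a full-width reference slot

struct RefClosure {
  virtual void do_ref(wideRef* p) = 0;
  virtual void do_ref(narrowRef* p) = 0;
  virtual ~RefClosure() { }
};

struct CountClosure : RefClosure {
  size_t count;
  CountClosure() : count(0) { }
  template <class T> void do_ref_nv(T* p) { (void)p; ++count; }  // the real work
  virtual void do_ref(wideRef* p)   { do_ref_nv(p); }            // virtual shims
  virtual void do_ref(narrowRef* p) { do_ref_nv(p); }
};

// Generic scan: one virtual dispatch per slot.
template <class T> void scan_virtual(RefClosure* cl, T* slots, size_t n) {
  for (size_t i = 0; i < n; i++) cl->do_ref(&slots[i]);
}

// Specialized scan: the closure type is known, so do_ref_nv can be inlined.
template <class Closure, class T> void scan_inlined(Closure* cl, T* slots, size_t n) {
  for (size_t i = 0; i < n; i++) cl->do_ref_nv(&slots[i]);
}

int main() {
  narrowRef slots[4] = { 0, 1, 2, 3 };
  CountClosure cl;
  scan_virtual(&cl, slots, 4);
  scan_inlined(&cl, slots, 4);
  std::printf("%u slots visited\n", (unsigned)cl.count);   // prints 8
  return 0;
}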

#define G1_PARTIAL_ARRAY_MASK 0x2

template <class T> inline bool has_partial_array_mask(T* ref) {
  return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
}

template <class T> inline T* set_partial_array_mask(T obj) {
  assert(((uintptr_t)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
  return (T*) ((uintptr_t)obj | G1_PARTIAL_ARRAY_MASK);
}

template <class T> inline oop clear_partial_array_mask(T* ref) {
  return oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
}
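The three helpers above borrow a low bit of a task-queue entry to flag "partially scanned object array" entries: because real references are at least 4-byte aligned, bit 0x2 is always clear and can carry the tag, and clearing it recovers the original pointer. A standalone worked example of the same trick (signatures simplified; the sketch tags plain C++ pointers rather than oops):

#include <cassert>
#include <cstdint>
#include <cstdio>

static const uintptr_t PARTIAL_ARRAY_MASK = 0x2;

template <class T> inline bool has_partial_array_mask(T* ref) {
  return ((uintptr_t)ref & PARTIAL_ARRAY_MASK) == PARTIAL_ARRAY_MASK;
}

template <class T> inline T* set_partial_array_mask(T* obj) {
  assert((((uintptr_t)obj & PARTIAL_ARRAY_MASK) == 0) && "information loss");
  return (T*)((uintptr_t)obj | PARTIAL_ARRAY_MASK);
}

template <class T> inline T* clear_partial_array_mask(T* ref) {
  return (T*)((uintptr_t)ref & ~PARTIAL_ARRAY_MASK);
}

int main() {
  static int array_header = 0;        // int alignment (>= 4) keeps bit 0x2 clear
  int* plain  = &array_header;
  int* tagged = set_partial_array_mask(plain);

  std::printf("plain  tagged? %d\n", (int)has_partial_array_mask(plain));   // 0
  std::printf("masked tagged? %d\n", (int)has_partial_array_mask(tagged));  // 1
  assert(clear_partial_array_mask(tagged) == plain);                        // round trip
  return 0;
}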

class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
  G1ParScanClosure _scanner;

public:
  G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
    G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state, rp)
  {
    assert(_ref_processor == NULL, "sanity");
  }

  G1ParScanClosure* scanner() {
    return &_scanner;
  }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};


class G1ParCopyHelper : public G1ParClosureSuper {
  G1ParScanClosure *_scanner;
protected:
  // Mark the object if it's not already marked. This is used to mark
  // objects pointed to by roots that are guaranteed not to move
  // during the GC (i.e., non-CSet objects). It is MT-safe.
  void mark_object(oop obj);

  // Mark the object if it's not already marked. This is used to mark
  // objects pointed to by roots that have been forwarded during a
  // GC. It is MT-safe.
  void mark_forwarded_object(oop from_obj, oop to_obj);

  oop copy_to_survivor_space(oop obj);

public:
  G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
                  G1ParScanClosure *scanner) :
    G1ParClosureSuper(g1, par_scan_state), _scanner(scanner) { }
};
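copy_to_survivor_space and mark_forwarded_object rely on the standard forwarding scheme: the first thread to evacuate an object copies it and installs a forwarding pointer, and every later visitor follows that pointer to the single new copy. The sketch below is a single-threaded, self-contained illustration with invented types; the real code installs the forwardee with a CAS, which is what makes it MT-safe.

#include <cassert>
#include <vector>

struct FakeObj {
  FakeObj* forwardee;                 // set once the object has been evacuated
  int      payload;
  FakeObj() : forwardee(0), payload(0) { }
  bool is_forwarded() const { return forwardee != 0; }
};

struct SurvivorSpace {
  std::vector<FakeObj> slots;
  SurvivorSpace() { slots.reserve(64); }                    // keep pointers stable for the sketch
  FakeObj* allocate() { slots.push_back(FakeObj()); return &slots.back(); }
};

// The first caller copies the object and publishes the forwarding pointer;
// later callers just follow it, so all references converge on one copy.
FakeObj* copy_to_survivor_space(FakeObj* from, SurvivorSpace& to_space) {
  if (from->is_forwarded()) return from->forwardee;
  FakeObj* to = to_space.allocate();
  to->payload = from->payload;        // copy the contents
  from->forwardee = to;               // publish the new location
  return to;
}

int main() {
  SurvivorSpace survivors;
  FakeObj old_obj;
  old_obj.payload = 42;
  FakeObj* a = copy_to_survivor_space(&old_obj, survivors);
  FakeObj* b = copy_to_survivor_space(&old_obj, survivors);  // follows the forwardee
  assert(a == b && a->payload == 42);
  return 0;
}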

template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
class G1ParCopyClosure : public G1ParCopyHelper {
  G1ParScanClosure _scanner;

  template <class T> void do_oop_work(T* p);

public:
  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
                   ReferenceProcessor* rp) :
    _scanner(g1, par_scan_state, rp),
    G1ParCopyHelper(g1, par_scan_state, &_scanner) {
    assert(_ref_processor == NULL, "sanity");
  }

  G1ParScanClosure* scanner() { return &_scanner; }

  template <class T> void do_oop_nv(T* p) {
    do_oop_work(p);
  }
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
typedef G1ParCopyClosure<true, G1BarrierNone, false> G1ParScanPermClosure;

typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
typedef G1ParCopyClosure<true, G1BarrierNone, true> G1ParScanAndMarkPermClosure;

// The following closure types are no longer used but are retained
// for historical reasons:
// typedef G1ParCopyClosure<false, G1BarrierRS, false> G1ParScanHeapRSClosure;
// typedef G1ParCopyClosure<false, G1BarrierRS, true> G1ParScanAndMarkHeapRSClosure;

// The following closure type is defined in g1_specialized_oop_closures.hpp:
//
// typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;

// We use a separate closure to handle references during evacuation
// failure processing.
// We could have used another instance of G1ParScanHeapEvacClosure
// (since that closure no longer assumes that the references it
// handles point into the collection set).

typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
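G1ParCopyClosure is specialized at compile time: the boolean and barrier template parameters select the marking and barrier behaviour, the compiler folds away the branches that a given instantiation never takes, and the typedefs above give the useful combinations readable names. A standalone sketch of that pattern with invented parameters and types (the real closure has three parameters and does actual copying work):

#include <cstdio>

enum BarrierKind { BarrierNone, BarrierEvac };

template <bool do_mark_object, BarrierKind barrier>
struct CopyClosure {
  void do_ref(void** slot) {
    (void)slot;                        // the copying work itself is omitted
    if (do_mark_object) {              // compile-time constant: dead branch is dropped
      std::puts("mark the copied object");
    }
    if (barrier == BarrierEvac) {      // likewise selected at compile time
      std::puts("apply the evacuation barrier");
    }
  }
};

// Readable names for the combinations, in the spirit of the typedefs above.
typedef CopyClosure<false, BarrierNone> ScanRootClosure;
typedef CopyClosure<true,  BarrierNone> ScanAndMarkRootClosure;
typedef CopyClosure<false, BarrierEvac> ScanEvacFailureClosure;

int main() {
  void* slot = 0;
  ScanAndMarkRootClosure mark_cl;
  ScanEvacFailureClosure evac_cl;
  mark_cl.do_ref(&slot);   // prints only the marking line
  evac_cl.do_ref(&slot);   // prints only the barrier line
  return 0;
}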

class FilterIntoCSClosure: public OopClosure {
  G1CollectedHeap* _g1;
  OopClosure* _oc;
  DirtyCardToOopClosure* _dcto_cl;
public:
  FilterIntoCSClosure(DirtyCardToOopClosure* dcto_cl,
                      G1CollectedHeap* g1,
                      OopClosure* oc) :
    _dcto_cl(dcto_cl), _g1(g1), _oc(oc) { }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool do_header() { return false; }
};

class FilterOutOfRegionClosure: public OopClosure {
  HeapWord* _r_bottom;
  HeapWord* _r_end;
  OopClosure* _oc;
  int _out_of_region;
public:
  FilterOutOfRegionClosure(HeapRegion* r, OopClosure* oc);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool do_header() { return false; }
  int out_of_region() { return _out_of_region; }
};
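FilterIntoCSClosure and FilterOutOfRegionClosure are filtering decorators: each wraps another closure and forwards a slot only when the referenced address satisfies a predicate (points into the collection set, or points outside the region being scanned). A standalone sketch of the second case, with invented, simplified types:

#include <cstdint>
#include <cstdio>

struct RefClosure {
  virtual void do_ref(void** slot) = 0;
  virtual ~RefClosure() { }
};

struct PrintClosure : RefClosure {
  virtual void do_ref(void** slot) { std::printf("forwarded slot %p\n", (void*)slot); }
};

// Forward the slot to the wrapped closure only if the referenced address lies
// outside [bottom, end) -- the same shape as FilterOutOfRegionClosure above.
struct FilterOutOfRangeClosure : RefClosure {
  uintptr_t _bottom, _end;
  RefClosure* _oc;
  FilterOutOfRangeClosure(void* bottom, void* end, RefClosure* oc)
    : _bottom((uintptr_t)bottom), _end((uintptr_t)end), _oc(oc) { }
  virtual void do_ref(void** slot) {
    uintptr_t target = (uintptr_t)*slot;
    if (target < _bottom || target >= _end) {
      _oc->do_ref(slot);               // only references leaving the range
    }
  }
};

int main() {
  char region[64];
  static char outside;
  void* slots[2] = { &region[8], &outside };
  PrintClosure print_cl;
  FilterOutOfRangeClosure filter(region, region + sizeof(region), &print_cl);
  for (int i = 0; i < 2; i++) filter.do_ref(&slots[i]);   // only slots[1] is forwarded
  return 0;
}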

// Closure for iterating over object fields during concurrent marking
class G1CMOopClosure : public OopClosure {
private:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  CMTask* _task;
public:
  G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop( oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

// Closure to scan the root regions during concurrent marking
class G1RootRegionScanClosure : public OopClosure {
private:
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  uint _worker_id;
public:
  G1RootRegionScanClosure(G1CollectedHeap* g1h, ConcurrentMark* cm,
                          uint worker_id) :
    _g1h(g1h), _cm(cm), _worker_id(worker_id) { }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop( oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

// Closure that applies the given two closures in sequence.
// Used by the RSet refinement code (when updating RSets
// during an evacuation pause) to record cards containing
// pointers into the collection set.

class G1Mux2Closure : public OopClosure {
  OopClosure* _c1;
  OopClosure* _c2;
public:
  G1Mux2Closure(OopClosure *c1, OopClosure *c2);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

// A closure that returns true if it is actually applied
// to a reference

class G1TriggerClosure : public OopClosure {
  bool _triggered;
public:
  G1TriggerClosure();
  bool triggered() const { return _triggered; }
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

// A closure which uses a triggering closure to determine
// whether to apply an oop closure.

class G1InvokeIfNotTriggeredClosure: public OopClosure {
  G1TriggerClosure* _trigger_cl;
  OopClosure* _oop_cl;
public:
  G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t, OopClosure* oc);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p) { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
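G1Mux2Closure, G1TriggerClosure, and G1InvokeIfNotTriggeredClosure are small combinators; the G1-specific work happens where they are composed (per the comments above, in the remembered-set refinement code). The standalone sketch below shows how such pieces can plug together; the types and the particular wiring in main are invented for illustration and do not mirror the actual refinement code. The mux fans each slot out to two closures, the trigger records that it was reached, and the invoke-if-not-triggered wrapper stops applying its payload once the trigger has fired.

#include <cstdio>

struct RefClosure {
  virtual void do_ref(void** slot) = 0;
  virtual ~RefClosure() { }
};

// Applies two closures in sequence (cf. G1Mux2Closure).
struct Mux2Closure : RefClosure {
  RefClosure* _c1; RefClosure* _c2;
  Mux2Closure(RefClosure* c1, RefClosure* c2) : _c1(c1), _c2(c2) { }
  virtual void do_ref(void** slot) { _c1->do_ref(slot); _c2->do_ref(slot); }
};

// Remembers whether it was ever applied (cf. G1TriggerClosure).
struct TriggerClosure : RefClosure {
  bool _triggered;
  TriggerClosure() : _triggered(false) { }
  bool triggered() const { return _triggered; }
  virtual void do_ref(void** slot) { (void)slot; _triggered = true; }
};

// Applies the payload closure only while the trigger has not fired
// (cf. G1InvokeIfNotTriggeredClosure).
struct InvokeIfNotTriggeredClosure : RefClosure {
  TriggerClosure* _t; RefClosure* _oc;
  InvokeIfNotTriggeredClosure(TriggerClosure* t, RefClosure* oc) : _t(t), _oc(oc) { }
  virtual void do_ref(void** slot) { if (!_t->triggered()) _oc->do_ref(slot); }
};

struct PrintClosure : RefClosure {
  virtual void do_ref(void** slot) { std::printf("visited %p\n", (void*)slot); }
};

int main() {
  void* a = 0; void* b = 0;
  TriggerClosure trigger;
  PrintClosure print_cl;
  InvokeIfNotTriggeredClosure guarded(&trigger, &print_cl);
  Mux2Closure both(&guarded, &trigger);   // print (if not yet triggered), then trigger

  both.do_ref(&a);   // prints: the trigger had not fired yet
  both.do_ref(&b);   // silent: the trigger fired on the first slot
  return 0;
}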

class G1UpdateRSOrPushRefOopClosure: public OopClosure {
  G1CollectedHeap* _g1;
  G1RemSet* _g1_rem_set;
  HeapRegion* _from;
  OopsInHeapRegionClosure* _push_ref_cl;
  bool _record_refs_into_cset;
  int _worker_i;

public:
  G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
                                G1RemSet* rs,
                                OopsInHeapRegionClosure* push_ref_cl,
                                bool record_refs_into_cset,
                                int worker_i = 0);

  void set_from(HeapRegion* from) {
    assert(from != NULL, "from region must be non-NULL");
    _from = from;
  }

  bool self_forwarded(oop obj) {
    bool result = (obj->is_forwarded() && (obj->forwardee() == obj));
    return result;
  }

  bool apply_to_weak_ref_discovered_field() { return true; }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  virtual void do_oop(oop* p) { do_oop_nv(p); }
};
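The self_forwarded test above identifies objects that failed to evacuate: when evacuation fails, an object that could not be copied is forwarded to itself, so is_forwarded() is true but the forwardee is the object's own address. A standalone, single-threaded sketch of that convention with invented types, reusing the shape of the forwarding sketch earlier in this file:

#include <cassert>

struct FakeObj {
  FakeObj* forwardee;
  FakeObj() : forwardee(0) { }
  bool is_forwarded() const { return forwardee != 0; }
};

// Normal evacuation: forward to the new copy. Evacuation failure: forward the
// object to itself so later visitors can tell it stayed where it was.
void forward_to(FakeObj* from, FakeObj* to)  { from->forwardee = to; }
void mark_evacuation_failed(FakeObj* obj)    { obj->forwardee = obj; }

bool self_forwarded(FakeObj* obj) {
  return obj->is_forwarded() && obj->forwardee == obj;
}

int main() {
  FakeObj moved, copy, stuck;
  forward_to(&moved, &copy);
  mark_evacuation_failed(&stuck);

  assert(!self_forwarded(&moved));   // forwarded, but to a different copy
  assert(self_forwarded(&stuck));    // failed to evacuate: points at itself
  return 0;
}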

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
