Mon, 12 Mar 2012 14:59:00 -0700
7147724: G1: hang in SurrogateLockerThread::manipulatePLL
Summary: Attempting to initiate a marking cycle when allocating a humongous object can, if a marking cycle is successfully initiated by another thread, result in the allocating thread spinning until the marking cycle is complete. Eliminate a deadlock between the main ConcurrentMarkThread, the SurrogateLocker thread, the VM thread, and a mutator thread waiting on the SecondaryFreeList_lock (while free regions are going to become available) by not manipulating the pending list lock during the prologue and epilogue of the cleanup pause.
Reviewed-by: brutisso, jcoomes, tonyp
1 /*
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
28 // A G1RemSet provides ways of iterating over pointers into a selected
29 // collection set.
31 class G1CollectedHeap;
32 class CardTableModRefBarrierSet;
33 class ConcurrentG1Refine;
35 // A G1RemSet in which each heap region has a rem set that records the
36 // external heap references into it. Uses a mod ref bs to track updates,
37 // so that they can be used to update the individual region remsets.
class G1RemSet: public CHeapObj {
protected:
  G1CollectedHeap* _g1;
  // Count of cards refined concurrently (outside a pause); presumably
  // used for the summary statistics printed by print_summary_info() --
  // confirm against the .cpp.
  unsigned _conc_refine_cards;
  // Number of worker threads to use for the parallel rem set operations.
  uint n_workers();

protected:
  enum SomePrivateConstants {
    // Synchronization points used while updating and merging rem sets
    // in parallel.
    UpdateRStoMergeSync = 0,
    MergeRStoDoDirtySync = 1,
    DoDirtySync = 2,
    LastSync = 3,

    // Task ids for the (single-entry) sequential sub-task set below.
    SeqTask = 0,
    NumSeqTasks = 1
  };

  // The heap's card table mod-ref barrier set.
  CardTableModRefBS* _ct_bs;
  // Claims the sequential tasks enumerated above.
  SubTasksDone* _seq_task;
  G1CollectorPolicy* _g1p;

  // The concurrent refinement machinery.
  ConcurrentG1Refine* _cg1r;

  // Per-worker card-scan counts for the current pause, and their sum
  // (reported via cardsScanned()).
  size_t* _cards_scanned;
  size_t _total_cards_scanned;

  // Used for caching the closure that is responsible for scanning
  // references into the collection set.
  OopsInHeapRegionClosure** _cset_rs_update_cl;

  // The routine that performs the actual work of refining a dirty
  // card.
  // If check_for_refs_into_cset is true then a true result is returned
  // if the card contains oops that have references into the current
  // collection set.
  bool concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                    bool check_for_refs_into_cset);

public:
  // This is called to reset dual hash tables after the gc pause
  // is finished and the initial hash table is no longer being
  // scanned.
  void cleanupHRRS();

  G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
  ~G1RemSet();

  // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
  // outside the CS (having invoked "blk->set_region" to set the "from"
  // region correctly beforehand.) The "worker_i" param is for the
  // parallel case where the number of the worker thread calling this
  // function can be helpful in partitioning the work to be done. It
  // should be the same as the "i" passed to the calling thread's
  // work(i) function. In the sequential case this param will be ignored.
  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                   int worker_i);

  // Prepare for and cleanup after an oops_into_collection_set_do
  // call. Must call each of these once before and after (in sequential
  // code) any threads call oops_into_collection_set_do. (This offers an
  // opportunity to sequential setup and teardown of structures needed by a
  // parallel iteration over the CS's RS.)
  void prepare_for_oops_into_collection_set_do();
  void cleanup_after_oops_into_collection_set_do();

  // The two phases of rem set processing during a pause: presumably
  // scanRS scans the collection-set rem sets with "oc", and updateRS
  // processes buffered dirty cards, enqueueing cards with references
  // into the cset onto "into_cset_dcq" -- confirm in the .cpp.
  void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
  void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);

  CardTableModRefBS* ct_bs() { return _ct_bs; }
  // Total number of cards scanned during the last pause.
  size_t cardsScanned() { return _total_cards_scanned; }

  // Record, if necessary, the fact that *p (where "p" is in region "from",
  // which is required to be non-NULL) has changed to a new non-NULL value.
  template <class T> void write_ref(HeapRegion* from, T* p);
  template <class T> void par_write_ref(HeapRegion* from, T* p, int tid);

  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
  // or card, respectively, such that a region or card with a corresponding
  // 0 bit contains no part of any live object. Eliminates any remembered
  // set entries that correspond to dead heap ranges.
  void scrub(BitMap* region_bm, BitMap* card_bm);

  // Like the above, but assumes is called in parallel: "worker_num" is the
  // parallel thread id of the current thread, and "claim_val" is the
  // value that should be used to claim heap regions.
  void scrub_par(BitMap* region_bm, BitMap* card_bm,
                 uint worker_num, int claim_val);

  // Refine the card corresponding to "card_ptr". If "sts" is non-NULL,
  // join and leave around parts that must be atomic wrt GC. (NULL means
  // being done at a safepoint.)
  // If check_for_refs_into_cset is true, a true result is returned
  // if the given card contains oops that have references into the
  // current collection set.
  virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                       bool check_for_refs_into_cset);

  // Print any relevant summary info.
  virtual void print_summary_info();

  // Prepare remembered set for verification.
  virtual void prepare_for_verify();
};
143 class CountNonCleanMemRegionClosure: public MemRegionClosure {
144 G1CollectedHeap* _g1;
145 int _n;
146 HeapWord* _start_first;
147 public:
148 CountNonCleanMemRegionClosure(G1CollectedHeap* g1) :
149 _g1(g1), _n(0), _start_first(NULL)
150 {}
151 void do_MemRegion(MemRegion mr);
152 int n() { return _n; };
153 HeapWord* start_first() { return _start_first; }
154 };
// OopClosure that forwards each reference it visits to the associated
// G1RemSet, tagged with the region the reference was found in.  The
// per-oop work (do_oop_work) is a template declared here and defined
// out of line.
class UpdateRSOopClosure: public OopClosure {
  HeapRegion* _from;  // region containing the fields currently being scanned
  G1RemSet* _rs;      // rem set the references are reported to
  int _worker_i;      // caller's worker id; defaults to 0

  template <class T> void do_oop_work(T* p);

public:
  UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
    _from(NULL), _rs(rs), _worker_i(worker_i)
  {}

  // Must be called with the (non-NULL) source region before the closure
  // is applied to that region's fields.
  void set_from(HeapRegion* from) {
    assert(from != NULL, "from region must be non-NULL");
    _from = from;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  // Override: this closure is idempotent.
  // bool idempotent() { return true; }
  // Also apply this closure to a Reference object's discovered field.
  bool apply_to_weak_ref_discovered_field() { return true; }
};
// OopsInHeapRegionClosure that forwards each visited reference to the
// given G1RemSet; the per-oop work (do_oop_work) is a template declared
// here and defined out of line.
// NOTE(review): the single-argument constructor is not explicit, so a
// G1RemSet* converts implicitly to an UpdateRSetImmediate -- consider
// adding 'explicit' if no caller relies on the conversion.
class UpdateRSetImmediate: public OopsInHeapRegionClosure {
private:
  G1RemSet* _g1_rem_set;  // rem set the references are reported to

  template <class T> void do_oop_work(T* p);
public:
  UpdateRSetImmediate(G1RemSet* rs) :
    _g1_rem_set(rs) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop( oop* p) { do_oop_work(p); }
};
195 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP