Mon, 12 Mar 2012 14:59:00 -0700
7147724: G1: hang in SurrogateLockerThread::manipulatePLL
Summary: When allocating a humongous object, attempting to initiate a marking cycle can, if another thread successfully initiates one first, leave the allocating thread spinning until that marking cycle completes. Eliminate a deadlock between the main ConcurrentMarkThread, the SurrogateLocker thread, the VM thread, and a mutator thread waiting on the SecondaryFreeList_lock (while free regions are about to become available) by not manipulating the pending list lock during the prologue and epilogue of the cleanup pause.
Reviewed-by: brutisso, jcoomes, tonyp

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "memory/gcLocker.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/modRefBarrierSet.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class HeapRegion;
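
// Entry point for the serial, stop-the-world full collection of the
// G1 heap. The collection proceeds in four phases: mark live objects
// (phase 1), compute new object addresses (phase 2), adjust pointers
// to the new locations (phase 3), and compact (phase 4).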
void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SharedHeap* sh = SharedHeap::heap();
#ifdef ASSERT
  if (sh->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");

  GenMarkSweep::_ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation methodOops may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count for the permanent generation, since it is
  // implicitly collected whenever we do a full mark sweep collection.
  sh->perm_gen()->stat_record()->invocations++;

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3();

  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

  // We must invalidate the perm-gen rs, so that it gets rebuilt.
  GenRemSet* rs = sh->rem_set();
  rs->invalidate(sh->perm_gen()->used_region(), true /* whole_heap */);

  // "free at last gc" is calculated from these.
  // CHF: cheating for now!!!
  // Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
  // Universe::set_heap_used_at_last_gc(Universe::heap()->used());

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::_ref_processor = NULL;
}
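
// With _preserved_count_max left at zero, GenMarkSweep has no fixed
// buffer for preserved mark words, so any marks that need preserving
// fall through to its growable preserved stacks instead.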
void G1MarkSweep::allocate_stacks() {
  GenMarkSweep::_preserved_count_max = 0;
  GenMarkSweep::_preserved_marks = NULL;
  GenMarkSweep::_preserved_count = 0;
}
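
// Phase 1: mark all objects reachable from the strong roots, process
// discovered references, unload dead classes and compiled code, and
// prune the interned string and symbol tables.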
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace(" 1");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_SystemClasses,
                           &GenMarkSweep::follow_root_closure,
                           &GenMarkSweep::follow_code_root_closure,
                           &GenMarkSweep::follow_root_closure);

  // Process reference objects found during marking
  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Sanity");

  rp->setup_policy(clear_all_softrefs);
  rp->process_discovered_references(&GenMarkSweep::is_alive,
                                    &GenMarkSweep::keep_alive,
                                    &GenMarkSweep::follow_stack_closure,
                                    NULL);

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  // Follow code cache roots (has to be done after system dictionary,
  // assumes all live klasses are marked)
  CodeCache::do_unloading(&GenMarkSweep::is_alive,
                          &GenMarkSweep::keep_alive,
                          purged_class);
  GenMarkSweep::follow_stack();

  // Update subklass/sibling/implementor links of live klasses
  GenMarkSweep::follow_weak_klass_links();
  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  // Visit memoized MDO's and clear any unmarked weak refs
  GenMarkSweep::follow_mdo_weak_refs();
  assert(GenMarkSweep::_marking_stack.is_empty(), "just drained");

  // Visit interned string tables and delete unmarked oops
  StringTable::unlink(&GenMarkSweep::is_alive);
  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
    gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
    Universe::heap()->prepare_for_verify();
    // Note: we can verify only the heap here. When an object is
    // marked, the previous value of the mark word (including
    // identity hash values, ages, etc) is preserved, and the mark
    // word is set to markOop::marked_value - effectively removing
    // any hash values from the mark word. These hash values are
    // used when verifying the dictionaries and so removing them
    // from the mark word can make verification of the dictionaries
    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
    Universe::heap()->verify(/* allow dirty */ true,
                             /* silent */ false,
                             /* option */ VerifyOption_G1UseMarkWord);
    gclog_or_tty->print_cr("]");
  }
}
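
// Per-region closure for phase 2: installs a forwarding address in
// every live object. Live humongous objects are never moved, while a
// dead humongous object's start region is freed immediately.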
class G1PrepareCompactClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ModRefBarrierSet* _mrbs;
  CompactPoint _cp;
  HumongousRegionSet _humongous_proxy_set;
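
  // Frees the start region of a dead humongous object. The "dummy"
  // accounting arguments are discarded: the region sets and free list
  // are rebuilt at the end of the GC (see update_sets() below).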
  void free_humongous_region(HeapRegion* hr) {
    HeapWord* end = hr->end();
    size_t dummy_pre_used;
    FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");

    assert(hr->startsHumongous(),
           "Only the start of a humongous region should be freed.");
    _g1h->free_humongous_region(hr, &dummy_pre_used, &dummy_free_list,
                                &_humongous_proxy_set, false /* par */);
    hr->prepare_for_compaction(&_cp);
    // Also clear the part of the card table that will be unused after
    // compaction.
    _mrbs->clear(MemRegion(hr->compaction_top(), end));
    dummy_free_list.remove_all();
  }

public:
  G1PrepareCompactClosure(CompactibleSpace* cs)
  : _g1h(G1CollectedHeap::heap()),
    _mrbs(G1CollectedHeap::heap()->mr_bs()),
    _cp(NULL, cs, cs->initialize_threshold()),
    _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }

  void update_sets() {
    // We'll recalculate total used bytes and recreate the free list
    // at the end of the GC, so no point in updating those values here.
    _g1h->update_sets_after_freeing_regions(0, /* pre_used */
                                            NULL, /* free_list */
                                            NULL, /* old_proxy_set */
                                            &_humongous_proxy_set,
                                            false /* par */);
  }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
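          // A live humongous object is never moved: forwarding it to
          // itself means pointer adjustment leaves references to it
          // unchanged.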
          obj->forward_to(obj);
        } else {
          free_humongous_region(hr);
        }
      } else {
        assert(hr->continuesHumongous(), "Invalid humongous.");
      }
    } else {
      hr->prepare_for_compaction(&_cp);
      // Also clear the part of the card table that will be unused after
      // compaction.
      _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
    }
    return false;
  }
};

// Finds the first HeapRegion: returning true from doHeapRegion()
// terminates the iteration after the first region has been visited.
class FindFirstRegionClosure: public HeapRegionClosure {
  HeapRegion* _a_region;
public:
  FindFirstRegionClosure() : _a_region(NULL) {}
  bool doHeapRegion(HeapRegion* r) {
    _a_region = r;
    return true;
  }
  HeapRegion* result() { return _a_region; }
};

void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("2");

  FindFirstRegionClosure cl;
  g1h->heap_region_iterate(&cl);
  HeapRegion* r = cl.result();
  CompactibleSpace* sp = r;
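  // A live humongous object at the bottom of the heap will not move,
  // so in that case compaction starts at the next compaction space.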
  if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
    sp = r->next_compaction_space();
  }

  G1PrepareCompactClosure blk(sp);
  g1h->heap_region_iterate(&blk);
  blk.update_sets();

  CompactPoint perm_cp(pg, NULL, NULL);
  pg->prepare_for_compaction(&perm_cp);
}
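
// Per-region closure for phase 3: updates every interior oop in each
// region to refer to its referent's new (forwarded) location.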
class G1AdjustPointersClosure: public HeapRegionClosure {
public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->isHumongous()) {
      if (r->startsHumongous()) {
        // We must adjust the pointers on the single H object.
        oop obj = oop(r->bottom());
        debug_only(GenMarkSweep::track_interior_pointers(obj));
        // point all the oops to the new location
        obj->adjust_pointers();
        debug_only(GenMarkSweep::check_interior_pointers());
      }
    } else {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  // Adjust the pointers to reflect the new locations
  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("3");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_AllClasses,
                           &GenMarkSweep::adjust_root_pointer_closure,
                           NULL,  // do not touch code cache here
                           &GenMarkSweep::adjust_pointer_closure);

  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
                             &GenMarkSweep::adjust_pointer_closure);

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
  pg->adjust_pointers();
}
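
// Per-region closure for phase 4: compacts ordinary regions by copying
// live objects to their forwarding addresses; a live humongous object
// stays put and only has its mark word reinitialized.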
class G1SpaceCompactClosure: public HeapRegionClosure {
public:
  G1SpaceCompactClosure() {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->init_mark();
        } else {
          assert(hr->is_empty(), "Should have been cleared in phase 2.");
        }
        hr->reset_during_compaction();
      }
    } else {
      hr->compact();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (perm_gen first rather than last), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("4");

  pg->compact();

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);
}

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***