src/share/vm/gc_implementation/g1/g1MarkSweep.cpp

Mon, 03 Aug 2009 12:59:30 -0700

author
johnc
date
Mon, 03 Aug 2009 12:59:30 -0700
changeset 1324
15c5903cf9e1
parent 1279
bd02caa94611
child 1376
8b46c4d82093
child 1424
148e5441d916
permissions
-rw-r--r--

6865703: G1: Parallelize hot card cache cleanup
Summary: Have the GC worker threads clear the hot card cache in parallel by having each worker thread claim a chunk of the card cache and process the cards in that chunk. The size of the chunks that each thread will claim is determined at VM initialization from the size of the card cache and the number of worker threads.
Reviewed-by: jmasa, tonyp

ysr@777 1 /*
xdono@1279 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
ysr@777 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
ysr@777 20 * CA 95054 USA or visit www.sun.com if you need additional information or
ysr@777 21 * have any questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
ysr@777 25 #include "incls/_precompiled.incl"
ysr@777 26 #include "incls/_g1MarkSweep.cpp.incl"
ysr@777 27
ysr@777 28 class HeapRegion;
ysr@777 29
// Entry point for a full (stop-the-world) mark-sweep-compact collection
// of the G1 heap. The permanent generation is implicitly collected as
// well. Must be called at a safepoint; 'rp' is the reference processor
// used for soft/weak reference processing during the collection.
void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  GenMarkSweep::_ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation methodOops may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count for the permanent generation, since it is
  // implicitly collected whenever we do a full mark sweep collection.
  SharedHeap* sh = SharedHeap::heap();
  sh->perm_gen()->stat_record()->invocations++;

  // NOTE(review): only read by mark_sweep_phase1 and never set afterwards
  // in this file — presumably kept for symmetry with other collectors;
  // confirm before removing.
  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  // Phase 1: recursively mark all live objects.
  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  if (VerifyDuringGC) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->checkConcurrentMark();
  }

  // Phase 2: compute the new address of every live object.
  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  // Phase 3: adjust all pointers to point at the new addresses.
  mark_sweep_phase3();

  // Phase 4: actually move (compact) the objects.
  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

  // We must invalidate the perm-gen rs, so that it gets rebuilt.
  GenRemSet* rs = sh->rem_set();
  rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/);

  // "free at last gc" is calculated from these.
  // CHF: cheating for now!!!
  // Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
  // Universe::set_heap_used_at_last_gc(Universe::heap()->used());

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::_ref_processor = NULL;
}
ysr@777 93
ysr@777 94
ysr@777 95 void G1MarkSweep::allocate_stacks() {
ysr@777 96 GenMarkSweep::_preserved_count_max = 0;
ysr@777 97 GenMarkSweep::_preserved_marks = NULL;
ysr@777 98 GenMarkSweep::_preserved_count = 0;
ysr@777 99 GenMarkSweep::_preserved_mark_stack = NULL;
ysr@777 100 GenMarkSweep::_preserved_oop_stack = NULL;
ysr@777 101
ysr@777 102 GenMarkSweep::_marking_stack =
ysr@777 103 new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
ysr@777 104
ysr@777 105 size_t size = SystemDictionary::number_of_classes() * 2;
ysr@777 106 GenMarkSweep::_revisit_klass_stack =
ysr@777 107 new (ResourceObj::C_HEAP) GrowableArray<Klass*>((int)size, true);
ysr@777 108 }
ysr@777 109
// Phase 1: recursively trace and mark all objects reachable from the
// strong roots, process discovered references, and unload dead classes,
// dead nmethods, and stale symbol/string table entries.
// NOTE(review): 'marked_for_unloading' is never written in this body —
// confirm whether the out-parameter is still needed.
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace(" 1");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // Collecting permanent generation.
                           SharedHeap::SO_SystemClasses,
                           &GenMarkSweep::follow_root_closure,
                           &GenMarkSweep::follow_root_closure);

  // Process reference objects found during marking
  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
  rp->setup_policy(clear_all_softrefs);
  rp->process_discovered_references(&GenMarkSweep::is_alive,
                                    &GenMarkSweep::keep_alive,
                                    &GenMarkSweep::follow_stack_closure,
                                    NULL);

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");

  // Follow code cache roots (has to be done after system dictionary,
  // assumes all live klasses are marked)
  CodeCache::do_unloading(&GenMarkSweep::is_alive,
                          &GenMarkSweep::keep_alive,
                          purged_class);
  GenMarkSweep::follow_stack();

  // Update subklass/sibling/implementor links of live klasses
  GenMarkSweep::follow_weak_klass_links();
  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(&GenMarkSweep::is_alive);
  StringTable::unlink(&GenMarkSweep::is_alive);

  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");
}
ysr@777 156
ysr@777 157 class G1PrepareCompactClosure: public HeapRegionClosure {
ysr@777 158 ModRefBarrierSet* _mrbs;
ysr@777 159 CompactPoint _cp;
ysr@777 160
ysr@777 161 void free_humongous_region(HeapRegion* hr) {
ysr@777 162 HeapWord* bot = hr->bottom();
ysr@777 163 HeapWord* end = hr->end();
ysr@777 164 assert(hr->startsHumongous(),
ysr@777 165 "Only the start of a humongous region should be freed.");
ysr@777 166 G1CollectedHeap::heap()->free_region(hr);
ysr@777 167 hr->prepare_for_compaction(&_cp);
ysr@777 168 // Also clear the part of the card table that will be unused after
ysr@777 169 // compaction.
ysr@777 170 _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
ysr@777 171 }
ysr@777 172
ysr@777 173 public:
apetrusenko@1112 174 G1PrepareCompactClosure(CompactibleSpace* cs) :
ysr@777 175 _cp(NULL, cs, cs->initialize_threshold()),
apetrusenko@1112 176 _mrbs(G1CollectedHeap::heap()->mr_bs())
ysr@777 177 {}
ysr@777 178 bool doHeapRegion(HeapRegion* hr) {
ysr@777 179 if (hr->isHumongous()) {
ysr@777 180 if (hr->startsHumongous()) {
ysr@777 181 oop obj = oop(hr->bottom());
ysr@777 182 if (obj->is_gc_marked()) {
ysr@777 183 obj->forward_to(obj);
ysr@777 184 } else {
ysr@777 185 free_humongous_region(hr);
ysr@777 186 }
ysr@777 187 } else {
ysr@777 188 assert(hr->continuesHumongous(), "Invalid humongous.");
ysr@777 189 }
ysr@777 190 } else {
ysr@777 191 hr->prepare_for_compaction(&_cp);
ysr@777 192 // Also clear the part of the card table that will be unused after
ysr@777 193 // compaction.
ysr@777 194 _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
ysr@777 195 }
ysr@777 196 return false;
ysr@777 197 }
ysr@777 198 };
apetrusenko@1112 199
apetrusenko@1112 200 // Finds the first HeapRegion.
ysr@777 201 class FindFirstRegionClosure: public HeapRegionClosure {
ysr@777 202 HeapRegion* _a_region;
ysr@777 203 public:
apetrusenko@1112 204 FindFirstRegionClosure() : _a_region(NULL) {}
ysr@777 205 bool doHeapRegion(HeapRegion* r) {
apetrusenko@1112 206 _a_region = r;
apetrusenko@1112 207 return true;
ysr@777 208 }
ysr@777 209 HeapRegion* result() { return _a_region; }
ysr@777 210 };
ysr@777 211
// Phase 2: compute the post-compaction address of every live object.
void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed a range of dead object may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("2");

  // Find the first region to seed the compact point. If that region
  // holds a live humongous object (which never moves), start compacting
  // into the following compaction space instead.
  FindFirstRegionClosure cl;
  g1h->heap_region_iterate(&cl);
  HeapRegion *r = cl.result();
  CompactibleSpace* sp = r;
  if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
    sp = r->next_compaction_space();
  }

  G1PrepareCompactClosure blk(sp);
  g1h->heap_region_iterate(&blk);

  // Perm gen last -- see the ordering comment above.
  CompactPoint perm_cp(pg, NULL, NULL);
  pg->prepare_for_compaction(&perm_cp);
}
ysr@777 247
ysr@777 248 class G1AdjustPointersClosure: public HeapRegionClosure {
ysr@777 249 public:
ysr@777 250 bool doHeapRegion(HeapRegion* r) {
ysr@777 251 if (r->isHumongous()) {
ysr@777 252 if (r->startsHumongous()) {
ysr@777 253 // We must adjust the pointers on the single H object.
ysr@777 254 oop obj = oop(r->bottom());
ysr@777 255 debug_only(GenMarkSweep::track_interior_pointers(obj));
ysr@777 256 // point all the oops to the new location
ysr@777 257 obj->adjust_pointers();
ysr@777 258 debug_only(GenMarkSweep::check_interior_pointers());
ysr@777 259 }
ysr@777 260 } else {
ysr@777 261 // This really ought to be "as_CompactibleSpace"...
ysr@777 262 r->adjust_pointers();
ysr@777 263 }
ysr@777 264 return false;
ysr@777 265 }
ysr@777 266 };
ysr@777 267
// Phase 3: adjust every root and interior pointer to refer to the new
// object locations computed during phase 2.
void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("3");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // Collecting permanent generation.
                           SharedHeap::SO_AllClasses,
                           &GenMarkSweep::adjust_root_pointer_closure,
                           &GenMarkSweep::adjust_pointer_closure);

  g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
                             &GenMarkSweep::adjust_pointer_closure);

  GenMarkSweep::adjust_marks();

  // Fix up interior pointers region by region; perm gen is done after
  // the regions here (see ordering comments in phase2/phase4).
  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
  pg->adjust_pointers();
}
ysr@777 297
ysr@777 298 class G1SpaceCompactClosure: public HeapRegionClosure {
ysr@777 299 public:
ysr@777 300 G1SpaceCompactClosure() {}
ysr@777 301
ysr@777 302 bool doHeapRegion(HeapRegion* hr) {
ysr@777 303 if (hr->isHumongous()) {
ysr@777 304 if (hr->startsHumongous()) {
ysr@777 305 oop obj = oop(hr->bottom());
ysr@777 306 if (obj->is_gc_marked()) {
ysr@777 307 obj->init_mark();
ysr@777 308 } else {
ysr@777 309 assert(hr->is_empty(), "Should have been cleared in phase 2.");
ysr@777 310 }
ysr@777 311 hr->reset_during_compaction();
ysr@777 312 }
ysr@777 313 } else {
ysr@777 314 hr->compact();
ysr@777 315 }
ysr@777 316 return false;
ysr@777 317 }
ysr@777 318 };
ysr@777 319
ysr@777 320 void G1MarkSweep::mark_sweep_phase4() {
ysr@777 321 // All pointers are now adjusted, move objects accordingly
ysr@777 322
ysr@777 323 // It is imperative that we traverse perm_gen first in phase4. All
ysr@777 324 // classes must be allocated earlier than their instances, and traversing
ysr@777 325 // perm_gen first makes sure that all klassOops have moved to their new
ysr@777 326 // location before any instance does a dispatch through it's klass!
ysr@777 327
ysr@777 328 // The ValidateMarkSweep live oops tracking expects us to traverse spaces
ysr@777 329 // in the same order in phase2, phase3 and phase4. We don't quite do that
ysr@777 330 // here (perm_gen first rather than last), so we tell the validate code
ysr@777 331 // to use a higher index (saved from phase2) when verifying perm_gen.
ysr@777 332 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 333 Generation* pg = g1h->perm_gen();
ysr@777 334
ysr@777 335 EventMark m("4 compact heap");
ysr@777 336 TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
ysr@777 337 GenMarkSweep::trace("4");
ysr@777 338
ysr@777 339 pg->compact();
ysr@777 340
ysr@777 341 G1SpaceCompactClosure blk;
ysr@777 342 g1h->heap_region_iterate(&blk);
ysr@777 343
ysr@777 344 }
ysr@777 345
ysr@777 346 // Local Variables: ***
ysr@777 347 // c-indentation-style: gnu ***
ysr@777 348 // End: ***

mercurial