src/share/vm/gc_implementation/g1/g1MarkSweep.cpp

Mon, 02 Aug 2010 12:51:43 -0700

author
johnc
date
Mon, 02 Aug 2010 12:51:43 -0700
changeset 2060
2d160770d2e5
parent 1907
c18cbe5936b8
child 2191
894b1d7c7e01
permissions
-rw-r--r--

6814437: G1: remove the _new_refs array
Summary: The per-worker _new_refs array is used to hold references that point into the collection set. It is populated during RSet updating and subsequently processed. In the event of an evacuation failure it is processed again to recreate the RSets of regions in the collection set. Remove the per-worker _new_refs array by processing the references directly. Use a DirtyCardQueue to hold the cards containing the references so that the RSets of regions in the collection set can be recreated when handling an evacuation failure.
Reviewed-by: iveresov, jmasa, tonyp

     1 /*
     2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_g1MarkSweep.cpp.incl"
    28 class HeapRegion;
    30 void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
    31                                       bool clear_all_softrefs) {
    32   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
    34   SharedHeap* sh = SharedHeap::heap();
    35 #ifdef ASSERT
    36   if (sh->collector_policy()->should_clear_all_soft_refs()) {
    37     assert(clear_all_softrefs, "Policy should have been checked earler");
    38   }
    39 #endif
    40   // hook up weak ref data so it can be used during Mark-Sweep
    41   assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
    42   assert(rp != NULL, "should be non-NULL");
    43   GenMarkSweep::_ref_processor = rp;
    44   rp->setup_policy(clear_all_softrefs);
    46   // When collecting the permanent generation methodOops may be moving,
    47   // so we either have to flush all bcp data or convert it into bci.
    48   CodeCache::gc_prologue();
    49   Threads::gc_prologue();
    51   // Increment the invocation count for the permanent generation, since it is
    52   // implicitly collected whenever we do a full mark sweep collection.
    53   sh->perm_gen()->stat_record()->invocations++;
    55   bool marked_for_unloading = false;
    57   allocate_stacks();
    59   // We should save the marks of the currently locked biased monitors.
    60   // The marking doesn't preserve the marks of biased objects.
    61   BiasedLocking::preserve_marks();
    63   mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);
    65   if (VerifyDuringGC) {
    66       G1CollectedHeap* g1h = G1CollectedHeap::heap();
    67       g1h->checkConcurrentMark();
    68   }
    70   mark_sweep_phase2();
    72   // Don't add any more derived pointers during phase3
    73   COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
    75   mark_sweep_phase3();
    77   mark_sweep_phase4();
    79   GenMarkSweep::restore_marks();
    80   BiasedLocking::restore_marks();
    81   GenMarkSweep::deallocate_stacks();
    83   // We must invalidate the perm-gen rs, so that it gets rebuilt.
    84   GenRemSet* rs = sh->rem_set();
    85   rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/);
    87   // "free at last gc" is calculated from these.
    88   // CHF: cheating for now!!!
    89   //  Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
    90   //  Universe::set_heap_used_at_last_gc(Universe::heap()->used());
    92   Threads::gc_epilogue();
    93   CodeCache::gc_epilogue();
    95   // refs processing: clean slate
    96   GenMarkSweep::_ref_processor = NULL;
    97 }
   100 void G1MarkSweep::allocate_stacks() {
   101   GenMarkSweep::_preserved_count_max = 0;
   102   GenMarkSweep::_preserved_marks = NULL;
   103   GenMarkSweep::_preserved_count = 0;
   104   GenMarkSweep::_preserved_mark_stack = NULL;
   105   GenMarkSweep::_preserved_oop_stack = NULL;
   107   GenMarkSweep::_marking_stack =
   108     new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
   109   GenMarkSweep::_objarray_stack =
   110     new (ResourceObj::C_HEAP) GrowableArray<ObjArrayTask>(50, true);
   112   int size = SystemDictionary::number_of_classes() * 2;
   113   GenMarkSweep::_revisit_klass_stack =
   114     new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
   115   // (#klass/k)^2 for k ~ 10 appears a better fit, but this will have to do
   116   // for now until we have a chance to work out a more optimal setting.
   117   GenMarkSweep::_revisit_mdo_stack =
   118     new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
   120 }
// Phase 1: recursively traverse all live objects from the strong roots and
// mark them, process discovered references, and unload dead classes, code
// and interned strings. The ordering of the steps below is significant:
// reference processing must precede class unloading, and code-cache
// unloading must follow system-dictionary unloading.
void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace(" 1");

  SharedHeap* sh = SharedHeap::heap();

  // Mark everything reachable from the strong roots (including the
  // permanent generation); the follow closures do the transitive closure.
  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_SystemClasses,
                           &GenMarkSweep::follow_root_closure,
                           &GenMarkSweep::follow_code_root_closure,
                           &GenMarkSweep::follow_root_closure);

  // Process reference objects found during marking
  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
  rp->setup_policy(clear_all_softrefs);
  rp->process_discovered_references(&GenMarkSweep::is_alive,
                                    &GenMarkSweep::keep_alive,
                                    &GenMarkSweep::follow_stack_closure,
                                    NULL);

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");

  // Follow code cache roots (has to be done after system dictionary,
  // assumes all live klasses are marked)
  CodeCache::do_unloading(&GenMarkSweep::is_alive,
                                   &GenMarkSweep::keep_alive,
                                   purged_class);
  // Drain anything the code-cache unloading pushed onto the marking stack.
  GenMarkSweep::follow_stack();

  // Update subklass/sibling/implementor links of live klasses
  GenMarkSweep::follow_weak_klass_links();
  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");

  // Visit memoized MDO's and clear any unmarked weak refs
  GenMarkSweep::follow_mdo_weak_refs();
  assert(GenMarkSweep::_marking_stack->is_empty(), "just drained");

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(&GenMarkSweep::is_alive);
  StringTable::unlink(&GenMarkSweep::is_alive);

  assert(GenMarkSweep::_marking_stack->is_empty(),
         "stack should be empty by now");
}
   176 class G1PrepareCompactClosure: public HeapRegionClosure {
   177   ModRefBarrierSet* _mrbs;
   178   CompactPoint _cp;
   180   void free_humongous_region(HeapRegion* hr) {
   181     HeapWord* bot = hr->bottom();
   182     HeapWord* end = hr->end();
   183     assert(hr->startsHumongous(),
   184            "Only the start of a humongous region should be freed.");
   185     G1CollectedHeap::heap()->free_region(hr);
   186     hr->prepare_for_compaction(&_cp);
   187     // Also clear the part of the card table that will be unused after
   188     // compaction.
   189     _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
   190   }
   192 public:
   193   G1PrepareCompactClosure(CompactibleSpace* cs) :
   194     _cp(NULL, cs, cs->initialize_threshold()),
   195     _mrbs(G1CollectedHeap::heap()->mr_bs())
   196   {}
   197   bool doHeapRegion(HeapRegion* hr) {
   198     if (hr->isHumongous()) {
   199       if (hr->startsHumongous()) {
   200         oop obj = oop(hr->bottom());
   201         if (obj->is_gc_marked()) {
   202           obj->forward_to(obj);
   203         } else  {
   204           free_humongous_region(hr);
   205         }
   206       } else {
   207         assert(hr->continuesHumongous(), "Invalid humongous.");
   208       }
   209     } else {
   210       hr->prepare_for_compaction(&_cp);
   211       // Also clear the part of the card table that will be unused after
   212       // compaction.
   213       _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
   214     }
   215     return false;
   216   }
   217 };
   219 // Finds the first HeapRegion.
   220 class FindFirstRegionClosure: public HeapRegionClosure {
   221   HeapRegion* _a_region;
   222 public:
   223   FindFirstRegionClosure() : _a_region(NULL) {}
   224   bool doHeapRegion(HeapRegion* r) {
   225     _a_region = r;
   226     return true;
   227   }
   228   HeapRegion* result() { return _a_region; }
   229 };
// Phase 2: with all live objects marked, compute each object's new address
// by sweeping the regions (and, last, the permanent generation) with the
// prepare-compact closures.
void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed a range of dead object may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("2");

  // Locate the first region to seed the compaction point.
  FindFirstRegionClosure cl;
  g1h->heap_region_iterate(&cl);
  HeapRegion *r = cl.result();
  CompactibleSpace* sp = r;
  // If the first region holds a live humongous object, it cannot receive
  // compacted objects; start compacting into the next space instead.
  if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
    sp = r->next_compaction_space();
  }

  G1PrepareCompactClosure blk(sp);
  g1h->heap_region_iterate(&blk);

  // Perm gen is prepared last -- see the ordering comment above.
  CompactPoint perm_cp(pg, NULL, NULL);
  pg->prepare_for_compaction(&perm_cp);
}
   267 class G1AdjustPointersClosure: public HeapRegionClosure {
   268  public:
   269   bool doHeapRegion(HeapRegion* r) {
   270     if (r->isHumongous()) {
   271       if (r->startsHumongous()) {
   272         // We must adjust the pointers on the single H object.
   273         oop obj = oop(r->bottom());
   274         debug_only(GenMarkSweep::track_interior_pointers(obj));
   275         // point all the oops to the new location
   276         obj->adjust_pointers();
   277         debug_only(GenMarkSweep::check_interior_pointers());
   278       }
   279     } else {
   280       // This really ought to be "as_CompactibleSpace"...
   281       r->adjust_pointers();
   282     }
   283     return false;
   284   }
   285 };
// Phase 3: adjust all pointers -- in roots, weak roots, heap regions and the
// permanent generation -- to the new object locations computed in phase 2.
void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("3");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_AllClasses,
                           &GenMarkSweep::adjust_root_pointer_closure,
                           NULL,  // do not touch code cache here
                           &GenMarkSweep::adjust_pointer_closure);

  // Adjust references discovered (but not yet enqueued) by the reference
  // processor.
  g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
                             &GenMarkSweep::adjust_pointer_closure);

  // Restore the headers saved in phase 1 before adjusting interior pointers.
  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
  pg->adjust_pointers();
}
   319 class G1SpaceCompactClosure: public HeapRegionClosure {
   320 public:
   321   G1SpaceCompactClosure() {}
   323   bool doHeapRegion(HeapRegion* hr) {
   324     if (hr->isHumongous()) {
   325       if (hr->startsHumongous()) {
   326         oop obj = oop(hr->bottom());
   327         if (obj->is_gc_marked()) {
   328           obj->init_mark();
   329         } else {
   330           assert(hr->is_empty(), "Should have been cleared in phase 2.");
   331         }
   332         hr->reset_during_compaction();
   333       }
   334     } else {
   335       hr->compact();
   336     }
   337     return false;
   338   }
   339 };
// Phase 4: physically move (compact) all live objects to their new
// addresses, permanent generation first.
void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through it's klass!

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (perm_gen first rather than last), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("4");

  // Perm gen first -- see the ordering comment above.
  pg->compact();

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);

}
   367 // Local Variables: ***
   368 // c-indentation-style: gnu ***
   369 // End: ***

mercurial