src/share/vm/gc_implementation/g1/g1MarkSweep.cpp

author       johnc
date         Thu, 22 Sep 2011 10:57:37 -0700
changeset    3175 4dfb2df418f2
parent       2969 6747fd0512e0
child        3268 8aae2050e83e
permissions  -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
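
The scheme the summary describes can be pictured with a small standalone sketch: a stop-the-world reference processor is handed to the closure that scans/copies objects, so Reference objects are "discovered" as a side effect of evacuation, and the discovered list is drained at the end of the pause. This is illustrative only - apart from ref_processor_stw(), which appears in the file below, the type and member names are simplified stand-ins, not the real HotSpot API.

#include <cstdio>
#include <vector>

struct Reference { bool referent_live; };

class ReferenceProcessor {
  std::vector<Reference*> _discovered;
public:
  // Called from the scanning closure: remember the Reference for later.
  void discover(Reference* ref) { _discovered.push_back(ref); }
  // Called at the end of the pause: walk the discovered list and either
  // preserve (keep alive/copy) or clear each referent.
  void process_discovered_references() {
    for (Reference* ref : _discovered) {
      std::printf("referent %s\n", ref->referent_live ? "preserved" : "cleared");
    }
    _discovered.clear();
  }
};

// The object-scanning closure carries the processor, so discovery is a
// side effect of evacuating objects (this mirrors "the reference
// processor is embedded into the closures used to scan objects").
class ScanClosure {
  ReferenceProcessor* _rp;
public:
  explicit ScanClosure(ReferenceProcessor* rp) : _rp(rp) {}
  void do_oop(Reference* ref) { _rp->discover(ref); }
};

int main() {
  ReferenceProcessor cm_rp;      // second instance, for concurrent marking
  (void)cm_rp;                   // (unused here; only the STW path is shown)
  ReferenceProcessor stw_rp;     // stands in for ref_processor_stw()
  ScanClosure cl(&stw_rp);       // processor embedded in the closure
  Reference live = { true };
  Reference dead = { false };
  cl.do_oop(&live);              // "discovered" while scanning/copying
  cl.do_oop(&dead);
  stw_rp.process_discovered_references();  // drained at end of the pause
  return 0;
}

Running the sketch prints one line per discovered reference, mirroring the preserve-or-clear decision made during reference processing at the end of the pause.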

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "memory/gcLocker.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/modRefBarrierSet.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class HeapRegion;

void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SharedHeap* sh = SharedHeap::heap();
#ifdef ASSERT
  if (sh->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");

  GenMarkSweep::_ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation methodOops may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count for the permanent generation, since it is
  // implicitly collected whenever we do a full mark sweep collection.
  sh->perm_gen()->stat_record()->invocations++;

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3();

  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

  // We must invalidate the perm-gen rs, so that it gets rebuilt.
  GenRemSet* rs = sh->rem_set();
  rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/);

  // "free at last gc" is calculated from these.
  // CHF: cheating for now!!!
  //  Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
  //  Universe::set_heap_used_at_last_gc(Universe::heap()->used());

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::_ref_processor = NULL;
}

void G1MarkSweep::allocate_stacks() {
  GenMarkSweep::_preserved_count_max = 0;
  GenMarkSweep::_preserved_marks = NULL;
  GenMarkSweep::_preserved_count = 0;
}

void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace(" 1");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_SystemClasses,
                           &GenMarkSweep::follow_root_closure,
                           &GenMarkSweep::follow_code_root_closure,
                           &GenMarkSweep::follow_root_closure);

  // Process reference objects found during marking
  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Sanity");

  rp->setup_policy(clear_all_softrefs);
  rp->process_discovered_references(&GenMarkSweep::is_alive,
                                    &GenMarkSweep::keep_alive,
                                    &GenMarkSweep::follow_stack_closure,
                                    NULL);

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  // Follow code cache roots (has to be done after system dictionary,
  // assumes all live klasses are marked)
  CodeCache::do_unloading(&GenMarkSweep::is_alive,
                          &GenMarkSweep::keep_alive,
                          purged_class);
  GenMarkSweep::follow_stack();

  // Update subklass/sibling/implementor links of live klasses
  GenMarkSweep::follow_weak_klass_links();
  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  // Visit memoized MDO's and clear any unmarked weak refs
  GenMarkSweep::follow_mdo_weak_refs();
  assert(GenMarkSweep::_marking_stack.is_empty(), "just drained");

  // Visit interned string tables and delete unmarked oops
  StringTable::unlink(&GenMarkSweep::is_alive);
  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
    gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
    Universe::heap()->prepare_for_verify();
    // Note: we can verify only the heap here. When an object is
    // marked, the previous value of the mark word (including
    // identity hash values, ages, etc) is preserved, and the mark
    // word is set to markOop::marked_value - effectively removing
    // any hash values from the mark word. These hash values are
    // used when verifying the dictionaries and so removing them
    // from the mark word can make verification of the dictionaries
    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
    Universe::heap()->verify(/* allow dirty */ true,
                             /* silent      */ false,
                             /* option      */ VerifyOption_G1UseMarkWord);

    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    gclog_or_tty->print_cr("]");
  }
}

class G1PrepareCompactClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ModRefBarrierSet* _mrbs;
  CompactPoint _cp;
  HumongousRegionSet _humongous_proxy_set;

  void free_humongous_region(HeapRegion* hr) {
    HeapWord* end = hr->end();
    size_t dummy_pre_used;
    FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");

    assert(hr->startsHumongous(),
           "Only the start of a humongous region should be freed.");
    _g1h->free_humongous_region(hr, &dummy_pre_used, &dummy_free_list,
                                &_humongous_proxy_set, false /* par */);
    hr->prepare_for_compaction(&_cp);
    // Also clear the part of the card table that will be unused after
    // compaction.
    _mrbs->clear(MemRegion(hr->compaction_top(), end));
    dummy_free_list.remove_all();
  }

public:
  G1PrepareCompactClosure(CompactibleSpace* cs)
  : _g1h(G1CollectedHeap::heap()),
    _mrbs(G1CollectedHeap::heap()->mr_bs()),
    _cp(NULL, cs, cs->initialize_threshold()),
    _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }

  void update_sets() {
    // We'll recalculate total used bytes and recreate the free list
    // at the end of the GC, so no point in updating those values here.
    _g1h->update_sets_after_freeing_regions(0, /* pre_used */
                                            NULL, /* free_list */
                                            &_humongous_proxy_set,
                                            false /* par */);
  }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->forward_to(obj);
        } else {
          free_humongous_region(hr);
        }
      } else {
        assert(hr->continuesHumongous(), "Invalid humongous.");
      }
    } else {
      hr->prepare_for_compaction(&_cp);
      // Also clear the part of the card table that will be unused after
      // compaction.
      _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
    }
    return false;
  }
};

// Finds the first HeapRegion.
class FindFirstRegionClosure: public HeapRegionClosure {
  HeapRegion* _a_region;
public:
  FindFirstRegionClosure() : _a_region(NULL) {}
  bool doHeapRegion(HeapRegion* r) {
    _a_region = r;
    return true;
  }
  HeapRegion* result() { return _a_region; }
};

void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("2");

  FindFirstRegionClosure cl;
  g1h->heap_region_iterate(&cl);
  HeapRegion* r = cl.result();
  CompactibleSpace* sp = r;
  if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
    sp = r->next_compaction_space();
  }

  G1PrepareCompactClosure blk(sp);
  g1h->heap_region_iterate(&blk);
  blk.update_sets();

  CompactPoint perm_cp(pg, NULL, NULL);
  pg->prepare_for_compaction(&perm_cp);
}

class G1AdjustPointersClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->isHumongous()) {
      if (r->startsHumongous()) {
        // We must adjust the pointers on the single H object.
        oop obj = oop(r->bottom());
        debug_only(GenMarkSweep::track_interior_pointers(obj));
        // point all the oops to the new location
        obj->adjust_pointers();
        debug_only(GenMarkSweep::check_interior_pointers());
      }
    } else {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("3");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_AllClasses,
                           &GenMarkSweep::adjust_root_pointer_closure,
                           NULL,  // do not touch code cache here
                           &GenMarkSweep::adjust_pointer_closure);

  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
                             &GenMarkSweep::adjust_pointer_closure);

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
  pg->adjust_pointers();
}

class G1SpaceCompactClosure: public HeapRegionClosure {
public:
  G1SpaceCompactClosure() {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->init_mark();
        } else {
          assert(hr->is_empty(), "Should have been cleared in phase 2.");
        }
        hr->reset_during_compaction();
      }
    } else {
      hr->compact();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (perm_gen first rather than last), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("4");

  pg->compact();

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);

}

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***
