src/share/vm/gc_implementation/g1/g1MarkSweep.cpp

author:      tonyp
date:        Wed, 19 Jan 2011 19:30:42 -0500
changeset:   2472:0fa27f37d4d4
parent:      2314:f95d63e2154a
child:       2473:377371490991
permissions: -rw-r--r--

6977804: G1: remove the zero-filling thread
Summary: This changeset removes the zero-filling thread from G1 and collapses the two free region lists we had before (the "free" and "unclean" lists) into one. The new free list uses the new heap region set / list abstractions that we'll ultimately use to keep track of all regions in the heap. A heap region set was also introduced for the humongous regions. Finally, this change increases the concurrency between the thread that completes freeing regions (after a cleanup pause) and the rest of the system (previously, we had to wait for that thread to complete before allocating a new region). The changeset also includes a lot of refactoring and code simplification.
Reviewed-by: jcoomes, johnc
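
The heap region set / list abstractions mentioned in the summary are introduced elsewhere in the changeset and do not appear in this file. As a rough illustration of the "single free list" idea the summary describes, here is a minimal sketch of a named, counted, intrusive region list; all names are hypothetical stand-ins, not the actual HotSpot API:

    // Hypothetical sketch only: a named, counted, intrusive list of
    // regions, in the spirit of the single free list described above.
    #include <cstddef>

    struct HeapRegion {          // stand-in for the real HeapRegion
      HeapRegion* next;          // intrusive link used by the free list
      HeapRegion() : next(nullptr) {}
    };

    class FreeRegionList {
      const char* _name;         // used in verification/logging messages
      HeapRegion* _head;
      size_t      _length;
    public:
      explicit FreeRegionList(const char* name)
        : _name(name), _head(nullptr), _length(0) {}

      // A region freed (e.g. after a cleanup pause) is pushed on the list.
      void add(HeapRegion* hr) {
        hr->next = _head;
        _head    = hr;
        _length += 1;
      }

      // Allocation pops a region off the head; returns nullptr when empty.
      HeapRegion* remove_head() {
        HeapRegion* hr = _head;
        if (hr != nullptr) {
          _head    = hr->next;
          hr->next = nullptr;
          _length -= 1;
        }
        return hr;
      }

      size_t      length() const { return _length; }
      const char* name()   const { return _name; }
    };

With one structure serving both the freeing path (add) and the allocation path (remove_head), regions freed after a cleanup pause become allocatable as soon as they are added, which roughly mirrors the increased concurrency the summary mentions; there is no separate zero-filled list to wait on.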

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
    25 #include "precompiled.hpp"
    26 #include "classfile/javaClasses.hpp"
    27 #include "classfile/symbolTable.hpp"
    28 #include "classfile/systemDictionary.hpp"
    29 #include "classfile/vmSymbols.hpp"
    30 #include "code/codeCache.hpp"
    31 #include "code/icBuffer.hpp"
    32 #include "gc_implementation/g1/g1MarkSweep.hpp"
    33 #include "memory/gcLocker.hpp"
    34 #include "memory/genCollectedHeap.hpp"
    35 #include "memory/modRefBarrierSet.hpp"
    36 #include "memory/referencePolicy.hpp"
    37 #include "memory/space.hpp"
    38 #include "oops/instanceRefKlass.hpp"
    39 #include "oops/oop.inline.hpp"
    40 #include "prims/jvmtiExport.hpp"
    41 #include "runtime/aprofiler.hpp"
    42 #include "runtime/biasedLocking.hpp"
    43 #include "runtime/fprofiler.hpp"
    44 #include "runtime/synchronizer.hpp"
    45 #include "runtime/thread.hpp"
    46 #include "runtime/vmThread.hpp"
    47 #include "utilities/copy.hpp"
    48 #include "utilities/events.hpp"
    50 class HeapRegion;

void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SharedHeap* sh = SharedHeap::heap();
#ifdef ASSERT
  if (sh->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  GenMarkSweep::_ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation methodOops may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count for the permanent generation, since it is
  // implicitly collected whenever we do a full mark sweep collection.
  sh->perm_gen()->stat_record()->invocations++;

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  if (VerifyDuringGC) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    g1h->checkConcurrentMark();
  }

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3();

  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

  // We must invalidate the perm-gen rs, so that it gets rebuilt.
  GenRemSet* rs = sh->rem_set();
  rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/);

  // "free at last gc" is calculated from these.
  // CHF: cheating for now!!!
  //  Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
  //  Universe::set_heap_used_at_last_gc(Universe::heap()->used());

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::_ref_processor = NULL;
}

void G1MarkSweep::allocate_stacks() {
  GenMarkSweep::_preserved_count_max = 0;
  GenMarkSweep::_preserved_marks = NULL;
  GenMarkSweep::_preserved_count = 0;
}

void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace(" 1");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_SystemClasses,
                           &GenMarkSweep::follow_root_closure,
                           &GenMarkSweep::follow_code_root_closure,
                           &GenMarkSweep::follow_root_closure);

  // Process reference objects found during marking
  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
  rp->setup_policy(clear_all_softrefs);
  rp->process_discovered_references(&GenMarkSweep::is_alive,
                                    &GenMarkSweep::keep_alive,
                                    &GenMarkSweep::follow_stack_closure,
                                    NULL);

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  // Follow code cache roots (has to be done after system dictionary,
  // assumes all live klasses are marked)
  CodeCache::do_unloading(&GenMarkSweep::is_alive,
                          &GenMarkSweep::keep_alive,
                          purged_class);
  GenMarkSweep::follow_stack();

  // Update subklass/sibling/implementor links of live klasses
  GenMarkSweep::follow_weak_klass_links();
  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  // Visit memoized MDO's and clear any unmarked weak refs
  GenMarkSweep::follow_mdo_weak_refs();
  assert(GenMarkSweep::_marking_stack.is_empty(), "just drained");

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(&GenMarkSweep::is_alive);
  StringTable::unlink(&GenMarkSweep::is_alive);

  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");
}
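
// Prepares each heap region for compaction: a live humongous object is
// forwarded to itself (it will not move), a dead humongous region is
// freed onto a local free list, and every other region computes the new
// addresses of its live objects and clears the part of the card table
// that will be unused after compaction.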
class G1PrepareCompactClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ModRefBarrierSet* _mrbs;
  CompactPoint _cp;
  size_t _pre_used;
  FreeRegionList _free_list;
  HumongousRegionSet _humongous_proxy_set;

  void free_humongous_region(HeapRegion* hr) {
    HeapWord* end = hr->end();
    assert(hr->startsHumongous(),
           "Only the start of a humongous region should be freed.");
    _g1h->free_humongous_region(hr, &_pre_used, &_free_list,
                                &_humongous_proxy_set, false /* par */);
    // Do we also need to do this for the continues humongous regions
    // we just collapsed?
    hr->prepare_for_compaction(&_cp);
    // Also clear the part of the card table that will be unused after
    // compaction.
    _mrbs->clear(MemRegion(hr->compaction_top(), end));
  }

public:
  G1PrepareCompactClosure(CompactibleSpace* cs)
  : _g1h(G1CollectedHeap::heap()),
    _mrbs(G1CollectedHeap::heap()->mr_bs()),
    _cp(NULL, cs, cs->initialize_threshold()),
    _pre_used(0),
    _free_list("Local Free List for G1MarkSweep"),
    _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }

  void update_sets() {
    // We'll recalculate total used bytes and recreate the free list
    // at the end of the GC, so no point in updating those values here.
    _g1h->update_sets_after_freeing_regions(0, /* pre_used */
                                            NULL, /* free_list */
                                            &_humongous_proxy_set,
                                            false /* par */);
    _free_list.remove_all();
  }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->forward_to(obj);
        } else {
          free_humongous_region(hr);
        }
      } else {
        assert(hr->continuesHumongous(), "Invalid humongous.");
      }
    } else {
      hr->prepare_for_compaction(&_cp);
      // Also clear the part of the card table that will be unused after
      // compaction.
      _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
    }
    return false;
  }
};

// Finds the first HeapRegion.
class FindFirstRegionClosure: public HeapRegionClosure {
  HeapRegion* _a_region;
public:
  FindFirstRegionClosure() : _a_region(NULL) {}
  bool doHeapRegion(HeapRegion* r) {
    _a_region = r;
    return true;
  }
  HeapRegion* result() { return _a_region; }
};

void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("2");

  FindFirstRegionClosure cl;
  g1h->heap_region_iterate(&cl);
  HeapRegion* r = cl.result();
  CompactibleSpace* sp = r;
  if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
    sp = r->next_compaction_space();
  }

  G1PrepareCompactClosure blk(sp);
  g1h->heap_region_iterate(&blk);
  blk.update_sets();

  CompactPoint perm_cp(pg, NULL, NULL);
  pg->prepare_for_compaction(&perm_cp);
}
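
// Adjusts interior pointers in every region to point at the new object
// locations. For a humongous object only the region containing the
// start of the object needs to be visited.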
class G1AdjustPointersClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->isHumongous()) {
      if (r->startsHumongous()) {
        // We must adjust the pointers on the single H object.
        oop obj = oop(r->bottom());
        debug_only(GenMarkSweep::track_interior_pointers(obj));
        // point all the oops to the new location
        obj->adjust_pointers();
        debug_only(GenMarkSweep::check_interior_pointers());
      }
    } else {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("3");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_AllClasses,
                           &GenMarkSweep::adjust_root_pointer_closure,
                           NULL,  // do not touch code cache here
                           &GenMarkSweep::adjust_pointer_closure);

  g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
                             &GenMarkSweep::adjust_pointer_closure);

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
  pg->adjust_pointers();
}
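
// Performs the actual compaction: a live humongous object stays in
// place and only has its mark word re-initialized, while every other
// region slides its live objects to their forwarding addresses.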
class G1SpaceCompactClosure: public HeapRegionClosure {
public:
  G1SpaceCompactClosure() {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->init_mark();
        } else {
          assert(hr->is_empty(), "Should have been cleared in phase 2.");
        }
        hr->reset_during_compaction();
      }
    } else {
      hr->compact();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (perm_gen first rather than last), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("4");

  pg->compact();

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);
}

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***
