src/share/vm/gc_implementation/g1/g1MarkSweep.cpp

author:      brutisso
date:        Fri, 13 Apr 2012 01:59:38 +0200
changeset:   3710:5c86f8211d1e
parent:      3499:aa3d708d67c4
child:       3711:b632e80fc9dc
permissions: -rw-r--r--

7160728: Introduce an extra logging level for G1 logging
Summary: Added log levels "fine", "finer" and "finest". Let PrintGC map to "fine" and PrintGCDetails map to "finer". Separated out the per-worker information in the G1 logging to the "finest" level.
Reviewed-by: stefank, jwilhelm, tonyp, johnc
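
For context on the summary above: in the code below, each phase's TraceTime
is gated on "G1Log::fine() && Verbose", so per-phase timing only appears at
the "fine" level or above. The following is a minimal sketch of such a level
predicate; it is an illustration only, not the actual g1Log.hpp from this
changeset, and everything beyond G1Log::fine() (the finer()/finest()
accessors and the flag mapping) is an assumption based on the summary.

// Hypothetical sketch -- one static predicate per log level, with PrintGC
// enabling "fine" and PrintGCDetails enabling "finer" as the summary says.
// ("finest" would presumably be enabled by a dedicated flag; omitted here.)
class G1Log {
 public:
  enum LogLevel { LevelNone, LevelFine, LevelFiner, LevelFinest };

  static bool fine()   { return _level >= LevelFine;   }  // PrintGC
  static bool finer()  { return _level >= LevelFiner;  }  // PrintGCDetails
  static bool finest() { return _level >= LevelFinest; }  // per-worker info

  // Derive the level from the command-line flags once at VM startup.
  static void init() {
    if (PrintGCDetails)    _level = LevelFiner;
    else if (PrintGC)      _level = LevelFine;
  }

 private:
  static LogLevel _level;
};

G1Log::LogLevel G1Log::_level = G1Log::LevelNone;

With a shape like that, a call site such as the one in mark_sweep_phase1()
below reads naturally: the phase is timed and printed only when at least
"fine" logging (plus Verbose) is active.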

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "memory/gcLocker.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/modRefBarrierSet.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class HeapRegion;

void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  SharedHeap* sh = SharedHeap::heap();
#ifdef ASSERT
  if (sh->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // hook up weak ref data so it can be used during Mark-Sweep
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");

  GenMarkSweep::_ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation methodOops may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count for the permanent generation, since it is
  // implicitly collected whenever we do a full mark sweep collection.
  sh->perm_gen()->stat_record()->invocations++;

  bool marked_for_unloading = false;

  allocate_stacks();

  // We should save the marks of the currently locked biased monitors.
  // The marking doesn't preserve the marks of biased objects.
  BiasedLocking::preserve_marks();

  mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3();

  mark_sweep_phase4();

  GenMarkSweep::restore_marks();
  BiasedLocking::restore_marks();
  GenMarkSweep::deallocate_stacks();

  // We must invalidate the perm-gen rs, so that it gets rebuilt.
  GenRemSet* rs = sh->rem_set();
  rs->invalidate(sh->perm_gen()->used_region(), true /*whole_heap*/);

  // "free at last gc" is calculated from these.
  // CHF: cheating for now!!!
  //  Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
  //  Universe::set_heap_used_at_last_gc(Universe::heap()->used());

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  // refs processing: clean slate
  GenMarkSweep::_ref_processor = NULL;
}

void G1MarkSweep::allocate_stacks() {
  GenMarkSweep::_preserved_count_max = 0;
  GenMarkSweep::_preserved_marks = NULL;
  GenMarkSweep::_preserved_count = 0;
}

void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                    bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  TraceTime tm("phase 1", G1Log::fine() && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace(" 1");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_SystemClasses,
                           &GenMarkSweep::follow_root_closure,
                           &GenMarkSweep::follow_code_root_closure,
                           &GenMarkSweep::follow_root_closure);

  // Process reference objects found during marking
  ReferenceProcessor* rp = GenMarkSweep::ref_processor();
  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Sanity");

  rp->setup_policy(clear_all_softrefs);
  rp->process_discovered_references(&GenMarkSweep::is_alive,
                                    &GenMarkSweep::keep_alive,
                                    &GenMarkSweep::follow_stack_closure,
                                    NULL);

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  // Follow code cache roots (has to be done after system dictionary,
  // assumes all live klasses are marked)
  CodeCache::do_unloading(&GenMarkSweep::is_alive,
                          &GenMarkSweep::keep_alive,
                          purged_class);
  GenMarkSweep::follow_stack();

  // Update subklass/sibling/implementor links of live klasses
  GenMarkSweep::follow_weak_klass_links();
  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  // Visit memoized MDOs and clear any unmarked weak refs
  GenMarkSweep::follow_mdo_weak_refs();
  assert(GenMarkSweep::_marking_stack.is_empty(), "just drained");

  // Visit interned string tables and delete unmarked oops
  StringTable::unlink(&GenMarkSweep::is_alive);
  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  assert(GenMarkSweep::_marking_stack.is_empty(),
         "stack should be empty by now");

  if (VerifyDuringGC) {
    HandleMark hm;  // handle scope
    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
    gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
    Universe::heap()->prepare_for_verify();
    // Note: we can verify only the heap here. When an object is
    // marked, the previous value of the mark word (including
    // identity hash values, ages, etc) is preserved, and the mark
    // word is set to markOop::marked_value - effectively removing
    // any hash values from the mark word. These hash values are
    // used when verifying the dictionaries and so removing them
    // from the mark word can make verification of the dictionaries
    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
    Universe::heap()->verify(/* allow dirty */ true,
                             /* silent      */ false,
                             /* option      */ VerifyOption_G1UseMarkWord);

    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    gclog_or_tty->print_cr("]");
  }
}

class G1PrepareCompactClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ModRefBarrierSet* _mrbs;
  CompactPoint _cp;
  HumongousRegionSet _humongous_proxy_set;

  void free_humongous_region(HeapRegion* hr) {
    HeapWord* end = hr->end();
    size_t dummy_pre_used;
    FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");

    assert(hr->startsHumongous(),
           "Only the start of a humongous region should be freed.");
    _g1h->free_humongous_region(hr, &dummy_pre_used, &dummy_free_list,
                                &_humongous_proxy_set, false /* par */);
    hr->prepare_for_compaction(&_cp);
    // Also clear the part of the card table that will be unused after
    // compaction.
    _mrbs->clear(MemRegion(hr->compaction_top(), end));
    dummy_free_list.remove_all();
  }

public:
  G1PrepareCompactClosure(CompactibleSpace* cs)
  : _g1h(G1CollectedHeap::heap()),
    _mrbs(G1CollectedHeap::heap()->mr_bs()),
    _cp(NULL, cs, cs->initialize_threshold()),
    _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }

  void update_sets() {
    // We'll recalculate total used bytes and recreate the free list
    // at the end of the GC, so no point in updating those values here.
    _g1h->update_sets_after_freeing_regions(0, /* pre_used */
                                            NULL, /* free_list */
                                            NULL, /* old_proxy_set */
                                            &_humongous_proxy_set,
                                            false /* par */);
  }

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->forward_to(obj);
        } else {
          free_humongous_region(hr);
        }
      } else {
        assert(hr->continuesHumongous(), "Invalid humongous.");
      }
    } else {
      hr->prepare_for_compaction(&_cp);
      // Also clear the part of the card table that will be unused after
      // compaction.
      _mrbs->clear(MemRegion(hr->compaction_top(), hr->end()));
    }
    return false;
  }
};

// Finds the first HeapRegion.
class FindFirstRegionClosure: public HeapRegionClosure {
  HeapRegion* _a_region;
public:
  FindFirstRegionClosure() : _a_region(NULL) {}
  bool doHeapRegion(HeapRegion* r) {
    _a_region = r;
    return true;
  }
  HeapRegion* result() { return _a_region; }
};

void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  TraceTime tm("phase 2", G1Log::fine() && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("2");

  FindFirstRegionClosure cl;
  g1h->heap_region_iterate(&cl);
  HeapRegion* r = cl.result();
  CompactibleSpace* sp = r;
  if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
    sp = r->next_compaction_space();
  }

  G1PrepareCompactClosure blk(sp);
  g1h->heap_region_iterate(&blk);
  blk.update_sets();

  CompactPoint perm_cp(pg, NULL, NULL);
  pg->prepare_for_compaction(&perm_cp);
}

class G1AdjustPointersClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->isHumongous()) {
      if (r->startsHumongous()) {
        // We must adjust the pointers on the single H object.
        oop obj = oop(r->bottom());
        debug_only(GenMarkSweep::track_interior_pointers(obj));
        // point all the oops to the new location
        obj->adjust_pointers();
        debug_only(GenMarkSweep::check_interior_pointers());
      }
    } else {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  // Adjust the pointers to reflect the new locations
  TraceTime tm("phase 3", G1Log::fine() && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("3");

  SharedHeap* sh = SharedHeap::heap();

  sh->process_strong_roots(true,  // activate StrongRootsScope
                           true,  // Collecting permanent generation.
                           SharedHeap::SO_AllClasses,
                           &GenMarkSweep::adjust_root_pointer_closure,
                           NULL,  // do not touch code cache here
                           &GenMarkSweep::adjust_pointer_closure);

  assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
                             &GenMarkSweep::adjust_pointer_closure);

  GenMarkSweep::adjust_marks();

  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
  pg->adjust_pointers();
}

class G1SpaceCompactClosure: public HeapRegionClosure {
public:
  G1SpaceCompactClosure() {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->isHumongous()) {
      if (hr->startsHumongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->init_mark();
        } else {
          assert(hr->is_empty(), "Should have been cleared in phase 2.");
        }
        hr->reset_during_compaction();
      }
    } else {
      hr->compact();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (perm_gen first rather than last), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  TraceTime tm("phase 4", G1Log::fine() && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("4");

  pg->compact();

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);

}

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***
