src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp

author:      coleenp
date:        Sat, 01 Sep 2012 13:25:18 -0400
changeset:   4037:da91efe96a93
parent:      3767:9d679effd28c
child:       4098:8966c2d65d96
permissions: -rw-r--r--
6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer        PSMarkSweep::_accumulated_time;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before the full gc, or perform any other specialized
// behavior, that logic needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity.  For example, when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();
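
  // Note: IntFlagSetting is a scoped (RAII) helper.  When maximal compaction
  // is requested it forces MarkSweepAlwaysCompactCount to 1 for the duration
  // of the call below, so every space is fully compacted, and restores the
  // previous flag value when this scope exits.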
  int count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump();

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::used_in_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    ref_processor()->setup_policy(clear_all_softrefs);
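
    // The collection proper runs as the classic four serial
    // mark-sweep-compact phases:
    //   phase 1: recursively mark all live objects from the strong roots;
    //   phase 2: compute each live object's new (compacted) address;
    //   phase 3: adjust roots and object fields to the new addresses;
    //   phase 4: slide the objects to their new locations.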
    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;
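
    // After compaction the card table no longer describes where the
    // old-to-young pointers are.  If the young gen is now empty there can be
    // no such pointers, so the old gen cards can simply be cleared; otherwise
    // conservatively dirty (invalidate) every old gen card so the next
    // scavenge rescans them all.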
    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      if (young_gen_empty) {
        modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
      }
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                       heap->total_collections());
        if (Verbose) {
          // Use SIZE_FORMAT for the size_t capacities; "%d" truncates on LP64.
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                              " young_gen_capacity: " SIZE_FORMAT,
            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                 young_gen->eden_space()->used_in_bytes(),
                                 old_gen->used_in_bytes(),
                                 young_gen->eden_space()->capacity_in_bytes(),
                                 old_gen->max_gen_size(),
                                 max_eden_size,
                                 true /* full gc*/,
                                 gc_cause,
                                 heap->collector_policy());

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection.  A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation.  Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                       heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC, so
        // it would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      if (PrintGCDetails) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();

  heap->post_full_gc_dump();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  return true;
}
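
// When the adaptive GC boundary is in use (UseAdaptiveSizePolicy and
// UseAdaptiveGCBoundary), eden's surviving data can be "absorbed" into the
// old gen without copying: the boundary between the generations is shifted
// so that the old gen grows over eden, and the gap between the old gen's
// previous top and eden's live data is plugged with filler objects.
// Returns true on success; returns false when any precondition fails, in
// which case the caller falls back on treating eden as non-empty.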
bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();
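
  // Illustrative arithmetic (hypothetical figures, not taken from a real
  // run): with eden_used = 60M, a padded average promoted of 8M, and a 64K
  // alignment, absorb_size = align_size_up(68M, 64K) = 68M.  The check below
  // then requires eden_capacity to be strictly larger than 68M.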
  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}
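
// Mark-sweep preserves the original header words of objects whose marks are
// overwritten during marking.  Rather than malloc'ing a buffer, the unused
// part of to-space is borrowed for this (to-space holds no live data outside
// of a scavenge); overflow beyond _preserved_count_max spills into the
// _preserved_mark_stack/_preserved_oop_stack cleared in deallocate_stacks().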
void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}

void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();
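
  // (Each ClassLoaderData carries a "claimed" flag so that a loader is
  // visited at most once per traversal even when it is reachable from
  // several roots; the flags must be reset before each new walk of the
  // graph, here and in phase 3.)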

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_oops_do(mark_and_push_closure(), follow_klass_closure(), true);
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    ref_processor()->process_discovered_references(
      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
  }

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots
  CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(),
                          purged_class);
  follow_stack(); // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  Klass::clean_weak_klass_links(&is_alive);
  assert(_marking_stack.is_empty(), "just drained");

  // Visit interned string tables and delete unmarked oops
  StringTable::unlink(is_alive_closure());
  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  assert(_marking_stack.is_empty(), "stack should be empty by now");
}

void PSMarkSweep::mark_sweep_phase2() {
  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  void do_object(oop p) { ShouldNotReachHere(); }
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_root_pointer_closure());
  JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
  Threads::oops_do(adjust_root_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
  FlatProfiler::oops_do(adjust_root_pointer_closure());
  Management::oops_do(adjust_root_pointer_closure());
  JvmtiExport::oops_do(adjust_root_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_root_pointer_closure());
  ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true);
  //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  StringTable::oops_do(adjust_root_pointer_closure());
  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}
