src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp

author      minqi
date        Fri, 10 May 2013 08:27:30 -0700
changeset   5097:92ef81e2f571
parent      5020:2f50bc369470
child       5119:12f651e29f6b
permissions -rw-r--r--

8003557: NPG: Klass* const k should be const Klass* k.
Summary: With NPG, declarations of the form const klassOop klass (in fact a const pointer definition) were converted to Klass* const, which was not the original intention. The right usage is to convert them to const Klass*.
Reviewed-by: coleenp, kvn
Contributed-by: yumin.qi@oracle.com
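
The distinction the summary refers to, as a minimal standalone sketch (the
Klass struct below is a simplified stand-in for illustration, not HotSpot's
real Klass):

    struct Klass { int modifier_flags; };

    void example(Klass* k) {
      Klass* const cp = k;       // const pointer: cp cannot be reseated,
      cp->modifier_flags = 0;    //   but the pointed-to Klass stays mutable.

      const Klass* pc = k;       // pointer to const: pc may be reseated,
      pc = cp;                   //   but the Klass it points to is read-only:
      // pc->modifier_flags = 0; // compile error: *pc is const
    }

The mechanical NPG conversion of "const klassOop" produced the first form,
which only makes the pointer itself const; the intended const-correct form is
the second, "const Klass*".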

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer        PSMarkSweep::_accumulated_time;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity.  For example when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

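  // Setting MarkSweepAlwaysCompactCount to 1 below makes this collection
  // compact fully (no dead space retained) when maximum compaction was
  // requested; IntFlagSetting restores the old value when it goes out of scope.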
  int count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump();

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      if (young_gen_empty) {
        modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
      }
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                       heap->total_collections());
        if (Verbose) {
   265           gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d",
   266             old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                 young_gen->eden_space()->used_in_bytes(),
                                 old_gen->used_in_bytes(),
                                 young_gen->eden_space()->capacity_in_bytes(),
                                 old_gen->max_gen_size(),
                                 max_eden_size,
                                 true /* full gc*/,
                                 gc_cause,
                                 heap->collector_policy());

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection.  A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation.  Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                       heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      if (PrintGCDetails) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();

  heap->post_full_gc_dump();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  return true;
}

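// Attempt to absorb live data left in eden into the old gen by moving the
// boundary between the generations: the old gen's virtual space is expanded
// into the (fully committed) young gen, and old gen top/end are advanced to
// eden's top.  Returns true iff the boundary was moved; on false both
// generations are left unchanged.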
bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}

void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
    Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_oops_do(mark_and_push_closure(), follow_klass_closure(), true);
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    ref_processor()->process_discovered_references(
      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();
}

void PSMarkSweep::mark_sweep_phase2() {
  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}

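// Closure reporting every object as live; passed as the is-alive predicate to
// JNIHandles::weak_oops_do() in phase 3 so that all remaining weak handles are
// adjusted (handles to non-surviving objects have already been cleared).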
// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  void do_object(oop p) { ShouldNotReachHere(); }
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: "INT64_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}
