src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp

author       coleenp
date         Thu, 27 Jan 2011 16:11:27 -0800
changeset    2497 (3582bf76420e)
parent       2467 (9afee0b9fc1d)
child        2651 (92da084fefc9)
permissions  -rw-r--r--

6990754: Use native memory and reference counting to implement SymbolTable
Summary: move symbols from permgen into C heap and reference count them
Reviewed-by: never, acorn, jmasa, stefank

/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPermGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"
elapsedTimer        PSMarkSweep::_accumulated_time;
unsigned int        PSMarkSweep::_total_invocations = 0;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;
void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr,
                                          true,    // atomic_discovery
                                          false);  // mt_discovery
  _counters = new CollectorCounters("PSMarkSweep", 1);
}
// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity.  For example, when the heap space is tight and full measures
// are being taken to free space.
void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

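  // When a maximum compaction is requested, temporarily force
  // MarkSweepAlwaysCompactCount to 1 so that this invocation compacts the
  // entire heap rather than leaving deadwood in the dense prefix.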
  int count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}
// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");
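  // If a thread is inside a JNI critical region, defer this collection;
  // GC_locker will request another GC when the last critical region exits.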
  if (GC_locker::check_active_before_gc()) {
    return;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

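  // Dump the heap and/or print a class histogram before the collection if
  // requested (e.g. -XX:+HeapDumpBeforeFullGC).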
  heap->pre_full_gc_dump();

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;
    const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
    // This is useful for debugging but don't change the output the
    // customer sees.
    const char* gc_cause_str = "Full GC";
    if (is_system_gc && PrintGCDetails) {
      gc_cause_str = "Full GC (System)";
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    // When collecting the permanent generation methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture perm gen size before collection for sizing.
    size_t perm_gen_prev_used = perm_gen->used_in_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
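    // Clear the derived pointer table so that derived pointers (interior
    // pointers computed by C2 from a base oop) found in compiled frames can
    // be recorded and updated after objects have moved.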
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();
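    // Reinstall the object headers (marks) that were saved aside during
    // marking; the saved locations were adjusted in phase3, so the headers
    // land at the objects' new addresses.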
    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

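    // If eden still contains live data after the collection, try to move the
    // young/old boundary so the old gen absorbs it (see
    // absorb_live_data_from_eden below); on success eden is treated as empty.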
    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

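    // The card table covers the perm and old gens.  If the young gen is now
    // completely empty there can be no old->young pointers, so the cards can
    // simply be cleared; otherwise they must be invalidated (dirtied) because
    // compaction has moved objects.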
    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      MemRegion perm_mr = heap->perm_gen()->reserved();
      assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

      if (young_gen_empty) {
        modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
      }
    }

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                       heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
            " young_gen_capacity: " SIZE_FORMAT
            " perm_gen_capacity: " SIZE_FORMAT " ",
            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
            perm_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                 young_gen->eden_space()->used_in_bytes(),
                                 old_gen->used_in_bytes(),
                                 perm_gen->used_in_bytes(),
                                 young_gen->eden_space()->capacity_in_bytes(),
                                 old_gen->max_gen_size(),
                                 max_eden_size,
                                 true /* full gc*/,
                                 gc_cause,
                                 heap->collector_policy());

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection.  A
        // desired young generation size may have been calculated, but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation.  Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                       heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the perm gen, so we'll resize it here.
    perm_gen->compute_new_size(perm_gen_prev_used);

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // it would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      // Do the perm gen after the heap because prev_used does
      // not include the perm gen (done this way in the other
      // collectors).
      if (PrintGCDetails) {
        perm_gen->print_used_change(perm_gen_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
    perm_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  heap->post_full_gc_dump();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}
bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

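  // Shifting the young/old boundary is only permitted when adaptive sizing
  // is allowed to move the generation boundary.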
  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}
void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

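  // Use the unused portion of to_space as scratch storage for preserved
  // marks; marks that do not fit in this array are pushed onto the
  // C-heap-backed preserved stacks instead.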
  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}

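// Note: passing true to clear() also frees the stacks' cached segments
// (see utilities/stack.hpp), returning that memory to the C heap.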
void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
  _revisit_klass_stack.clear(true);
  _revisit_mdo_stack.clear(true);
}
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    ReferenceProcessor::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    ref_processor()->process_discovered_references(
      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
  }

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

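  // purged_class reports whether any class was unloaded; if so, the code
  // cache must also unload nmethods that depend on the unloaded classes.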
  // Follow code cache roots
  CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(),
                          purged_class);
  follow_stack(); // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  follow_weak_klass_links();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit memoized mdo's and clear unmarked weak refs
  follow_mdo_weak_refs();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit interned string tables and delete unmarked oops
  StringTable::unlink(is_alive_closure());
  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  assert(_marking_stack.is_empty(), "stack should be empty by now");
}
void PSMarkSweep::mark_sweep_phase2() {
  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last, a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();

  // Compact the perm gen into the perm gen
  PSMarkSweepDecorator::set_destination_decorator_perm_gen();

  perm_gen->precompact();
}
// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  void do_object(oop p) { ShouldNotReachHere(); }
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // General strong roots.
  Universe::oops_do(adjust_root_pointer_closure());
  ReferenceProcessor::oops_do(adjust_root_pointer_closure());
  JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
  Threads::oops_do(adjust_root_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
  FlatProfiler::oops_do(adjust_root_pointer_closure());
  Management::oops_do(adjust_root_pointer_closure());
  JvmtiExport::oops_do(adjust_root_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_root_pointer_closure());
  //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  StringTable::oops_do(adjust_root_pointer_closure());
  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());

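  // Also adjust the recorded locations of preserved marks, so the saved
  // headers are restored at the objects' new addresses.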
  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
  perm_gen->adjust_pointers();
}
void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  perm_gen->compact();
  old_gen->compact();
  young_gen->compact();
}
jlong PSMarkSweep::millis_since_last_gc() {
  jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, (int64_t)ret_val);)
    return 0;
  }
  return ret_val;
}
void PSMarkSweep::reset_millis_since_last_gc() {
  _time_of_last_gc = os::javaTimeMillis();
}
