src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp

author:      coleenp
date:        Sat, 19 Jul 2008 17:38:22 -0400
changeset:   672:1fdb98a17101
parent:      435:a61af66fc99e
child:       698:12eea04c8b06
permissions: -rw-r--r--

6716785: implicit null checks not triggering with CompressedOops
Summary: allocate alignment-sized page(s) below the java heap so that memory accesses at heap_base+1page raise a signal and cause an implicit null check
Reviewed-by: kvn, jmasa, phh, jcoomes
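
As context for the change above, here is a minimal, illustrative POSIX sketch of the idea in the summary (not the HotSpot implementation, which lives in the ReservedSpace / Universe heap-setup code; names such as heap_base and the chosen sizes are hypothetical): a PROT_NONE page is reserved directly below the committed java heap, so a compressed-oop null that decodes to heap_base plus a small field offset touches the protected page, raises SIGSEGV, and can be reported by the VM's signal handler as an implicit null check.

// Illustrative sketch only; assumes a POSIX/Linux system with mmap/mprotect.
#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>

int main() {
  const size_t page      = (size_t)sysconf(_SC_PAGESIZE);
  const size_t heap_size = 64 * page;   // stand-in for the java heap size

  // Reserve the guard page and the heap as one contiguous, initially
  // inaccessible mapping.
  char* heap_base = (char*)mmap(NULL, page + heap_size, PROT_NONE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (heap_base == MAP_FAILED) { std::perror("mmap"); return 1; }

  // Commit the java heap above the guard page; the first page stays PROT_NONE.
  if (mprotect(heap_base + page, heap_size, PROT_READ | PROT_WRITE) != 0) {
    std::perror("mprotect"); return 1;
  }

  // A null narrow oop decodes to heap_base, so an access such as heap_base[16]
  // lands in the protected page and raises SIGSEGV, which a VM signal handler
  // can turn into an implicit NullPointerException.
  std::printf("guard page: %p, java heap starts at: %p\n",
              (void*)heap_base, (void*)(heap_base + page));

  munmap(heap_base, page + heap_size);
  return 0;
}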

/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_psMarkSweep.cpp.incl"

elapsedTimer        PSMarkSweep::_accumulated_time;
unsigned int        PSMarkSweep::_total_invocations = 0;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr,
                                          true,    // atomic_discovery
                                          false);  // mt_discovery
  if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {
    _counters = new CollectorCounters("PSMarkSweep", 1);
  }
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection.  The exceptions are
  // for explicitly requested GC's.
  if (!policy->gc_time_limit_exceeded() ||
      GCCause::is_user_requested_gc(gc_cause) ||
      GCCause::is_serviceability_requested_gc(gc_cause)) {
    IsGCActiveMark mark;

    if (ScavengeBeforeFullGC) {
      PSScavenge::invoke_no_policy();
    }

    int count = (maximum_heap_compaction)?1:MarkSweepAlwaysCompactCount;
    IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
    PSMarkSweep::invoke_no_policy(maximum_heap_compaction);
  }
}

// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;
    const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
    // This is useful for debugging but don't change the output the
    // customer sees.
    const char* gc_cause_str = "Full GC";
    if (is_system_gc && PrintGCDetails) {
      gc_cause_str = "Full GC (System)";
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    // When collecting the permanent generation methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture perm gen size before collection for sizing.
    size_t perm_gen_prev_used = perm_gen->used_in_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery();

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      MemRegion perm_mr = heap->perm_gen()->reserved();
      assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

      if (young_gen_empty) {
        modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
      }
    }

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                       heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
            " perm_gen_capacity: %d ",
            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
            perm_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                 young_gen->eden_space()->used_in_bytes(),
                                 old_gen->used_in_bytes(),
                                 perm_gen->used_in_bytes(),
                                 young_gen->eden_space()->capacity_in_bytes(),
                                 old_gen->max_gen_size(),
                                 max_eden_size,
                                 true /* full gc*/,
                                 gc_cause);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection.  A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation.  Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                       heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the perm gen, so we'll resize it here.
    perm_gen->compute_new_size(perm_gen_prev_used);

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      // Do perm gen after heap because prev_used does
      // not include the perm gen (done this way in the other
      // collectors).
      if (PrintGCDetails) {
        perm_gen->print_used_change(perm_gen_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    if (PrintGCDetails) {
      if (size_policy->print_gc_time_limit_would_be_exceeded()) {
        if (size_policy->gc_time_limit_exceeded()) {
          gclog_or_tty->print_cr("      GC time is exceeding GCTimeLimit "
            "of %d%%", GCTimeLimit);
        } else {
          gclog_or_tty->print_cr("      GC time would exceed GCTimeLimit "
            "of %d%%", GCTimeLimit);
        }
      }
      size_policy->set_print_gc_time_limit_would_be_exceeded(false);
    }
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)(size_policy->avg_promoted()->padded_average());
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  MemRegion old_gen_unused(old_space->top(), old_space->end());

  // If the unused part of the old gen cannot be filled, skip
  // absorbing eden.
  if (old_gen_unused.word_size() < SharedHeap::min_fill_size()) {
    return false;
  }

  if (!old_gen_unused.is_empty()) {
    SharedHeap::fill_region_with_object(old_gen_unused);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  HeapWord* const start = old_gen_unused.start();
  for (HeapWord* addr = start; addr < new_top; addr += oop(addr)->size()) {
    start_array->allocate_block(addr);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);

  _preserved_mark_stack = NULL;
  _preserved_oop_stack = NULL;

  _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);

  int size = SystemDictionary::number_of_classes() * 2;
  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
}


void PSMarkSweep::deallocate_stacks() {
  if (_preserved_oop_stack) {
    delete _preserved_mark_stack;
    _preserved_mark_stack = NULL;
    delete _preserved_oop_stack;
    _preserved_oop_stack = NULL;
  }

  delete _marking_stack;
  delete _revisit_klass_stack;
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // General strong roots.
  Universe::oops_do(mark_and_push_closure());
  ReferenceProcessor::oops_do(mark_and_push_closure());
  JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
  Threads::oops_do(mark_and_push_closure());
  ObjectSynchronizer::oops_do(mark_and_push_closure());
  FlatProfiler::oops_do(mark_and_push_closure());
  Management::oops_do(mark_and_push_closure());
  JvmtiExport::oops_do(mark_and_push_closure());
  SystemDictionary::always_strong_oops_do(mark_and_push_closure());
  vmSymbols::oops_do(mark_and_push_closure());

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking

  // Skipping the reference processing for VerifyParallelOldWithMarkSweep
  // affects the marking (makes it different).
  {
    ReferencePolicy *soft_ref_policy;
    if (clear_all_softrefs) {
      soft_ref_policy = new AlwaysClearPolicy();
    } else {
#ifdef COMPILER2
      soft_ref_policy = new LRUMaxHeapPolicy();
#else
      soft_ref_policy = new LRUCurrentHeapPolicy();
#endif // COMPILER2
    }
    assert(soft_ref_policy != NULL,"No soft reference policy");
    ref_processor()->process_discovered_references(
      soft_ref_policy, is_alive_closure(), mark_and_push_closure(),
      follow_stack_closure(), NULL);
  }

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots
  CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(),
                          purged_class);
  follow_stack(); // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  follow_weak_klass_links();
  assert(_marking_stack->is_empty(), "just drained");

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(is_alive_closure());
  StringTable::unlink(is_alive_closure());

  assert(_marking_stack->is_empty(), "stack should be empty by now");
}


void PSMarkSweep::mark_sweep_phase2() {
  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();

  // Compact the perm gen into the perm gen
  PSMarkSweepDecorator::set_destination_decorator_perm_gen();

  perm_gen->precompact();
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  void do_object(oop p) { ShouldNotReachHere(); }
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // General strong roots.
  Universe::oops_do(adjust_root_pointer_closure());
  ReferenceProcessor::oops_do(adjust_root_pointer_closure());
  JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
  Threads::oops_do(adjust_root_pointer_closure());
  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
  FlatProfiler::oops_do(adjust_root_pointer_closure());
  Management::oops_do(adjust_root_pointer_closure());
  JvmtiExport::oops_do(adjust_root_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_root_pointer_closure());
  vmSymbols::oops_do(adjust_root_pointer_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  SymbolTable::oops_do(adjust_root_pointer_closure());
  StringTable::oops_do(adjust_root_pointer_closure());
  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
  perm_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  perm_gen->compact();
  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: %d", ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  _time_of_last_gc = os::javaTimeMillis();
}
