src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp

author      johnc
date        Thu, 22 Sep 2011 10:57:37 -0700
changeset   3175:4dfb2df418f2
parent      2314:f95d63e2154a
child       6084:46d7652b223c
permissions -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
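
A rough sketch of the mechanism the summary describes, as a self-contained toy model (the type and function names below are illustrative only, not the actual HotSpot/G1 classes): the closure that scans and copies objects carries a reference processor, reference objects are "discovered" as a side effect of being scanned, and the discovered references are processed once the pause's copying is done.

// Toy model of "discovery during evacuation, processing at end of pause".
// All names here are hypothetical; this is not HotSpot code.
#include <vector>

struct ToyObj {
  bool    is_reference;   // is this a java.lang.ref.Reference-like object?
  ToyObj* referent;       // what it refers to (may be null)
  bool    evacuated = false;
};

class ToyReferenceProcessor {
 public:
  // Called from the scan/copy closure: remember the reference object.
  void discover(ToyObj* ref) { _discovered.push_back(ref); }

  // Called at the end of the pause: decide what to do with each discovered
  // reference, e.g. keep (copy) the referent and everything it reaches.
  template <typename CopyFn>
  void process_discovered(CopyFn keep_alive) {
    for (ToyObj* ref : _discovered) {
      if (ref->referent != nullptr) {
        keep_alive(ref->referent);
      }
    }
    _discovered.clear();
  }

 private:
  std::vector<ToyObj*> _discovered;
};

// The copy closure has the reference processor embedded in it, so scanning
// an object both evacuates it and, if it is a reference, discovers it.
struct ToyCopyClosure {
  ToyReferenceProcessor* rp;
  void do_object(ToyObj* obj) {
    obj->evacuated = true;   // stand-in for copying to a survivor region
    if (obj->is_reference) {
      rp->discover(obj);
    }
  }
};

At the end of the toy "pause", process_discovered() would be invoked with the same copy routine, so referents (and their reachable graphs) are preserved, mirroring the behaviour described above.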

/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/asPSYoungGen.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcUtil.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

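// A PSYoungGen whose boundary with the old generation can move, so the
// committed size of the generation can grow up to gen_size_limit() or
// shrink, subject to the adaptive size policy.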
ASPSYoungGen::ASPSYoungGen(size_t init_byte_size,
                           size_t minimum_byte_size,
                           size_t byte_size_limit) :
  PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
  _gen_size_limit(byte_size_limit) {
}

ASPSYoungGen::ASPSYoungGen(PSVirtualSpace* vs,
                           size_t init_byte_size,
                           size_t minimum_byte_size,
                           size_t byte_size_limit) :
  //PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
  PSYoungGen(vs->committed_size(), minimum_byte_size, byte_size_limit),
  _gen_size_limit(byte_size_limit) {

  assert(vs->committed_size() == init_byte_size, "Cannot replace with");

  _virtual_space = vs;
}

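// Create a high-to-low virtual space over the reserved region and commit the
// initial generation size; the VM exits if the initial commit fails.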
void ASPSYoungGen::initialize_virtual_space(ReservedSpace rs,
                                            size_t alignment) {
  assert(_init_gen_size != 0, "Should have a finite size");
  _virtual_space = new PSVirtualSpaceHighToLow(rs, alignment);
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void ASPSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
  initialize_virtual_space(rs, alignment);
  initialize_work();
}

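// Return the number of bytes by which the young gen can still expand: the gap
// between the generation size limit and the currently committed size, aligned
// down to the young gen alignment.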
size_t ASPSYoungGen::available_for_expansion() {

  size_t current_committed_size = virtual_space()->committed_size();
  assert((gen_size_limit() >= current_committed_size),
    "generation size limit is wrong");
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  size_t result =  gen_size_limit() - current_committed_size;
  size_t result_aligned = align_size_down(result, heap->young_gen_alignment());
  return result_aligned;
}

// Return the number of bytes the young gen is willing to give up.
//
// Future implementations could check the survivors and if to_space is in the
// right place (below from_space), take a chunk from to_space.
size_t ASPSYoungGen::available_for_contraction() {

  size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  if (uncommitted_bytes != 0) {
    return uncommitted_bytes;
  }

  if (eden_space()->is_empty()) {
    // Respect the minimum size for eden and for the young gen as a whole.
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    const size_t eden_alignment = heap->intra_heap_alignment();
    const size_t gen_alignment = heap->young_gen_alignment();

    assert(eden_space()->capacity_in_bytes() >= eden_alignment,
      "Alignment is wrong");
    size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
    eden_avail = align_size_down(eden_avail, gen_alignment);

    assert(virtual_space()->committed_size() >= min_gen_size(),
      "minimum gen size is wrong");
    size_t gen_avail = virtual_space()->committed_size() - min_gen_size();
    assert(virtual_space()->is_aligned(gen_avail), "not aligned");

    const size_t max_contraction = MIN2(eden_avail, gen_avail);
    // See comment for ASPSOldGen::available_for_contraction()
    // for the reasons the "increment" fraction is used.
    PSAdaptiveSizePolicy* policy = heap->size_policy();
    size_t result = policy->eden_increment_aligned_down(max_contraction);
    size_t result_aligned = align_size_down(result, gen_alignment);
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("ASPSYoungGen::available_for_contraction: " SIZE_FORMAT " K",
        result_aligned/K);
      gclog_or_tty->print_cr("  max_contraction " SIZE_FORMAT " K", max_contraction/K);
      gclog_or_tty->print_cr("  eden_avail " SIZE_FORMAT " K", eden_avail/K);
      gclog_or_tty->print_cr("  gen_avail " SIZE_FORMAT " K", gen_avail/K);
    }
    return result_aligned;
  }

  return 0;
}

// The current implementation only considers space up to the end of eden.
// If to_space is below from_space, to_space is not considered even though
// it could be.
size_t ASPSYoungGen::available_to_live() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t alignment = heap->intra_heap_alignment();

  // Include any space that is committed but is not in eden.
  size_t available = pointer_delta(eden_space()->bottom(),
                                   virtual_space()->low(),
                                   sizeof(char));

  const size_t eden_capacity = eden_space()->capacity_in_bytes();
  if (eden_space()->is_empty() && eden_capacity > alignment) {
    available += eden_capacity - alignment;
  }
  return available;
}

// Similar to PSYoungGen::resize_generation() but
//  allows sum of eden_size and 2 * survivor_size to exceed _max_gen_size
//  expands at the low end of the virtual space
//  moves the boundary between the generations in order to expand
//  some additional diagnostics
// If no additional changes are required, this can be deleted
// and the changes factored back into PSYoungGen::resize_generation().
bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
  const size_t alignment = virtual_space()->alignment();
  size_t orig_size = virtual_space()->committed_size();
  bool size_changed = false;

  // There used to be a guarantee here that
  //   (eden_size + 2*survivor_size)  <= _max_gen_size
  // This requirement is enforced by the calculation of desired_size
  // below.  It may not be true on entry since eden_size is not
  // bounded by the generation size.

  assert(max_size() == reserved().byte_size(), "max gen size problem?");
  assert(min_gen_size() <= orig_size && orig_size <= max_size(),
         "just checking");

  // Adjust new generation size
  const size_t eden_plus_survivors =
    align_size_up(eden_size + 2 * survivor_size, alignment);
  size_t desired_size = MAX2(MIN2(eden_plus_survivors, gen_size_limit()),
                             min_gen_size());
  assert(desired_size <= gen_size_limit(), "just checking");

  if (desired_size > orig_size) {
    // Grow the generation
    size_t change = desired_size - orig_size;
    HeapWord* prev_low = (HeapWord*) virtual_space()->low();
    if (!virtual_space()->expand_by(change)) {
      return false;
    }
    if (ZapUnusedHeapArea) {
      // Mangle newly committed space immediately because it
      // can be done here more simply than after the new
      // spaces have been computed.
      HeapWord* new_low = (HeapWord*) virtual_space()->low();
      assert(new_low < prev_low, "Did not grow");

      MemRegion mangle_region(new_low, prev_low);
      SpaceMangler::mangle_region(mangle_region);
    }
    size_changed = true;
  } else if (desired_size < orig_size) {
    size_t desired_change = orig_size - desired_size;

    // How much is available for shrinking.
    size_t available_bytes = limit_gen_shrink(desired_change);
    size_t change = MIN2(desired_change, available_bytes);
    virtual_space()->shrink_by(change);
    size_changed = true;
  } else {
    if (Verbose && PrintGC) {
      if (orig_size == gen_size_limit()) {
        gclog_or_tty->print_cr("ASPSYoung generation size at maximum: "
          SIZE_FORMAT "K", orig_size/K);
      } else if (orig_size == min_gen_size()) {
        gclog_or_tty->print_cr("ASPSYoung generation size at minimum: "
          SIZE_FORMAT "K", orig_size/K);
      }
    }
  }

  if (size_changed) {
    reset_after_change();
    if (Verbose && PrintGC) {
      size_t current_size  = virtual_space()->committed_size();
      gclog_or_tty->print_cr("ASPSYoung generation size changed: "
        SIZE_FORMAT "K->" SIZE_FORMAT "K",
        orig_size/K, current_size/K);
    }
  }

  guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
            virtual_space()->committed_size() == max_size(), "Sanity");

  return true;
}

// Similar to PSYoungGen::resize_spaces() but
//  eden always starts at the low end of the committed virtual space
//  current implementation does not allow holes between the spaces
//  _young_generation_boundary has to be reset because it changes,
//  so additional verification is performed.

void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
                                 size_t requested_survivor_size) {
  assert(UseAdaptiveSizePolicy, "sanity check");
  assert(requested_eden_size > 0 && requested_survivor_size > 0,
         "just checking");

  space_invariants();

  // We require eden and to space to be empty
  if ((!eden_space()->is_empty()) || (!to_space()->is_empty())) {
    return;
  }

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("PSYoungGen::resize_spaces(requested_eden_size: "
                  SIZE_FORMAT
                  ", requested_survivor_size: " SIZE_FORMAT ")",
                  requested_eden_size, requested_survivor_size);
    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  eden_space()->bottom(),
                  eden_space()->end(),
                  pointer_delta(eden_space()->end(),
                                eden_space()->bottom(),
                                sizeof(char)));
    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  from_space()->bottom(),
                  from_space()->end(),
                  pointer_delta(from_space()->end(),
                                from_space()->bottom(),
                                sizeof(char)));
    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  to_space()->bottom(),
                  to_space()->end(),
                  pointer_delta(  to_space()->end(),
                                  to_space()->bottom(),
                                  sizeof(char)));
  }

  // There's nothing to do if the new sizes are the same as the current
  if (requested_survivor_size == to_space()->capacity_in_bytes() &&
      requested_survivor_size == from_space()->capacity_in_bytes() &&
      requested_eden_size == eden_space()->capacity_in_bytes()) {
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("    capacities are the right sizes, returning");
    }
    return;
  }

  char* eden_start = (char*)virtual_space()->low();
  char* eden_end   = (char*)eden_space()->end();
  char* from_start = (char*)from_space()->bottom();
  char* from_end   = (char*)from_space()->end();
  char* to_start   = (char*)to_space()->bottom();
  char* to_end     = (char*)to_space()->end();

  assert(eden_start < from_start, "Cannot push into from_space");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t alignment = heap->intra_heap_alignment();
  const bool maintain_minimum =
    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();

  bool eden_from_to_order = from_start < to_start;
  // Check whether from space is below to space
  if (eden_from_to_order) {
    // Eden, from, to

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("  Eden, from, to:");
    }

    // Set eden
    // "requested_eden_size" is a goal for the size of eden
    // and may not be attainable.  "eden_size" below is
    // calculated based on the location of from-space and
    // the goal for the size of eden.  from-space is
    // fixed in place because it contains live data.
    // The calculation is done this way to avoid 32-bit
    // overflow (i.e., eden_start + requested_eden_size
    // may be too large for representation in 32 bits).
    size_t eden_size;
    if (maintain_minimum) {
      // Only make eden larger than the requested size if
      // the minimum size of the generation has to be maintained.
      // This could be done in general but policy at a higher
      // level is determining a requested size for eden and that
      // should be honored unless there is a fundamental reason.
      eden_size = pointer_delta(from_start,
                                eden_start,
                                sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(from_start, eden_start, sizeof(char)));
    }

    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // To may resize into from space as long as it is clear of live data.
    // From space must remain page aligned, though, so we need to do some
    // extra calculations.

    // First calculate an optimal to-space
    to_end   = (char*)virtual_space()->high();
    to_start = (char*)pointer_delta(to_end,
                                    (char*)requested_survivor_size,
                                    sizeof(char));

    // Does the optimal to-space overlap from-space?
    if (to_start < (char*)from_space()->end()) {
      assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

      // Calculate the minimum offset possible for from_end
      size_t from_size =
        pointer_delta(from_space()->top(), from_start, sizeof(char));

      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
      if (from_size == 0) {
        from_size = alignment;
      } else {
        from_size = align_size_up(from_size, alignment);
      }

      from_end = from_start + from_size;
      assert(from_end > from_start, "addition overflow or from_size problem");

      guarantee(from_end <= (char*)from_space()->end(),
        "from_end moved to the right");

      // Now update to_start with the new from_end
      to_start = MAX2(from_end, to_start);
    }

    guarantee(to_start != to_end, "to space is zero sized");

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    eden_start,
                    eden_end,
                    pointer_delta(eden_end, eden_start, sizeof(char)));
      gclog_or_tty->print_cr("    [from_start .. from_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    from_start,
                    from_end,
                    pointer_delta(from_end, from_start, sizeof(char)));
      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    to_start,
                    to_end,
                    pointer_delta(  to_end,   to_start, sizeof(char)));
    }
  } else {
    // Eden, to, from
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("  Eden, to, from:");
    }

    // To space gets priority over eden resizing. Note that we position
    // to space as if we were able to resize from space, even though from
    // space is not modified.
    // Giving eden priority was tried and gave poorer performance.
    to_end   = (char*)pointer_delta(virtual_space()->high(),
                                    (char*)requested_survivor_size,
                                    sizeof(char));
    to_end   = MIN2(to_end, from_start);
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));
    // if the space sizes are to be increased by several times then
    // 'to_start' will point beyond the young generation. In this case
    // 'to_start' should be adjusted.
    to_start = MAX2(to_start, eden_start + alignment);

    // Compute how big eden can be, then adjust end.
    // See comments above on calculating eden_end.
    size_t eden_size;
    if (maintain_minimum) {
      eden_size = pointer_delta(to_start, eden_start, sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(to_start, eden_start, sizeof(char)));
    }
    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // Don't let eden shrink down to 0 or less.
    eden_end = MAX2(eden_end, eden_start + alignment);
    to_start = MAX2(to_start, eden_end);

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    eden_start,
                    eden_end,
                    pointer_delta(eden_end, eden_start, sizeof(char)));
      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    to_start,
                    to_end,
                    pointer_delta(  to_end,   to_start, sizeof(char)));
      gclog_or_tty->print_cr("    [from_start .. from_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    from_start,
                    from_end,
                    pointer_delta(from_end, from_start, sizeof(char)));
    }
  }

  guarantee((HeapWord*)from_start <= from_space()->bottom(),
            "from start moved to the right");
  guarantee((HeapWord*)from_end >= from_space()->top(),
            "from end moved into live data");
  assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
  assert(is_object_aligned((intptr_t)from_start), "checking alignment");
  assert(is_object_aligned((intptr_t)to_start), "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);

  // Let's make sure the call to initialize doesn't reset "top"!
  DEBUG_ONLY(HeapWord* old_from_top = from_space()->top();)

  // For the PrintAdaptiveSizePolicy block below
  size_t old_from = from_space()->capacity_in_bytes();
  size_t old_to   = to_space()->capacity_in_bytes();

  if (ZapUnusedHeapArea) {
    // NUMA is a special case because a numa space is not mangled,
    // in order to avoid prematurely binding its pages to the wrong
    // memory (i.e., we don't want the GC thread to be the first to
    // touch the memory).  The survivor spaces are not numa
    // spaces and are mangled.
    if (UseNUMA) {
      if (eden_from_to_order) {
        mangle_survivors(from_space(), fromMR, to_space(), toMR);
      } else {
        mangle_survivors(to_space(), toMR, from_space(), fromMR);
      }
    }

    // If not mangling the spaces, do some checking to verify that
    // the spaces are already mangled.
    // The spaces should be correctly mangled at this point so
    // do some checking here. Note that they are not being mangled
    // in the calls to initialize().
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into an area
    // covered by another space and a failure of the check may
    // not correctly indicate which space is not properly mangled.

    HeapWord* limit = (HeapWord*) virtual_space()->high();
    eden_space()->check_mangled_unused_area(limit);
    from_space()->check_mangled_unused_area(limit);
      to_space()->check_mangled_unused_area(limit);
  }
  // When an existing space is being initialized, it is not
  // mangled because the space has been previously mangled.
  eden_space()->initialize(edenMR,
                           SpaceDecorator::Clear,
                           SpaceDecorator::DontMangle);
    to_space()->initialize(toMR,
                           SpaceDecorator::Clear,
                           SpaceDecorator::DontMangle);
  from_space()->initialize(fromMR,
                           SpaceDecorator::DontClear,
                           SpaceDecorator::DontMangle);

  PSScavenge::set_young_generation_boundary(eden_space()->bottom());

  assert(from_space()->top() == old_from_top, "from top changed!");

  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
                  "collection: %d "
                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
                  heap->total_collections(),
                  old_from, old_to,
                  from_space()->capacity_in_bytes(),
                  to_space()->capacity_in_bytes());
    gclog_or_tty->cr();
  }
  space_invariants();
}
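
// Called after the generation boundary has moved: recompute the reserved
// region, grow eden down to the new low end of the virtual space, update the
// young generation boundary and the card table's covered region.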
void ASPSYoungGen::reset_after_change() {
  assert_locked_or_safepoint(Heap_lock);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());
  PSScavenge::reference_processor()->set_span(_reserved);

  HeapWord* new_eden_bottom = (HeapWord*)virtual_space()->low();
  HeapWord* eden_bottom = eden_space()->bottom();
  if (new_eden_bottom != eden_bottom) {
    MemRegion eden_mr(new_eden_bottom, eden_space()->end());
    eden_space()->initialize(eden_mr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);
    PSScavenge::set_young_generation_boundary(eden_space()->bottom());
  }
  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  space_invariants();
}
