src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp

author:      sjohanss
date:        Tue, 10 Dec 2013 10:31:00 +0100
changeset:   6169:ad72068ac41e
parent:      6085:8f07aa079343
child:       6198:55fb97c4c58d
permissions: -rw-r--r--

8028993: Full collections with ParallelScavenge slower in JDK 8 compared to 7u40
Summary: Reducing the number of calls to follow_class_loader to speed up the marking phase. Also removed some unnecessary calls to adjust_klass.
Reviewed-by: stefank, jmasa, mgerdin

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcUtil.hpp"
#include "gc_implementation/shared/mutableNUMASpace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

PSYoungGen::PSYoungGen(size_t        initial_size,
                       size_t        min_size,
                       size_t        max_size) :
  _init_gen_size(initial_size),
  _min_gen_size(min_size),
  _max_gen_size(max_size)
{}

void PSYoungGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
  assert(_init_gen_size != 0, "Should have a finite size");
  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (!virtual_space()->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
  initialize_virtual_space(rs, alignment);
  initialize_work();
}
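
// Set up the committed region, create eden and the two survivor spaces
// (plus their mark-sweep decorators) and register the performance counters.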
void PSYoungGen::initialize_work() {

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    SpaceMangler::mangle_region(cmr);
  }

  if (UseNUMA) {
    _eden_space = new MutableNUMASpace(virtual_space()->alignment());
  } else {
    _eden_space = new MutableSpace(virtual_space()->alignment());
  }
  _from_space = new MutableSpace(virtual_space()->alignment());
  _to_space   = new MutableSpace(virtual_space()->alignment());

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
    vm_exit_during_initialization("Could not allocate a young gen space");
  }

  // Allocate the mark sweep views of spaces
  _eden_mark_sweep =
      new PSMarkSweepDecorator(_eden_space, NULL, MarkSweepDeadRatio);
  _from_mark_sweep =
      new PSMarkSweepDecorator(_from_space, NULL, MarkSweepDeadRatio);
  _to_mark_sweep =
      new PSMarkSweepDecorator(_to_space, NULL, MarkSweepDeadRatio);

  if (_eden_mark_sweep == NULL ||
      _from_mark_sweep == NULL ||
      _to_mark_sweep == NULL) {
    vm_exit_during_initialization("Could not complete allocation"
                                  " of the young generation");
  }

  // Generation Counters - generation 0, 3 subspaces
  _gen_counters = new PSGenerationCounters("new", 0, 3, _virtual_space);

  // Compute maximum space sizes for performance counters
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  size_t alignment = heap->space_alignment();
  size_t size = virtual_space()->reserved_size();

  size_t max_survivor_size;
  size_t max_eden_size;

  if (UseAdaptiveSizePolicy) {
    max_survivor_size = size / MinSurvivorRatio;

    // round the survivor space size down to the nearest alignment
    // and make sure its size is greater than 0.
    max_survivor_size = align_size_down(max_survivor_size, alignment);
    max_survivor_size = MAX2(max_survivor_size, alignment);

    // set the maximum size of eden to be the size of the young gen
    // less two times the minimum survivor size. The minimum survivor
    // size for UseAdaptiveSizePolicy is one alignment.
    max_eden_size = size - 2 * alignment;
  } else {
    max_survivor_size = size / InitialSurvivorRatio;

    // round the survivor space size down to the nearest alignment
    // and make sure its size is greater than 0.
    max_survivor_size = align_size_down(max_survivor_size, alignment);
    max_survivor_size = MAX2(max_survivor_size, alignment);

    // set the maximum size of eden to be the size of the young gen
    // less two times the survivor size when the generation is 100%
    // committed. The minimum survivor size for -UseAdaptiveSizePolicy
    // is dependent on the committed portion (current capacity) of the
    // generation - the less space committed, the smaller the survivor
    // space, possibly as small as an alignment. However, we are interested
    // in the case where the young generation is 100% committed, as this
    // is the point where eden reaches its maximum size. At this point,
    // the size of a survivor space is max_survivor_size.
    max_eden_size = size - 2 * max_survivor_size;
  }

  _eden_counters = new SpaceCounters("eden", 0, max_eden_size, _eden_space,
                                     _gen_counters);
  _from_counters = new SpaceCounters("s0", 1, max_survivor_size, _from_space,
                                     _gen_counters);
  _to_counters = new SpaceCounters("s1", 2, max_survivor_size, _to_space,
                                   _gen_counters);

  compute_initial_space_boundaries();
}
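
// Carve the committed part of the generation into an initial eden and two
// survivor spaces, sized according to InitialSurvivorRatio.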
void PSYoungGen::compute_initial_space_boundaries() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Compute sizes
  size_t alignment = heap->space_alignment();
  size_t size = virtual_space()->committed_size();
  assert(size >= 3 * alignment, "Young space is not large enough for eden + 2 survivors");

  size_t survivor_size = size / InitialSurvivorRatio;
  survivor_size = align_size_down(survivor_size, alignment);
  // ... but never less than an alignment
  survivor_size = MAX2(survivor_size, alignment);

  // Young generation is eden + 2 survivor spaces
  size_t eden_size = size - (2 * survivor_size);

  // Now go ahead and set 'em.
  set_space_boundaries(eden_size, survivor_size);
  space_invariants();

  if (UsePerfData) {
    _eden_counters->update_capacity();
    _from_counters->update_capacity();
    _to_counters->update_capacity();
  }
}
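
// Lay eden and the two survivor spaces out contiguously within the
// committed region.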
void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
  assert(eden_size < virtual_space()->committed_size(), "just checking");
  assert(eden_size > 0  && survivor_size > 0, "just checking");

  // Initial layout is Eden, to, from. After swapping survivor spaces,
  // that leaves us with Eden, from, to, which is step one in our two
  // step resize-with-live-data procedure.
  char *eden_start = virtual_space()->low();
  char *to_start   = eden_start + eden_size;
  char *from_start = to_start   + survivor_size;
  char *from_end   = from_start + survivor_size;

  assert(from_end == virtual_space()->high(), "just checking");
  assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
  assert(is_object_aligned((intptr_t)to_start),   "checking alignment");
  assert(is_object_aligned((intptr_t)from_start), "checking alignment");

  MemRegion eden_mr((HeapWord*)eden_start, (HeapWord*)to_start);
  MemRegion to_mr  ((HeapWord*)to_start, (HeapWord*)from_start);
  MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end);

  eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea);
    to_space()->initialize(to_mr  , true, ZapUnusedHeapArea);
  from_space()->initialize(from_mr, true, ZapUnusedHeapArea);
}

#ifndef PRODUCT
void PSYoungGen::space_invariants() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t alignment = heap->space_alignment();

  // Currently, our eden size cannot shrink to zero
  guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small");
  guarantee(from_space()->capacity_in_bytes() >= alignment, "from too small");
  guarantee(to_space()->capacity_in_bytes() >= alignment, "to too small");

  // Relationship of spaces to each other
  char* eden_start = (char*)eden_space()->bottom();
  char* eden_end   = (char*)eden_space()->end();
  char* from_start = (char*)from_space()->bottom();
  char* from_end   = (char*)from_space()->end();
  char* to_start   = (char*)to_space()->bottom();
  char* to_end     = (char*)to_space()->end();

  guarantee(eden_start >= virtual_space()->low(), "eden bottom");
  guarantee(eden_start < eden_end, "eden space consistency");
  guarantee(from_start < from_end, "from space consistency");
  guarantee(to_start < to_end, "to space consistency");

  // Check whether from space is below to space
  if (from_start < to_start) {
    // Eden, from, to
    guarantee(eden_end <= from_start, "eden/from boundary");
    guarantee(from_end <= to_start,   "from/to boundary");
    guarantee(to_end <= virtual_space()->high(), "to end");
  } else {
    // Eden, to, from
    guarantee(eden_end <= to_start, "eden/to boundary");
    guarantee(to_end <= from_start, "to/from boundary");
    guarantee(from_end <= virtual_space()->high(), "from end");
  }

  // More checks that the virtual space is consistent with the spaces
  assert(virtual_space()->committed_size() >=
    (eden_space()->capacity_in_bytes() +
     to_space()->capacity_in_bytes() +
     from_space()->capacity_in_bytes()), "Committed size is inconsistent");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
    "Space invariant");
  char* eden_top = (char*)eden_space()->top();
  char* from_top = (char*)from_space()->top();
  char* to_top = (char*)to_space()->top();
  assert(eden_top <= virtual_space()->high(), "eden top");
  assert(from_top <= virtual_space()->high(), "from top");
  assert(to_top <= virtual_space()->high(), "to top");

  virtual_space()->verify();
}
#endif

void PSYoungGen::resize(size_t eden_size, size_t survivor_size) {
  // Resize the generation if needed. If the generation resize
  // reports false, do not attempt to resize the spaces.
  if (resize_generation(eden_size, survivor_size)) {
    // Then we lay out the spaces inside the generation
    resize_spaces(eden_size, survivor_size);

    space_invariants();

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("Young generation size: "
        "desired eden: " SIZE_FORMAT " survivor: " SIZE_FORMAT
        " used: " SIZE_FORMAT " capacity: " SIZE_FORMAT
        " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
        eden_size, survivor_size, used_in_bytes(), capacity_in_bytes(),
        _max_gen_size, min_gen_size());
    }
  }
}
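
// Grow or shrink the committed size of the generation toward
// eden_size + 2 * survivor_size, clamped between min_gen_size() and
// max_size().  Returns false only if an attempted expansion of the
// virtual space fails.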
bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
  const size_t alignment = virtual_space()->alignment();
  size_t orig_size = virtual_space()->committed_size();
  bool size_changed = false;

  // There used to be a guarantee here:
  // guarantee ((eden_size + 2*survivor_size)  <= _max_gen_size, "incorrect input arguments");
  // The code below forces this requirement.  In addition, the desired eden
  // and survivor sizes are only goals and may exceed the total generation
  // size.

  assert(min_gen_size() <= orig_size && orig_size <= max_size(), "just checking");

  // Adjust new generation size
  const size_t eden_plus_survivors =
          align_size_up(eden_size + 2 * survivor_size, alignment);
  size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_size()),
                             min_gen_size());
  assert(desired_size <= max_size(), "just checking");

  if (desired_size > orig_size) {
    // Grow the generation
    size_t change = desired_size - orig_size;
    assert(change % alignment == 0, "just checking");
    HeapWord* prev_high = (HeapWord*) virtual_space()->high();
    if (!virtual_space()->expand_by(change)) {
      return false; // Error if we fail to resize!
    }
    if (ZapUnusedHeapArea) {
      // Mangle newly committed space immediately because it
      // can be done here more simply than after the new
      // spaces have been computed.
      HeapWord* new_high = (HeapWord*) virtual_space()->high();
      MemRegion mangle_region(prev_high, new_high);
      SpaceMangler::mangle_region(mangle_region);
    }
    size_changed = true;
  } else if (desired_size < orig_size) {
    size_t desired_change = orig_size - desired_size;
    assert(desired_change % alignment == 0, "just checking");

    desired_change = limit_gen_shrink(desired_change);

    if (desired_change > 0) {
      virtual_space()->shrink_by(desired_change);
      reset_survivors_after_shrink();

      size_changed = true;
    }
  } else {
    if (Verbose && PrintGC) {
      if (orig_size == gen_size_limit()) {
        gclog_or_tty->print_cr("PSYoung generation size at maximum: "
          SIZE_FORMAT "K", orig_size/K);
      } else if (orig_size == min_gen_size()) {
        gclog_or_tty->print_cr("PSYoung generation size at minimum: "
          SIZE_FORMAT "K", orig_size/K);
      }
    }
  }

  if (size_changed) {
    post_resize();

    if (Verbose && PrintGC) {
      size_t current_size  = virtual_space()->committed_size();
      gclog_or_tty->print_cr("PSYoung generation size changed: "
                             SIZE_FORMAT "K->" SIZE_FORMAT "K",
                             orig_size/K, current_size/K);
    }
  }

  guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
            virtual_space()->committed_size() == max_size(), "Sanity");

  return true;
}

#ifndef PRODUCT
// In the numa case eden is not mangled so a survivor space
// moving into a region previously occupied by a survivor
// may find an unmangled region.  Also in the PS case eden
// to-space and from-space may not touch (i.e., there may be
// gaps between them due to movement while resizing the
// spaces).  Those gaps must be mangled.
void PSYoungGen::mangle_survivors(MutableSpace* s1,
                                  MemRegion s1MR,
                                  MutableSpace* s2,
                                  MemRegion s2MR) {
  // Check eden and gap between eden and from-space, in deciding
  // what to mangle in from-space.  Check the gap between from-space
  // and to-space when deciding what to mangle.
  //
  //      +--------+   +----+    +---+
  //      | eden   |   |s1  |    |s2 |
  //      +--------+   +----+    +---+
  //                 +-------+ +-----+
  //                 |s1MR   | |s2MR |
  //                 +-------+ +-----+
  // All of survivor-space is properly mangled so find the
  // upper bound on the mangling for any portion above current s1.
  HeapWord* delta_end = MIN2(s1->bottom(), s1MR.end());
  MemRegion delta1_left;
  if (s1MR.start() < delta_end) {
    delta1_left = MemRegion(s1MR.start(), delta_end);
    s1->mangle_region(delta1_left);
  }
  // Find any portion to the right of the current s1.
  HeapWord* delta_start = MAX2(s1->end(), s1MR.start());
  MemRegion delta1_right;
  if (delta_start < s1MR.end()) {
    delta1_right = MemRegion(delta_start, s1MR.end());
    s1->mangle_region(delta1_right);
  }

  // Similarly for the second survivor space except that
  // any of the new region that overlaps with the current
  // region of the first survivor space has already been
  // mangled.
  delta_end = MIN2(s2->bottom(), s2MR.end());
  delta_start = MAX2(s2MR.start(), s1->end());
  MemRegion delta2_left;
  if (s2MR.start() < delta_end) {
    delta2_left = MemRegion(s2MR.start(), delta_end);
    s2->mangle_region(delta2_left);
  }
  // Find any portion to the right of the current s2.
  delta_start = MAX2(s2->end(), s2MR.start());
  MemRegion delta2_right;
  if (delta_start < s2MR.end()) {
    delta2_right = MemRegion(delta_start, s2MR.end());
    s2->mangle_region(delta2_right);
  }

  if (TraceZapUnusedHeapArea) {
    // s1
    gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
      "New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
      s1->bottom(), s1->end(), s1MR.start(), s1MR.end());
    gclog_or_tty->print_cr("    Mangle before: [" PTR_FORMAT ", "
      PTR_FORMAT ")  Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
      delta1_left.start(), delta1_left.end(), delta1_right.start(),
      delta1_right.end());

    // s2
    gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
      "New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
      s2->bottom(), s2->end(), s2MR.start(), s2MR.end());
    gclog_or_tty->print_cr("    Mangle before: [" PTR_FORMAT ", "
      PTR_FORMAT ")  Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
      delta2_left.start(), delta2_left.end(), delta2_right.start(),
      delta2_right.end());
  }

}
#endif // NOT PRODUCT
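
// Reposition eden, from-space and to-space within the committed region to
// approach the requested sizes.  From-space may hold live data and is left
// in place; eden and to-space must be empty when this is called.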
void PSYoungGen::resize_spaces(size_t requested_eden_size,
                               size_t requested_survivor_size) {
  assert(UseAdaptiveSizePolicy, "sanity check");
  assert(requested_eden_size > 0  && requested_survivor_size > 0,
         "just checking");

  // We require eden and to space to be empty
  if ((!eden_space()->is_empty()) || (!to_space()->is_empty())) {
    return;
  }

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("PSYoungGen::resize_spaces(requested_eden_size: "
                  SIZE_FORMAT
                  ", requested_survivor_size: " SIZE_FORMAT ")",
                  requested_eden_size, requested_survivor_size);
    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  eden_space()->bottom(),
                  eden_space()->end(),
                  pointer_delta(eden_space()->end(),
                                eden_space()->bottom(),
                                sizeof(char)));
    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  from_space()->bottom(),
                  from_space()->end(),
                  pointer_delta(from_space()->end(),
                                from_space()->bottom(),
                                sizeof(char)));
    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") "
                  SIZE_FORMAT,
                  to_space()->bottom(),
                  to_space()->end(),
                  pointer_delta(  to_space()->end(),
                                  to_space()->bottom(),
                                  sizeof(char)));
  }

  // There's nothing to do if the new sizes are the same as the current
  if (requested_survivor_size == to_space()->capacity_in_bytes() &&
      requested_survivor_size == from_space()->capacity_in_bytes() &&
      requested_eden_size == eden_space()->capacity_in_bytes()) {
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("    capacities are the right sizes, returning");
    }
    return;
  }

  char* eden_start = (char*)eden_space()->bottom();
  char* eden_end   = (char*)eden_space()->end();
  char* from_start = (char*)from_space()->bottom();
  char* from_end   = (char*)from_space()->end();
  char* to_start   = (char*)to_space()->bottom();
  char* to_end     = (char*)to_space()->end();

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t alignment = heap->space_alignment();
  const bool maintain_minimum =
    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();

  bool eden_from_to_order = from_start < to_start;
  // Check whether from space is below to space
  if (eden_from_to_order) {
    // Eden, from, to
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("  Eden, from, to:");
    }

    // Set eden
    // "requested_eden_size" is a goal for the size of eden
    // and may not be attainable.  "eden_size" below is
    // calculated based on the location of from-space and
    // the goal for the size of eden.  from-space is
    // fixed in place because it contains live data.
    // The calculation is done this way to avoid 32bit
    // overflow (i.e., eden_start + requested_eden_size
    // may be too large for representation in 32bits).
    size_t eden_size;
    if (maintain_minimum) {
      // Only make eden larger than the requested size if
      // the minimum size of the generation has to be maintained.
      // This could be done in general but policy at a higher
      // level is determining a requested size for eden and that
      // should be honored unless there is a fundamental reason.
      eden_size = pointer_delta(from_start,
                                eden_start,
                                sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(from_start, eden_start, sizeof(char)));
    }

    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // To may resize into from space as long as it is clear of live data.
    // From space must remain page aligned, though, so we need to do some
    // extra calculations.

    // First calculate an optimal to-space
    to_end   = (char*)virtual_space()->high();
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));

    // Does the optimal to-space overlap from-space?
    if (to_start < (char*)from_space()->end()) {
      assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

      // Calculate the minimum offset possible for from_end
      size_t from_size = pointer_delta(from_space()->top(), from_start, sizeof(char));

      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
      if (from_size == 0) {
        from_size = alignment;
      } else {
        from_size = align_size_up(from_size, alignment);
      }

      from_end = from_start + from_size;
      assert(from_end > from_start, "addition overflow or from_size problem");

      guarantee(from_end <= (char*)from_space()->end(), "from_end moved to the right");

      // Now update to_start with the new from_end
      to_start = MAX2(from_end, to_start);
    }

    guarantee(to_start != to_end, "to space is zero sized");

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    eden_start,
                    eden_end,
                    pointer_delta(eden_end, eden_start, sizeof(char)));
      gclog_or_tty->print_cr("    [from_start .. from_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    from_start,
                    from_end,
                    pointer_delta(from_end, from_start, sizeof(char)));
      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    to_start,
                    to_end,
                    pointer_delta(  to_end,   to_start, sizeof(char)));
    }
  } else {
    // Eden, to, from
    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("  Eden, to, from:");
    }

    // To space gets priority over eden resizing. Note that we position
    // to space as if we were able to resize from space, even though from
    // space is not modified.
    // Giving eden priority was tried and gave poorer performance.
    to_end   = (char*)pointer_delta(virtual_space()->high(),
                                    (char*)requested_survivor_size,
                                    sizeof(char));
    to_end   = MIN2(to_end, from_start);
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));
    // if the space sizes are to be increased by several times then
    // 'to_start' will point beyond the young generation. In this case
    // 'to_start' should be adjusted.
    to_start = MAX2(to_start, eden_start + alignment);

    // Compute how big eden can be, then adjust end.
    // See comments above on calculating eden_end.
    size_t eden_size;
    if (maintain_minimum) {
      eden_size = pointer_delta(to_start, eden_start, sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(to_start, eden_start, sizeof(char)));
    }
    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // Could choose to not let eden shrink
    // to_start = MAX2(to_start, eden_end);

    // Don't let eden shrink down to 0 or less.
    eden_end = MAX2(eden_end, eden_start + alignment);
    to_start = MAX2(to_start, eden_end);

    if (PrintAdaptiveSizePolicy && Verbose) {
      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    eden_start,
                    eden_end,
                    pointer_delta(eden_end, eden_start, sizeof(char)));
      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    to_start,
                    to_end,
                    pointer_delta(  to_end,   to_start, sizeof(char)));
      gclog_or_tty->print_cr("    [from_start .. from_end): "
                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                    from_start,
                    from_end,
                    pointer_delta(from_end, from_start, sizeof(char)));
    }
  }

  guarantee((HeapWord*)from_start <= from_space()->bottom(),
            "from start moved to the right");
  guarantee((HeapWord*)from_end >= from_space()->top(),
            "from end moved into live data");
  assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
  assert(is_object_aligned((intptr_t)from_start), "checking alignment");
  assert(is_object_aligned((intptr_t)to_start), "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);

  // Let's make sure the call to initialize doesn't reset "top"!
  HeapWord* old_from_top = from_space()->top();

  // For PrintAdaptiveSizePolicy block below
  size_t old_from = from_space()->capacity_in_bytes();
  size_t old_to   = to_space()->capacity_in_bytes();

  if (ZapUnusedHeapArea) {
    // NUMA is a special case because a numa space is not mangled
    // in order to not prematurely bind its address to memory to
    // the wrong memory (i.e., don't want the GC thread to first
    // touch the memory).  The survivor spaces are not numa
    // spaces and are mangled.
    if (UseNUMA) {
      if (eden_from_to_order) {
        mangle_survivors(from_space(), fromMR, to_space(), toMR);
      } else {
        mangle_survivors(to_space(), toMR, from_space(), fromMR);
      }
    }

    // If not mangling the spaces, do some checking to verify that
    // the spaces are already mangled.
    // The spaces should be correctly mangled at this point so
    // do some checking here. Note that they are not being mangled
    // in the calls to initialize().
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into an area
    // covered by another space and a failure of the check may
    // not correctly indicate which space is not properly mangled.
    HeapWord* limit = (HeapWord*) virtual_space()->high();
    eden_space()->check_mangled_unused_area(limit);
    from_space()->check_mangled_unused_area(limit);
      to_space()->check_mangled_unused_area(limit);
  }
  // When an existing space is being initialized, it is not
  // mangled because the space has been previously mangled.
  eden_space()->initialize(edenMR,
                           SpaceDecorator::Clear,
                           SpaceDecorator::DontMangle);
    to_space()->initialize(toMR,
                           SpaceDecorator::Clear,
                           SpaceDecorator::DontMangle);
  from_space()->initialize(fromMR,
                           SpaceDecorator::DontClear,
                           SpaceDecorator::DontMangle);

  assert(from_space()->top() == old_from_top, "from top changed!");

  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
                  "collection: %d "
                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
                  heap->total_collections(),
                  old_from, old_to,
                  from_space()->capacity_in_bytes(),
                  to_space()->capacity_in_bytes());
    gclog_or_tty->cr();
  }
}
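
// Exchange the roles of from-space and to-space, along with their
// mark-sweep decorators.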
void PSYoungGen::swap_spaces() {
  MutableSpace* s    = from_space();
  _from_space        = to_space();
  _to_space          = s;

  // Now update the decorators.
  PSMarkSweepDecorator* md = from_mark_sweep();
  _from_mark_sweep           = to_mark_sweep();
  _to_mark_sweep             = md;

  assert(from_mark_sweep()->space() == from_space(), "Sanity");
  assert(to_mark_sweep()->space() == to_space(), "Sanity");
}
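
// Capacity, usage and free-space accessors count eden and from-space only;
// to-space is kept in reserve for the next scavenge.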
size_t PSYoungGen::capacity_in_bytes() const {
  return eden_space()->capacity_in_bytes()
       + from_space()->capacity_in_bytes();  // to_space() is only used during scavenge
}

size_t PSYoungGen::used_in_bytes() const {
  return eden_space()->used_in_bytes()
       + from_space()->used_in_bytes();      // to_space() is only used during scavenge
}

size_t PSYoungGen::free_in_bytes() const {
  return eden_space()->free_in_bytes()
       + from_space()->free_in_bytes();      // to_space() is only used during scavenge
}

size_t PSYoungGen::capacity_in_words() const {
  return eden_space()->capacity_in_words()
       + from_space()->capacity_in_words();  // to_space() is only used during scavenge
}

size_t PSYoungGen::used_in_words() const {
  return eden_space()->used_in_words()
       + from_space()->used_in_words();      // to_space() is only used during scavenge
}

size_t PSYoungGen::free_in_words() const {
  return eden_space()->free_in_words()
       + from_space()->free_in_words();      // to_space() is only used during scavenge
}
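
// Apply the closure to every object in eden and both survivor spaces.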
void PSYoungGen::object_iterate(ObjectClosure* blk) {
  eden_space()->object_iterate(blk);
  from_space()->object_iterate(blk);
  to_space()->object_iterate(blk);
}
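
// Mark-compact support: delegate precompact, adjust_pointers and compact
// to the per-space decorators.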
void PSYoungGen::precompact() {
  eden_mark_sweep()->precompact();
  from_mark_sweep()->precompact();
  to_mark_sweep()->precompact();
}

void PSYoungGen::adjust_pointers() {
  eden_mark_sweep()->adjust_pointers();
  from_mark_sweep()->adjust_pointers();
  to_mark_sweep()->adjust_pointers();
}

void PSYoungGen::compact() {
  eden_mark_sweep()->compact(ZapUnusedHeapArea);
  from_mark_sweep()->compact(ZapUnusedHeapArea);
  // Mark sweep stores preserved markOops in to space, don't disturb!
  to_mark_sweep()->compact(false);
}

void PSYoungGen::print() const { print_on(tty); }
void PSYoungGen::print_on(outputStream* st) const {
  st->print(" %-15s", "PSYoungGen");
  if (PrintGCDetails && Verbose) {
    st->print(" total " SIZE_FORMAT ", used " SIZE_FORMAT,
               capacity_in_bytes(), used_in_bytes());
  } else {
    st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
               capacity_in_bytes()/K, used_in_bytes()/K);
  }
  virtual_space()->print_space_boundaries_on(st);
  st->print("  eden"); eden_space()->print_on(st);
  st->print("  from"); from_space()->print_on(st);
  st->print("  to  "); to_space()->print_on(st);
}

// Note that a space is not printed before the [NAME:
void PSYoungGen::print_used_change(size_t prev_used) const {
  gclog_or_tty->print("[%s:", name());
  gclog_or_tty->print(" "  SIZE_FORMAT "K"
                      "->" SIZE_FORMAT "K"
                      "("  SIZE_FORMAT "K)",
                      prev_used / K, used_in_bytes() / K,
                      capacity_in_bytes() / K);
  gclog_or_tty->print("]");
}

size_t PSYoungGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}

size_t PSYoungGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}
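
// Committed bytes in excess of the minimum generation size, i.e. the most
// the generation could shrink without going below its minimum.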
size_t PSYoungGen::available_to_min_gen() {
  assert(virtual_space()->committed_size() >= min_gen_size(), "Invariant");
  return virtual_space()->committed_size() - min_gen_size();
}

// This method assumes that from-space has live data and that
// any shrinkage of the young gen is limited by location of
// from-space.
size_t PSYoungGen::available_to_live() {
  size_t delta_in_survivor = 0;
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  const size_t space_alignment = heap->space_alignment();
  const size_t gen_alignment = heap->generation_alignment();

  MutableSpace* space_shrinking = NULL;
  if (from_space()->end() > to_space()->end()) {
    space_shrinking = from_space();
  } else {
    space_shrinking = to_space();
  }

  // Include any space that is committed but not included in
  // the survivor spaces.
  assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
    "Survivor space beyond high end");
  size_t unused_committed = pointer_delta(virtual_space()->high(),
    space_shrinking->end(), sizeof(char));

  if (space_shrinking->is_empty()) {
    // Don't let the space shrink to 0
    assert(space_shrinking->capacity_in_bytes() >= space_alignment,
      "Space is too small");
    delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
  } else {
    delta_in_survivor = pointer_delta(space_shrinking->end(),
                                      space_shrinking->top(),
                                      sizeof(char));
  }

  size_t delta_in_bytes = unused_committed + delta_in_survivor;
  delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);
  return delta_in_bytes;
}

// Return the number of bytes available for resizing down the young
// generation.  This is the minimum of
//      input "bytes"
//      bytes to the minimum young gen size
//      bytes to the size currently being used + some small extra
size_t PSYoungGen::limit_gen_shrink(size_t bytes) {
  // Allow shrinkage into the current eden but keep eden large enough
  // to maintain the minimum young gen size
  bytes = MIN3(bytes, available_to_min_gen(), available_to_live());
  return align_size_down(bytes, virtual_space()->alignment());
}

void PSYoungGen::reset_after_change() {
  ShouldNotReachHere();
}
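
// After the virtual space has been shrunk, refresh the reserved region and
// the reference processor span, and clip the upper survivor space to the
// new high boundary if the shrink cut into it.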
void PSYoungGen::reset_survivors_after_shrink() {
  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());
  PSScavenge::reference_processor()->set_span(_reserved);

  MutableSpace* space_shrinking = NULL;
  if (from_space()->end() > to_space()->end()) {
    space_shrinking = from_space();
  } else {
    space_shrinking = to_space();
  }

  HeapWord* new_end = (HeapWord*)virtual_space()->high();
  assert(new_end >= space_shrinking->bottom(), "Shrink was too large");
  // Was there a shrink of the survivor space?
  if (new_end < space_shrinking->end()) {
    MemRegion mr(space_shrinking->bottom(), new_end);
    space_shrinking->initialize(mr,
                                SpaceDecorator::DontClear,
                                SpaceDecorator::Mangle);
  }
}

// This method currently does not expect to expand into eden (i.e.,
// the virtual space boundaries are expected to be consistent
// with the eden boundaries).
void PSYoungGen::post_resize() {
  assert_locked_or_safepoint(Heap_lock);
  assert((eden_space()->bottom() < to_space()->bottom()) &&
         (eden_space()->bottom() < from_space()->bottom()),
         "Eden is assumed to be below the survivor spaces");

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);
  space_invariants();
}
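
// Push the current sizes and usage of all spaces into the performance
// counters.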
void PSYoungGen::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void PSYoungGen::verify() {
  eden_space()->verify();
  from_space()->verify();
  to_space()->verify();
}

#ifndef PRODUCT
void PSYoungGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden_space()->set_top_for_allocations();
  from_space()->set_top_for_allocations();
  to_space()->set_top_for_allocations();
}
#endif
