src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp

author      johnc
date        Thu, 22 Sep 2011 10:57:37 -0700
changeset   3175   4dfb2df418f2
parent      2783   eda9eb483d29
child       3711   b632e80fc9dc
permissions -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp

duke@435 1 /*
jcoomes@2783 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
stefank@2314 27 #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
stefank@2314 28 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
stefank@2314 29 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
stefank@2314 30 #include "gc_implementation/shared/gcUtil.hpp"
stefank@2314 31 #include "gc_implementation/shared/mutableNUMASpace.hpp"
stefank@2314 32 #include "gc_implementation/shared/spaceDecorator.hpp"
stefank@2314 33 #include "oops/oop.inline.hpp"
stefank@2314 34 #include "runtime/java.hpp"
duke@435 35
duke@435 36 PSYoungGen::PSYoungGen(size_t initial_size,
duke@435 37 size_t min_size,
duke@435 38 size_t max_size) :
duke@435 39 _init_gen_size(initial_size),
duke@435 40 _min_gen_size(min_size),
duke@435 41 _max_gen_size(max_size)
duke@435 42 {}
duke@435 43
duke@435 44 void PSYoungGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
duke@435 45 assert(_init_gen_size != 0, "Should have a finite size");
duke@435 46 _virtual_space = new PSVirtualSpace(rs, alignment);
jmasa@698 47 if (!virtual_space()->expand_by(_init_gen_size)) {
duke@435 48 vm_exit_during_initialization("Could not reserve enough space for "
duke@435 49 "object heap");
duke@435 50 }
duke@435 51 }
duke@435 52
duke@435 53 void PSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
duke@435 54 initialize_virtual_space(rs, alignment);
duke@435 55 initialize_work();
duke@435 56 }
duke@435 57
duke@435 58 void PSYoungGen::initialize_work() {
duke@435 59
jmasa@698 60 _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
jmasa@698 61 (HeapWord*)virtual_space()->high_boundary());
duke@435 62
jmasa@698 63 MemRegion cmr((HeapWord*)virtual_space()->low(),
jmasa@698 64 (HeapWord*)virtual_space()->high());
duke@435 65 Universe::heap()->barrier_set()->resize_covered_region(cmr);
duke@435 66
jmasa@698 67 if (ZapUnusedHeapArea) {
jmasa@698 68 // Mangle newly committed space immediately because it
jmasa@698 69 // can be done here more simply than after the new
jmasa@698 70 // spaces have been computed.
jmasa@698 71 SpaceMangler::mangle_region(cmr);
jmasa@698 72 }
jmasa@698 73
duke@435 74 if (UseNUMA) {
iveresov@970 75 _eden_space = new MutableNUMASpace(virtual_space()->alignment());
duke@435 76 } else {
iveresov@970 77 _eden_space = new MutableSpace(virtual_space()->alignment());
duke@435 78 }
iveresov@970 79 _from_space = new MutableSpace(virtual_space()->alignment());
iveresov@970 80 _to_space = new MutableSpace(virtual_space()->alignment());
duke@435 81
duke@435 82 if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
duke@435 83 vm_exit_during_initialization("Could not allocate a young gen space");
duke@435 84 }
duke@435 85
duke@435 86 // Allocate the mark sweep views of spaces
duke@435 87 _eden_mark_sweep =
duke@435 88 new PSMarkSweepDecorator(_eden_space, NULL, MarkSweepDeadRatio);
duke@435 89 _from_mark_sweep =
duke@435 90 new PSMarkSweepDecorator(_from_space, NULL, MarkSweepDeadRatio);
duke@435 91 _to_mark_sweep =
duke@435 92 new PSMarkSweepDecorator(_to_space, NULL, MarkSweepDeadRatio);
duke@435 93
duke@435 94 if (_eden_mark_sweep == NULL ||
duke@435 95 _from_mark_sweep == NULL ||
duke@435 96 _to_mark_sweep == NULL) {
duke@435 97 vm_exit_during_initialization("Could not complete allocation"
duke@435 98 " of the young generation");
duke@435 99 }
duke@435 100
duke@435 101 // Generation Counters - generation 0, 3 subspaces
duke@435 102 _gen_counters = new PSGenerationCounters("new", 0, 3, _virtual_space);
duke@435 103
duke@435 104 // Compute maximum space sizes for performance counters
duke@435 105 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
jmasa@448 106 size_t alignment = heap->intra_heap_alignment();
jmasa@698 107 size_t size = virtual_space()->reserved_size();
duke@435 108
duke@435 109 size_t max_survivor_size;
duke@435 110 size_t max_eden_size;
duke@435 111
duke@435 112 if (UseAdaptiveSizePolicy) {
duke@435 113 max_survivor_size = size / MinSurvivorRatio;
duke@435 114
duke@435 115 // round the survivor space size down to the nearest alignment
duke@435 116 // and make sure its size is greater than 0.
duke@435 117 max_survivor_size = align_size_down(max_survivor_size, alignment);
duke@435 118 max_survivor_size = MAX2(max_survivor_size, alignment);
duke@435 119
duke@435 120 // set the maximum size of eden to be the size of the young gen
duke@435 121 // less two times the minimum survivor size. The minimum survivor
duke@435 122 // size for UseAdaptiveSizePolicy is one alignment.
duke@435 123 max_eden_size = size - 2 * alignment;
duke@435 124 } else {
duke@435 125 max_survivor_size = size / InitialSurvivorRatio;
duke@435 126
duke@435 127 // round the survivor space size down to the nearest alignment
duke@435 128 // and make sure its size is greater than 0.
duke@435 129 max_survivor_size = align_size_down(max_survivor_size, alignment);
duke@435 130 max_survivor_size = MAX2(max_survivor_size, alignment);
duke@435 131
duke@435 132 // set the maximum size of eden to be the size of the young gen
duke@435 133 // less two times the survivor size when the generation is 100%
duke@435 134 // committed. The minimum survivor size for -UseAdaptiveSizePolicy
duke@435 135 // is dependent on the committed portion (current capacity) of the
duke@435 136 // generation - the less space committed, the smaller the survivor
duke@435 137 // space, possibly as small as an alignment. However, we are interested
duke@435 138 // in the case where the young generation is 100% committed, as this
duke@435 139 // is the point where eden reaches its maximum size. At this point,
duke@435 140 // the size of a survivor space is max_survivor_size.
duke@435 141 max_eden_size = size - 2 * max_survivor_size;
duke@435 142 }
duke@435 143
duke@435 144 _eden_counters = new SpaceCounters("eden", 0, max_eden_size, _eden_space,
duke@435 145 _gen_counters);
duke@435 146 _from_counters = new SpaceCounters("s0", 1, max_survivor_size, _from_space,
duke@435 147 _gen_counters);
duke@435 148 _to_counters = new SpaceCounters("s1", 2, max_survivor_size, _to_space,
duke@435 149 _gen_counters);
duke@435 150
duke@435 151 compute_initial_space_boundaries();
duke@435 152 }
duke@435 153
duke@435 154 void PSYoungGen::compute_initial_space_boundaries() {
duke@435 155 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 156 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 157
duke@435 158 // Compute sizes
jmasa@448 159 size_t alignment = heap->intra_heap_alignment();
jmasa@698 160 size_t size = virtual_space()->committed_size();
duke@435 161
duke@435 162 size_t survivor_size = size / InitialSurvivorRatio;
duke@435 163 survivor_size = align_size_down(survivor_size, alignment);
duke@435 164 // ... but never less than an alignment
duke@435 165 survivor_size = MAX2(survivor_size, alignment);
duke@435 166
duke@435 167 // Young generation is eden + 2 survivor spaces
duke@435 168 size_t eden_size = size - (2 * survivor_size);
duke@435 169
duke@435 170 // Now go ahead and set 'em.
duke@435 171 set_space_boundaries(eden_size, survivor_size);
duke@435 172 space_invariants();
duke@435 173
duke@435 174 if (UsePerfData) {
duke@435 175 _eden_counters->update_capacity();
duke@435 176 _from_counters->update_capacity();
duke@435 177 _to_counters->update_capacity();
duke@435 178 }
duke@435 179 }
duke@435 180
duke@435 181 void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
jmasa@698 182 assert(eden_size < virtual_space()->committed_size(), "just checking");
duke@435 183 assert(eden_size > 0 && survivor_size > 0, "just checking");
duke@435 184
duke@435 185 // Initial layout is Eden, to, from. After swapping survivor spaces,
duke@435 186 // that leaves us with Eden, from, to, which is step one in our two
duke@435 187 // step resize-with-live-data procedure.
jmasa@698 188 char *eden_start = virtual_space()->low();
duke@435 189 char *to_start = eden_start + eden_size;
duke@435 190 char *from_start = to_start + survivor_size;
duke@435 191 char *from_end = from_start + survivor_size;
duke@435 192
jmasa@698 193 assert(from_end == virtual_space()->high(), "just checking");
duke@435 194 assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
duke@435 195 assert(is_object_aligned((intptr_t)to_start), "checking alignment");
duke@435 196 assert(is_object_aligned((intptr_t)from_start), "checking alignment");
duke@435 197
duke@435 198 MemRegion eden_mr((HeapWord*)eden_start, (HeapWord*)to_start);
duke@435 199 MemRegion to_mr ((HeapWord*)to_start, (HeapWord*)from_start);
duke@435 200 MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end);
duke@435 201
jmasa@698 202 eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea);
jmasa@698 203 to_space()->initialize(to_mr , true, ZapUnusedHeapArea);
jmasa@698 204 from_space()->initialize(from_mr, true, ZapUnusedHeapArea);
duke@435 205 }
duke@435 206
duke@435 207 #ifndef PRODUCT
duke@435 208 void PSYoungGen::space_invariants() {
duke@435 209 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
jmasa@448 210 const size_t alignment = heap->intra_heap_alignment();
duke@435 211
duke@435 212 // Currently, our eden size cannot shrink to zero
duke@435 213 guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small");
duke@435 214 guarantee(from_space()->capacity_in_bytes() >= alignment, "from too small");
duke@435 215 guarantee(to_space()->capacity_in_bytes() >= alignment, "to too small");
duke@435 216
duke@435 217 // Relationship of spaces to each other
duke@435 218 char* eden_start = (char*)eden_space()->bottom();
duke@435 219 char* eden_end = (char*)eden_space()->end();
duke@435 220 char* from_start = (char*)from_space()->bottom();
duke@435 221 char* from_end = (char*)from_space()->end();
duke@435 222 char* to_start = (char*)to_space()->bottom();
duke@435 223 char* to_end = (char*)to_space()->end();
duke@435 224
jmasa@698 225 guarantee(eden_start >= virtual_space()->low(), "eden bottom");
duke@435 226 guarantee(eden_start < eden_end, "eden space consistency");
duke@435 227 guarantee(from_start < from_end, "from space consistency");
duke@435 228 guarantee(to_start < to_end, "to space consistency");
duke@435 229
duke@435 230 // Check whether from space is below to space
duke@435 231 if (from_start < to_start) {
duke@435 232 // Eden, from, to
duke@435 233 guarantee(eden_end <= from_start, "eden/from boundary");
duke@435 234 guarantee(from_end <= to_start, "from/to boundary");
jmasa@698 235 guarantee(to_end <= virtual_space()->high(), "to end");
duke@435 236 } else {
duke@435 237 // Eden, to, from
duke@435 238 guarantee(eden_end <= to_start, "eden/to boundary");
duke@435 239 guarantee(to_end <= from_start, "to/from boundary");
jmasa@698 240 guarantee(from_end <= virtual_space()->high(), "from end");
duke@435 241 }
duke@435 242
duke@435 243 // More checks that the virtual space is consistent with the spaces
jmasa@698 244 assert(virtual_space()->committed_size() >=
duke@435 245 (eden_space()->capacity_in_bytes() +
duke@435 246 to_space()->capacity_in_bytes() +
duke@435 247 from_space()->capacity_in_bytes()), "Committed size is inconsistent");
jmasa@698 248 assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
duke@435 249 "Space invariant");
duke@435 250 char* eden_top = (char*)eden_space()->top();
duke@435 251 char* from_top = (char*)from_space()->top();
duke@435 252 char* to_top = (char*)to_space()->top();
jmasa@698 253 assert(eden_top <= virtual_space()->high(), "eden top");
jmasa@698 254 assert(from_top <= virtual_space()->high(), "from top");
jmasa@698 255 assert(to_top <= virtual_space()->high(), "to top");
duke@435 256
jmasa@698 257 virtual_space()->verify();
duke@435 258 }
duke@435 259 #endif
duke@435 260
duke@435 261 void PSYoungGen::resize(size_t eden_size, size_t survivor_size) {
duke@435 262 // Resize the generation if needed. If the generation resize
duke@435 263 // reports false, do not attempt to resize the spaces.
duke@435 264 if (resize_generation(eden_size, survivor_size)) {
duke@435 265 // Then we lay out the spaces inside the generation
duke@435 266 resize_spaces(eden_size, survivor_size);
duke@435 267
duke@435 268 space_invariants();
duke@435 269
duke@435 270 if (PrintAdaptiveSizePolicy && Verbose) {
duke@435 271 gclog_or_tty->print_cr("Young generation size: "
duke@435 272 "desired eden: " SIZE_FORMAT " survivor: " SIZE_FORMAT
duke@435 273 " used: " SIZE_FORMAT " capacity: " SIZE_FORMAT
duke@435 274 " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
duke@435 275 eden_size, survivor_size, used_in_bytes(), capacity_in_bytes(),
duke@435 276 _max_gen_size, min_gen_size());
duke@435 277 }
duke@435 278 }
duke@435 279 }
duke@435 280
duke@435 281
duke@435 282 bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
jmasa@698 283 const size_t alignment = virtual_space()->alignment();
jmasa@698 284 size_t orig_size = virtual_space()->committed_size();
duke@435 285 bool size_changed = false;
duke@435 286
duke@435 287 // There used to be this guarantee here.
duke@435 288 // guarantee ((eden_size + 2*survivor_size) <= _max_gen_size, "incorrect input arguments");
duke@435 289 // Code below forces this requirement. In addition the desired eden
duke@435 290 // size and desired survivor sizes are goals and may
duke@435 291 // exceed the total generation size.
duke@435 292
duke@435 293 assert(min_gen_size() <= orig_size && orig_size <= max_size(), "just checking");
duke@435 294
duke@435 295 // Adjust new generation size
duke@435 296 const size_t eden_plus_survivors =
duke@435 297 align_size_up(eden_size + 2 * survivor_size, alignment);
duke@435 298 size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_size()),
duke@435 299 min_gen_size());
duke@435 300 assert(desired_size <= max_size(), "just checking");
duke@435 301
duke@435 302 if (desired_size > orig_size) {
duke@435 303 // Grow the generation
duke@435 304 size_t change = desired_size - orig_size;
duke@435 305 assert(change % alignment == 0, "just checking");
jmasa@698 306 HeapWord* prev_high = (HeapWord*) virtual_space()->high();
jmasa@698 307 if (!virtual_space()->expand_by(change)) {
duke@435 308 return false; // Error if we fail to resize!
duke@435 309 }
jmasa@698 310 if (ZapUnusedHeapArea) {
jmasa@698 311 // Mangle newly committed space immediately because it
jmasa@698 312 // can be done here more simply than after the new
jmasa@698 313 // spaces have been computed.
jmasa@698 314 HeapWord* new_high = (HeapWord*) virtual_space()->high();
jmasa@698 315 MemRegion mangle_region(prev_high, new_high);
jmasa@698 316 SpaceMangler::mangle_region(mangle_region);
jmasa@698 317 }
duke@435 318 size_changed = true;
duke@435 319 } else if (desired_size < orig_size) {
duke@435 320 size_t desired_change = orig_size - desired_size;
duke@435 321 assert(desired_change % alignment == 0, "just checking");
duke@435 322
duke@435 323 desired_change = limit_gen_shrink(desired_change);
duke@435 324
duke@435 325 if (desired_change > 0) {
duke@435 326 virtual_space()->shrink_by(desired_change);
duke@435 327 reset_survivors_after_shrink();
duke@435 328
duke@435 329 size_changed = true;
duke@435 330 }
duke@435 331 } else {
duke@435 332 if (Verbose && PrintGC) {
duke@435 333 if (orig_size == gen_size_limit()) {
duke@435 334 gclog_or_tty->print_cr("PSYoung generation size at maximum: "
duke@435 335 SIZE_FORMAT "K", orig_size/K);
duke@435 336 } else if (orig_size == min_gen_size()) {
duke@435 337 gclog_or_tty->print_cr("PSYoung generation size at minium: "
duke@435 338 SIZE_FORMAT "K", orig_size/K);
duke@435 339 }
duke@435 340 }
duke@435 341 }
duke@435 342
duke@435 343 if (size_changed) {
duke@435 344 post_resize();
duke@435 345
duke@435 346 if (Verbose && PrintGC) {
jmasa@698 347 size_t current_size = virtual_space()->committed_size();
duke@435 348 gclog_or_tty->print_cr("PSYoung generation size changed: "
duke@435 349 SIZE_FORMAT "K->" SIZE_FORMAT "K",
duke@435 350 orig_size/K, current_size/K);
duke@435 351 }
duke@435 352 }
duke@435 353
jmasa@698 354 guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
jmasa@698 355 virtual_space()->committed_size() == max_size(), "Sanity");
duke@435 356
duke@435 357 return true;
duke@435 358 }
duke@435 359
jmasa@698 360 #ifndef PRODUCT
jmasa@698 361 // In the numa case eden is not mangled so a survivor space
jmasa@698 362 // moving into a region previously occupied by a survivor
jmasa@698 363 // may find an unmangled region. Also in the PS case eden,
jmasa@698 364 // to-space and from-space may not touch (i.e., there may be
jmasa@698 365 // gaps between them due to movement while resizing the
jmasa@698 366 // spaces). Those gaps must be mangled.
jmasa@698 367 void PSYoungGen::mangle_survivors(MutableSpace* s1,
jmasa@698 368 MemRegion s1MR,
jmasa@698 369 MutableSpace* s2,
jmasa@698 370 MemRegion s2MR) {
jmasa@698 371 // Check eden and gap between eden and from-space, in deciding
jmasa@698 372 // what to mangle in from-space. Check the gap between from-space
jmasa@698 373 // and to-space when deciding what to mangle.
jmasa@698 374 //
jmasa@698 375 // +--------+ +----+ +---+
jmasa@698 376 // | eden | |s1 | |s2 |
jmasa@698 377 // +--------+ +----+ +---+
jmasa@698 378 // +-------+ +-----+
jmasa@698 379 // |s1MR | |s2MR |
jmasa@698 380 // +-------+ +-----+
jmasa@698 381 // All of survivor-space is properly mangled so find the
jmasa@698 382 // upper bound on the mangling for any portion above current s1.
jmasa@698 383 HeapWord* delta_end = MIN2(s1->bottom(), s1MR.end());
jmasa@698 384 MemRegion delta1_left;
jmasa@698 385 if (s1MR.start() < delta_end) {
jmasa@698 386 delta1_left = MemRegion(s1MR.start(), delta_end);
jmasa@698 387 s1->mangle_region(delta1_left);
jmasa@698 388 }
jmasa@698 389 // Find any portion to the right of the current s1.
jmasa@698 390 HeapWord* delta_start = MAX2(s1->end(), s1MR.start());
jmasa@698 391 MemRegion delta1_right;
jmasa@698 392 if (delta_start < s1MR.end()) {
jmasa@698 393 delta1_right = MemRegion(delta_start, s1MR.end());
jmasa@698 394 s1->mangle_region(delta1_right);
jmasa@698 395 }
jmasa@698 396
jmasa@698 397 // Similarly for the second survivor space except that
jmasa@698 398 // any of the new region that overlaps with the current
jmasa@698 399 // region of the first survivor space has already been
jmasa@698 400 // mangled.
jmasa@698 401 delta_end = MIN2(s2->bottom(), s2MR.end());
jmasa@698 402 delta_start = MAX2(s2MR.start(), s1->end());
jmasa@698 403 MemRegion delta2_left;
jmasa@698 404 if (s2MR.start() < delta_end) {
jmasa@698 405 delta2_left = MemRegion(s2MR.start(), delta_end);
jmasa@698 406 s2->mangle_region(delta2_left);
jmasa@698 407 }
jmasa@698 408 delta_start = MAX2(s2->end(), s2MR.start());
jmasa@698 409 MemRegion delta2_right;
jmasa@698 410 if (delta_start < s2MR.end()) {
jmasa@698 411 delta2_right = MemRegion(delta_start, s2MR.end());
jmasa@698 411 s2->mangle_region(delta2_right);
jmasa@698 412 }
jmasa@698 413
jmasa@698 414 if (TraceZapUnusedHeapArea) {
jmasa@698 415 // s1
jmasa@698 416 gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
jmasa@698 417 "New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
jmasa@698 418 s1->bottom(), s1->end(), s1MR.start(), s1MR.end());
jmasa@698 419 gclog_or_tty->print_cr(" Mangle before: [" PTR_FORMAT ", "
jmasa@698 420 PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
jmasa@698 421 delta1_left.start(), delta1_left.end(), delta1_right.start(),
jmasa@698 422 delta1_right.end());
jmasa@698 423
jmasa@698 424 // s2
jmasa@698 425 gclog_or_tty->print_cr("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") "
jmasa@698 426 "New region: [" PTR_FORMAT ", " PTR_FORMAT ")",
jmasa@698 427 s2->bottom(), s2->end(), s2MR.start(), s2MR.end());
jmasa@698 428 gclog_or_tty->print_cr(" Mangle before: [" PTR_FORMAT ", "
jmasa@698 429 PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")",
jmasa@698 430 delta2_left.start(), delta2_left.end(), delta2_right.start(),
jmasa@698 431 delta2_right.end());
jmasa@698 432 }
jmasa@698 433
jmasa@698 434 }
jmasa@698 435 #endif // NOT PRODUCT
duke@435 436
duke@435 437 void PSYoungGen::resize_spaces(size_t requested_eden_size,
duke@435 438 size_t requested_survivor_size) {
duke@435 439 assert(UseAdaptiveSizePolicy, "sanity check");
duke@435 440 assert(requested_eden_size > 0 && requested_survivor_size > 0,
duke@435 441 "just checking");
duke@435 442
duke@435 443 // We require eden and to space to be empty
duke@435 444 if ((!eden_space()->is_empty()) || (!to_space()->is_empty())) {
duke@435 445 return;
duke@435 446 }
duke@435 447
duke@435 448 if (PrintAdaptiveSizePolicy && Verbose) {
duke@435 449 gclog_or_tty->print_cr("PSYoungGen::resize_spaces(requested_eden_size: "
duke@435 450 SIZE_FORMAT
duke@435 451 ", requested_survivor_size: " SIZE_FORMAT ")",
duke@435 452 requested_eden_size, requested_survivor_size);
duke@435 453 gclog_or_tty->print_cr(" eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
duke@435 454 SIZE_FORMAT,
duke@435 455 eden_space()->bottom(),
duke@435 456 eden_space()->end(),
duke@435 457 pointer_delta(eden_space()->end(),
duke@435 458 eden_space()->bottom(),
duke@435 459 sizeof(char)));
duke@435 460 gclog_or_tty->print_cr(" from: [" PTR_FORMAT ".." PTR_FORMAT ") "
duke@435 461 SIZE_FORMAT,
duke@435 462 from_space()->bottom(),
duke@435 463 from_space()->end(),
duke@435 464 pointer_delta(from_space()->end(),
duke@435 465 from_space()->bottom(),
duke@435 466 sizeof(char)));
duke@435 467 gclog_or_tty->print_cr(" to: [" PTR_FORMAT ".." PTR_FORMAT ") "
duke@435 468 SIZE_FORMAT,
duke@435 469 to_space()->bottom(),
duke@435 470 to_space()->end(),
duke@435 471 pointer_delta( to_space()->end(),
duke@435 472 to_space()->bottom(),
duke@435 473 sizeof(char)));
duke@435 474 }
duke@435 475
duke@435 476 // There's nothing to do if the new sizes are the same as the current
duke@435 477 if (requested_survivor_size == to_space()->capacity_in_bytes() &&
duke@435 478 requested_survivor_size == from_space()->capacity_in_bytes() &&
duke@435 479 requested_eden_size == eden_space()->capacity_in_bytes()) {
duke@435 480 if (PrintAdaptiveSizePolicy && Verbose) {
duke@435 481 gclog_or_tty->print_cr(" capacities are the right sizes, returning");
duke@435 482 }
duke@435 483 return;
duke@435 484 }
duke@435 485
duke@435 486 char* eden_start = (char*)eden_space()->bottom();
duke@435 487 char* eden_end = (char*)eden_space()->end();
duke@435 488 char* from_start = (char*)from_space()->bottom();
duke@435 489 char* from_end = (char*)from_space()->end();
duke@435 490 char* to_start = (char*)to_space()->bottom();
duke@435 491 char* to_end = (char*)to_space()->end();
duke@435 492
duke@435 493 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
jmasa@448 494 const size_t alignment = heap->intra_heap_alignment();
duke@435 495 const bool maintain_minimum =
duke@435 496 (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
duke@435 497
jmasa@698 498 bool eden_from_to_order = from_start < to_start;
duke@435 499 // Check whether from space is below to space
jmasa@698 500 if (eden_from_to_order) {
duke@435 501 // Eden, from, to
jmasa@698 502 eden_from_to_order = true;
duke@435 503 if (PrintAdaptiveSizePolicy && Verbose) {
duke@435 504 gclog_or_tty->print_cr(" Eden, from, to:");
duke@435 505 }
duke@435 506
duke@435 507 // Set eden
duke@435 508 // "requested_eden_size" is a goal for the size of eden
duke@435 509 // and may not be attainable. "eden_size" below is
duke@435 510 // calculated based on the location of from-space and
duke@435 511 // the goal for the size of eden. from-space is
duke@435 512 // fixed in place because it contains live data.
duke@435 513 // The calculation is done this way to avoid 32bit
duke@435 514 // overflow (i.e., eden_start + requested_eden_size
duke@435 515 // may be too large for representation in 32 bits).
duke@435 516 size_t eden_size;
duke@435 517 if (maintain_minimum) {
duke@435 518 // Only make eden larger than the requested size if
duke@435 519 // the minimum size of the generation has to be maintained.
duke@435 520 // This could be done in general but policy at a higher
duke@435 521 // level is determining a requested size for eden and that
duke@435 522 // should be honored unless there is a fundamental reason.
duke@435 523 eden_size = pointer_delta(from_start,
duke@435 524 eden_start,
duke@435 525 sizeof(char));
duke@435 526 } else {
duke@435 527 eden_size = MIN2(requested_eden_size,
duke@435 528 pointer_delta(from_start, eden_start, sizeof(char)));
duke@435 529 }
duke@435 530
duke@435 531 eden_end = eden_start + eden_size;
jcoomes@1844 532 assert(eden_end >= eden_start, "addition overflowed");
duke@435 533
duke@435 534 // To may resize into from space as long as it is clear of live data.
duke@435 535 // From space must remain page aligned, though, so we need to do some
duke@435 536 // extra calculations.
duke@435 537
duke@435 538 // First calculate an optimal to-space
jmasa@698 539 to_end = (char*)virtual_space()->high();
duke@435 540 to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
duke@435 541 sizeof(char));
duke@435 542
duke@435 543 // Does the optimal to-space overlap from-space?
duke@435 544 if (to_start < (char*)from_space()->end()) {
duke@435 545 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 546
duke@435 547 // Calculate the minimum offset possible for from_end
duke@435 548 size_t from_size = pointer_delta(from_space()->top(), from_start, sizeof(char));
duke@435 549
duke@435 550 // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
duke@435 551 if (from_size == 0) {
duke@435 552 from_size = alignment;
duke@435 553 } else {
duke@435 554 from_size = align_size_up(from_size, alignment);
duke@435 555 }
duke@435 556
duke@435 557 from_end = from_start + from_size;
duke@435 558 assert(from_end > from_start, "addition overflow or from_size problem");
duke@435 559
duke@435 560 guarantee(from_end <= (char*)from_space()->end(), "from_end moved to the right");
duke@435 561
duke@435 562 // Now update to_start with the new from_end
duke@435 563 to_start = MAX2(from_end, to_start);
duke@435 564 }
duke@435 565
duke@435 566 guarantee(to_start != to_end, "to space is zero sized");
duke@435 567
duke@435 568 if (PrintAdaptiveSizePolicy && Verbose) {
duke@435 569 gclog_or_tty->print_cr(" [eden_start .. eden_end): "
duke@435 570 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@435 571 eden_start,
duke@435 572 eden_end,
duke@435 573 pointer_delta(eden_end, eden_start, sizeof(char)));
duke@435 574 gclog_or_tty->print_cr(" [from_start .. from_end): "
duke@435 575 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@435 576 from_start,
duke@435 577 from_end,
duke@435 578 pointer_delta(from_end, from_start, sizeof(char)));
duke@435 579 gclog_or_tty->print_cr(" [ to_start .. to_end): "
duke@435 580 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@435 581 to_start,
duke@435 582 to_end,
duke@435 583 pointer_delta( to_end, to_start, sizeof(char)));
duke@435 584 }
duke@435 585 } else {
duke@435 586 // Eden, to, from
duke@435 587 if (PrintAdaptiveSizePolicy && Verbose) {
duke@435 588 gclog_or_tty->print_cr(" Eden, to, from:");
duke@435 589 }
duke@435 590
duke@435 591 // To space gets priority over eden resizing. Note that we position
duke@435 592 // to space as if we were able to resize from space, even though from
duke@435 593 // space is not modified.
duke@435 594 // Giving eden priority was tried and gave poorer performance.
jmasa@698 595 to_end = (char*)pointer_delta(virtual_space()->high(),
duke@435 596 (char*)requested_survivor_size,
duke@435 597 sizeof(char));
duke@435 598 to_end = MIN2(to_end, from_start);
duke@435 599 to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
duke@435 600 sizeof(char));
duke@435 601 // if the space sizes are to be increased by several times then
duke@435 602 // 'to_start' will point beyond the young generation. In this case
duke@435 603 // 'to_start' should be adjusted.
duke@435 604 to_start = MAX2(to_start, eden_start + alignment);
duke@435 605
duke@435 606 // Compute how big eden can be, then adjust end.
duke@435 607 // See comments above on calculating eden_end.
duke@435 608 size_t eden_size;
duke@435 609 if (maintain_minimum) {
duke@435 610 eden_size = pointer_delta(to_start, eden_start, sizeof(char));
duke@435 611 } else {
duke@435 612 eden_size = MIN2(requested_eden_size,
duke@435 613 pointer_delta(to_start, eden_start, sizeof(char)));
duke@435 614 }
duke@435 615 eden_end = eden_start + eden_size;
jcoomes@1844 616 assert(eden_end >= eden_start, "addition overflowed");
duke@435 617
duke@435 618 // Could choose to not let eden shrink
duke@435 619 // to_start = MAX2(to_start, eden_end);
duke@435 620
duke@435 621 // Don't let eden shrink down to 0 or less.
duke@435 622 eden_end = MAX2(eden_end, eden_start + alignment);
duke@435 623 to_start = MAX2(to_start, eden_end);
duke@435 624
duke@435 625 if (PrintAdaptiveSizePolicy && Verbose) {
duke@435 626 gclog_or_tty->print_cr(" [eden_start .. eden_end): "
duke@435 627 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@435 628 eden_start,
duke@435 629 eden_end,
duke@435 630 pointer_delta(eden_end, eden_start, sizeof(char)));
duke@435 631 gclog_or_tty->print_cr(" [ to_start .. to_end): "
duke@435 632 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@435 633 to_start,
duke@435 634 to_end,
duke@435 635 pointer_delta( to_end, to_start, sizeof(char)));
duke@435 636 gclog_or_tty->print_cr(" [from_start .. from_end): "
duke@435 637 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@435 638 from_start,
duke@435 639 from_end,
duke@435 640 pointer_delta(from_end, from_start, sizeof(char)));
duke@435 641 }
duke@435 642 }
duke@435 643
duke@435 644
duke@435 645 guarantee((HeapWord*)from_start <= from_space()->bottom(),
duke@435 646 "from start moved to the right");
duke@435 647 guarantee((HeapWord*)from_end >= from_space()->top(),
duke@435 648 "from end moved into live data");
duke@435 649 assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
duke@435 650 assert(is_object_aligned((intptr_t)from_start), "checking alignment");
duke@435 651 assert(is_object_aligned((intptr_t)to_start), "checking alignment");
duke@435 652
duke@435 653 MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
duke@435 654 MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
duke@435 655 MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);
duke@435 656
duke@435 657 // Let's make sure the call to initialize doesn't reset "top"!
duke@435 658 HeapWord* old_from_top = from_space()->top();
duke@435 659
duke@435 660 // For PrintAdaptiveSizePolicy block below
duke@435 661 size_t old_from = from_space()->capacity_in_bytes();
duke@435 662 size_t old_to = to_space()->capacity_in_bytes();
duke@435 663
jmasa@698 664 if (ZapUnusedHeapArea) {
jmasa@698 665 // NUMA is a special case because a numa space is not mangled
jmasa@698 666 // in order to not prematurely bind its address range to
jmasa@698 667 // the wrong memory (i.e., we don't want the GC thread to first
jmasa@698 668 // touch the memory). The survivor spaces are not numa
jmasa@698 669 // spaces and are mangled.
jmasa@698 670 if (UseNUMA) {
jmasa@698 671 if (eden_from_to_order) {
jmasa@698 672 mangle_survivors(from_space(), fromMR, to_space(), toMR);
jmasa@698 673 } else {
jmasa@698 674 mangle_survivors(to_space(), toMR, from_space(), fromMR);
jmasa@698 675 }
jmasa@698 676 }
jmasa@698 677
jmasa@698 678 // If not mangling the spaces, do some checking to verify that
jmasa@698 679 // the spaces are already mangled.
jmasa@698 680 // The spaces should be correctly mangled at this point so
jmasa@698 681 // do some checking here. Note that they are not being mangled
jmasa@698 682 // in the calls to initialize().
jmasa@698 683 // Must check mangling before the spaces are reshaped. Otherwise,
jmasa@698 684 // the bottom or end of one space may have moved into an area
jmasa@698 685 // covered by another space and a failure of the check may
jmasa@698 686 // not correctly indicate which space is not properly mangled.
jmasa@698 687 HeapWord* limit = (HeapWord*) virtual_space()->high();
jmasa@698 688 eden_space()->check_mangled_unused_area(limit);
jmasa@698 689 from_space()->check_mangled_unused_area(limit);
jmasa@698 690 to_space()->check_mangled_unused_area(limit);
jmasa@698 691 }
jmasa@698 692 // When an existing space is being initialized, it is not
jmasa@698 693 // mangled because the space has been previously mangled.
jmasa@698 694 eden_space()->initialize(edenMR,
jmasa@698 695 SpaceDecorator::Clear,
jmasa@698 696 SpaceDecorator::DontMangle);
jmasa@698 697 to_space()->initialize(toMR,
jmasa@698 698 SpaceDecorator::Clear,
jmasa@698 699 SpaceDecorator::DontMangle);
jmasa@698 700 from_space()->initialize(fromMR,
jmasa@698 701 SpaceDecorator::DontClear,
jmasa@698 702 SpaceDecorator::DontMangle);
duke@435 703
duke@435 704 assert(from_space()->top() == old_from_top, "from top changed!");
duke@435 705
duke@435 706 if (PrintAdaptiveSizePolicy) {
duke@435 707 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 708 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 709
duke@435 710 gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
duke@435 711 "collection: %d "
duke@435 712 "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
duke@435 713 "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
duke@435 714 heap->total_collections(),
duke@435 715 old_from, old_to,
duke@435 716 from_space()->capacity_in_bytes(),
duke@435 717 to_space()->capacity_in_bytes());
duke@435 718 gclog_or_tty->cr();
duke@435 719 }
duke@435 720 }
duke@435 721
duke@435 722 void PSYoungGen::swap_spaces() {
duke@435 723 MutableSpace* s = from_space();
duke@435 724 _from_space = to_space();
duke@435 725 _to_space = s;
duke@435 726
duke@435 727 // Now update the decorators.
duke@435 728 PSMarkSweepDecorator* md = from_mark_sweep();
duke@435 729 _from_mark_sweep = to_mark_sweep();
duke@435 730 _to_mark_sweep = md;
duke@435 731
duke@435 732 assert(from_mark_sweep()->space() == from_space(), "Sanity");
duke@435 733 assert(to_mark_sweep()->space() == to_space(), "Sanity");
duke@435 734 }
duke@435 735
duke@435 736 size_t PSYoungGen::capacity_in_bytes() const {
duke@435 737 return eden_space()->capacity_in_bytes()
duke@435 738 + from_space()->capacity_in_bytes(); // to_space() is only used during scavenge
duke@435 739 }
duke@435 740
duke@435 741
duke@435 742 size_t PSYoungGen::used_in_bytes() const {
duke@435 743 return eden_space()->used_in_bytes()
duke@435 744 + from_space()->used_in_bytes(); // to_space() is only used during scavenge
duke@435 745 }
duke@435 746
duke@435 747
duke@435 748 size_t PSYoungGen::free_in_bytes() const {
duke@435 749 return eden_space()->free_in_bytes()
duke@435 750 + from_space()->free_in_bytes(); // to_space() is only used during scavenge
duke@435 751 }
duke@435 752
duke@435 753 size_t PSYoungGen::capacity_in_words() const {
duke@435 754 return eden_space()->capacity_in_words()
duke@435 755 + from_space()->capacity_in_words(); // to_space() is only used during scavenge
duke@435 756 }
duke@435 757
duke@435 758
duke@435 759 size_t PSYoungGen::used_in_words() const {
duke@435 760 return eden_space()->used_in_words()
duke@435 761 + from_space()->used_in_words(); // to_space() is only used during scavenge
duke@435 762 }
duke@435 763
duke@435 764
duke@435 765 size_t PSYoungGen::free_in_words() const {
duke@435 766 return eden_space()->free_in_words()
duke@435 767 + from_space()->free_in_words(); // to_space() is only used during scavenge
duke@435 768 }
duke@435 769
duke@435 770 void PSYoungGen::object_iterate(ObjectClosure* blk) {
duke@435 771 eden_space()->object_iterate(blk);
duke@435 772 from_space()->object_iterate(blk);
duke@435 773 to_space()->object_iterate(blk);
duke@435 774 }
duke@435 775
duke@435 776 void PSYoungGen::precompact() {
duke@435 777 eden_mark_sweep()->precompact();
duke@435 778 from_mark_sweep()->precompact();
duke@435 779 to_mark_sweep()->precompact();
duke@435 780 }
duke@435 781
duke@435 782 void PSYoungGen::adjust_pointers() {
duke@435 783 eden_mark_sweep()->adjust_pointers();
duke@435 784 from_mark_sweep()->adjust_pointers();
duke@435 785 to_mark_sweep()->adjust_pointers();
duke@435 786 }
duke@435 787
duke@435 788 void PSYoungGen::compact() {
duke@435 789 eden_mark_sweep()->compact(ZapUnusedHeapArea);
duke@435 790 from_mark_sweep()->compact(ZapUnusedHeapArea);
duke@435 791 // Mark sweep stores preserved markOops in to space, don't disturb!
duke@435 792 to_mark_sweep()->compact(false);
duke@435 793 }
duke@435 794
duke@435 795 void PSYoungGen::print() const { print_on(tty); }
duke@435 796 void PSYoungGen::print_on(outputStream* st) const {
duke@435 797 st->print(" %-15s", "PSYoungGen");
duke@435 798 if (PrintGCDetails && Verbose) {
duke@435 799 st->print(" total " SIZE_FORMAT ", used " SIZE_FORMAT,
duke@435 800 capacity_in_bytes(), used_in_bytes());
duke@435 801 } else {
duke@435 802 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
duke@435 803 capacity_in_bytes()/K, used_in_bytes()/K);
duke@435 804 }
jmasa@698 805 virtual_space()->print_space_boundaries_on(st);
duke@435 806 st->print(" eden"); eden_space()->print_on(st);
duke@435 807 st->print(" from"); from_space()->print_on(st);
duke@435 808 st->print(" to "); to_space()->print_on(st);
duke@435 809 }
duke@435 810
duke@435 811 void PSYoungGen::print_used_change(size_t prev_used) const {
duke@435 812 gclog_or_tty->print(" [%s:", name());
duke@435 813 gclog_or_tty->print(" " SIZE_FORMAT "K"
duke@435 814 "->" SIZE_FORMAT "K"
duke@435 815 "(" SIZE_FORMAT "K)",
duke@435 816 prev_used / K, used_in_bytes() / K,
duke@435 817 capacity_in_bytes() / K);
duke@435 818 gclog_or_tty->print("]");
duke@435 819 }
duke@435 820
duke@435 821 size_t PSYoungGen::available_for_expansion() {
duke@435 822 ShouldNotReachHere();
duke@435 823 return 0;
duke@435 824 }
duke@435 825
duke@435 826 size_t PSYoungGen::available_for_contraction() {
duke@435 827 ShouldNotReachHere();
duke@435 828 return 0;
duke@435 829 }
duke@435 830
duke@435 831 size_t PSYoungGen::available_to_min_gen() {
duke@435 832 assert(virtual_space()->committed_size() >= min_gen_size(), "Invariant");
duke@435 833 return virtual_space()->committed_size() - min_gen_size();
duke@435 834 }
duke@435 835
duke@435 836 // This method assumes that from-space has live data and that
duke@435 837 // any shrinkage of the young gen is limited by location of
duke@435 838 // from-space.
duke@435 839 size_t PSYoungGen::available_to_live() {
duke@435 840 size_t delta_in_survivor = 0;
duke@435 841 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
jmasa@448 842 const size_t space_alignment = heap->intra_heap_alignment();
duke@435 843 const size_t gen_alignment = heap->young_gen_alignment();
duke@435 844
duke@435 845 MutableSpace* space_shrinking = NULL;
duke@435 846 if (from_space()->end() > to_space()->end()) {
duke@435 847 space_shrinking = from_space();
duke@435 848 } else {
duke@435 849 space_shrinking = to_space();
duke@435 850 }
duke@435 851
duke@435 852 // Include any space that is committed but not included in
duke@435 853 // the survivor spaces.
duke@435 854 assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
duke@435 855 "Survivor space beyond high end");
duke@435 856 size_t unused_committed = pointer_delta(virtual_space()->high(),
duke@435 857 space_shrinking->end(), sizeof(char));
duke@435 858
duke@435 859 if (space_shrinking->is_empty()) {
duke@435 860 // Don't let the space shrink to 0
duke@435 861 assert(space_shrinking->capacity_in_bytes() >= space_alignment,
duke@435 862 "Space is too small");
duke@435 863 delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
duke@435 864 } else {
duke@435 865 delta_in_survivor = pointer_delta(space_shrinking->end(),
duke@435 866 space_shrinking->top(),
duke@435 867 sizeof(char));
duke@435 868 }
duke@435 869
duke@435 870 size_t delta_in_bytes = unused_committed + delta_in_survivor;
duke@435 871 delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);
duke@435 872 return delta_in_bytes;
duke@435 873 }
duke@435 874
duke@435 875 // Return the number of bytes available for resizing down the young
duke@435 876 // generation. This is the minimum of
duke@435 877 // input "bytes"
duke@435 878 // bytes to the minimum young gen size
duke@435 879 // bytes to the size currently being used + some small extra
duke@435 880 size_t PSYoungGen::limit_gen_shrink(size_t bytes) {
duke@435 881 // Allow shrinkage into the current eden but keep eden large enough
duke@435 882 // to maintain the minimum young gen size
duke@435 883 bytes = MIN3(bytes, available_to_min_gen(), available_to_live());
duke@435 884 return align_size_down(bytes, virtual_space()->alignment());
duke@435 885 }
duke@435 886
duke@435 887 void PSYoungGen::reset_after_change() {
duke@435 888 ShouldNotReachHere();
duke@435 889 }
duke@435 890
duke@435 891 void PSYoungGen::reset_survivors_after_shrink() {
duke@435 892 _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
duke@435 893 (HeapWord*)virtual_space()->high_boundary());
duke@435 894 PSScavenge::reference_processor()->set_span(_reserved);
duke@435 895
duke@435 896 MutableSpace* space_shrinking = NULL;
duke@435 897 if (from_space()->end() > to_space()->end()) {
duke@435 898 space_shrinking = from_space();
duke@435 899 } else {
duke@435 900 space_shrinking = to_space();
duke@435 901 }
duke@435 902
duke@435 903 HeapWord* new_end = (HeapWord*)virtual_space()->high();
duke@435 904 assert(new_end >= space_shrinking->bottom(), "Shrink was too large");
duke@435 905 // Was there a shrink of the survivor space?
duke@435 906 if (new_end < space_shrinking->end()) {
duke@435 907 MemRegion mr(space_shrinking->bottom(), new_end);
jmasa@698 908 space_shrinking->initialize(mr,
jmasa@698 909 SpaceDecorator::DontClear,
jmasa@698 910 SpaceDecorator::Mangle);
duke@435 911 }
duke@435 912 }
duke@435 913
duke@435 914 // This method currently does not expect to expand into eden (i.e.,
duke@435 915 // the virtual space boundaries are expected to be consistent
duke@435 916 // with the eden boundaries).
duke@435 917 void PSYoungGen::post_resize() {
duke@435 918 assert_locked_or_safepoint(Heap_lock);
duke@435 919 assert((eden_space()->bottom() < to_space()->bottom()) &&
duke@435 920 (eden_space()->bottom() < from_space()->bottom()),
duke@435 921 "Eden is assumed to be below the survivor spaces");
duke@435 922
duke@435 923 MemRegion cmr((HeapWord*)virtual_space()->low(),
duke@435 924 (HeapWord*)virtual_space()->high());
duke@435 925 Universe::heap()->barrier_set()->resize_covered_region(cmr);
duke@435 926 space_invariants();
duke@435 927 }
duke@435 928
duke@435 929
duke@435 930
duke@435 931 void PSYoungGen::update_counters() {
duke@435 932 if (UsePerfData) {
duke@435 933 _eden_counters->update_all();
duke@435 934 _from_counters->update_all();
duke@435 935 _to_counters->update_all();
duke@435 936 _gen_counters->update_all();
duke@435 937 }
duke@435 938 }
duke@435 939
duke@435 940 void PSYoungGen::verify(bool allow_dirty) {
duke@435 941 eden_space()->verify(allow_dirty);
duke@435 942 from_space()->verify(allow_dirty);
duke@435 943 to_space()->verify(allow_dirty);
duke@435 944 }
jmasa@698 945
jmasa@698 946 #ifndef PRODUCT
jmasa@698 947 void PSYoungGen::record_spaces_top() {
jmasa@698 948 assert(ZapUnusedHeapArea, "Not mangling unused space");
jmasa@698 949 eden_space()->set_top_for_allocations();
jmasa@698 950 from_space()->set_top_for_allocations();
jmasa@698 951 to_space()->set_top_for_allocations();
jmasa@698 952 }
jmasa@698 953 #endif
