src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp

author       johnc
date         Thu, 22 Sep 2011 10:57:37 -0700
changeset    3175:4dfb2df418f2
parent       2314:f95d63e2154a
child        6084:46d7652b223c
permissions  -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp

duke@435 1 /*
stefank@2314 2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/parallelScavenge/asPSYoungGen.hpp"
stefank@2314 27 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
stefank@2314 28 #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
stefank@2314 29 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
stefank@2314 30 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
stefank@2314 31 #include "gc_implementation/shared/gcUtil.hpp"
stefank@2314 32 #include "gc_implementation/shared/spaceDecorator.hpp"
stefank@2314 33 #include "oops/oop.inline.hpp"
stefank@2314 34 #include "runtime/java.hpp"
duke@435 35
duke@435 36 ASPSYoungGen::ASPSYoungGen(size_t init_byte_size,
duke@435 37 size_t minimum_byte_size,
duke@435 38 size_t byte_size_limit) :
duke@435 39 PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
duke@435 40 _gen_size_limit(byte_size_limit) {
duke@435 41 }
duke@435 42
duke@435 43
duke@435 44 ASPSYoungGen::ASPSYoungGen(PSVirtualSpace* vs,
duke@435 45 size_t init_byte_size,
duke@435 46 size_t minimum_byte_size,
duke@435 47 size_t byte_size_limit) :
duke@435 48 //PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
duke@435 49 PSYoungGen(vs->committed_size(), minimum_byte_size, byte_size_limit),
duke@435 50 _gen_size_limit(byte_size_limit) {
duke@435 51
duke@435 52 assert(vs->committed_size() == init_byte_size, "Cannot replace with a virtual space of a different size");
duke@435 53
duke@435 54 _virtual_space = vs;
duke@435 55 }
duke@435 56
duke@435 57 void ASPSYoungGen::initialize_virtual_space(ReservedSpace rs,
duke@435 58 size_t alignment) {
duke@435 59 assert(_init_gen_size != 0, "Should have a finite size");
duke@435 60 _virtual_space = new PSVirtualSpaceHighToLow(rs, alignment);
duke@435 61 if (!_virtual_space->expand_by(_init_gen_size)) {
duke@435 62 vm_exit_during_initialization("Could not reserve enough space for "
duke@435 63 "object heap");
duke@435 64 }
duke@435 65 }
duke@435 66
duke@435 67 void ASPSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
duke@435 68 initialize_virtual_space(rs, alignment);
duke@435 69 initialize_work();
duke@435 70 }
duke@435 71
duke@435 72 size_t ASPSYoungGen::available_for_expansion() {
duke@435 73
duke@435 74 size_t current_committed_size = virtual_space()->committed_size();
duke@435 75 assert((gen_size_limit() >= current_committed_size),
duke@435 76 "generation size limit is wrong");
duke@435 77 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 78 size_t result = gen_size_limit() - current_committed_size;
duke@435 79 size_t result_aligned = align_size_down(result, heap->young_gen_alignment());
duke@435 80 return result_aligned;
duke@435 81 }
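// Illustration with hypothetical numbers: if gen_size_limit() is 64M and
// the current committed size is 48M, the raw result is 16M, which is then
// aligned down to the heap's young-gen alignment before being returned.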
duke@435 82
duke@435 83 // Return the number of bytes the young gen is willing to give up.
duke@435 84 //
duke@435 85 // Future implementations could check the survivors and if to_space is in the
duke@435 86 // right place (below from_space), take a chunk from to_space.
duke@435 87 size_t ASPSYoungGen::available_for_contraction() {
duke@435 88
duke@435 89 size_t uncommitted_bytes = virtual_space()->uncommitted_size();
duke@435 90 if (uncommitted_bytes != 0) {
duke@435 91 return uncommitted_bytes;
duke@435 92 }
duke@435 93
duke@435 94 if (eden_space()->is_empty()) {
duke@435 95 // Respect the minimum size for eden and for the young gen as a whole.
duke@435 96 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
jmasa@448 97 const size_t eden_alignment = heap->intra_heap_alignment();
duke@435 98 const size_t gen_alignment = heap->young_gen_alignment();
duke@435 99
duke@435 100 assert(eden_space()->capacity_in_bytes() >= eden_alignment,
duke@435 101 "Alignment is wrong");
duke@435 102 size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
duke@435 103 eden_avail = align_size_down(eden_avail, gen_alignment);
duke@435 104
duke@435 105 assert(virtual_space()->committed_size() >= min_gen_size(),
duke@435 106 "minimum gen size is wrong");
duke@435 107 size_t gen_avail = virtual_space()->committed_size() - min_gen_size();
duke@435 108 assert(virtual_space()->is_aligned(gen_avail), "not aligned");
duke@435 109
duke@435 110 const size_t max_contraction = MIN2(eden_avail, gen_avail);
duke@435 111 // See comment for ASPSOldGen::available_for_contraction()
duke@435 112 // for reasons the "increment" fraction is used.
duke@435 113 PSAdaptiveSizePolicy* policy = heap->size_policy();
duke@435 114 size_t result = policy->eden_increment_aligned_down(max_contraction);
duke@435 115 size_t result_aligned = align_size_down(result, gen_alignment);
duke@435 116 if (PrintAdaptiveSizePolicy && Verbose) {
duke@435 117 gclog_or_tty->print_cr("ASPSYoungGen::available_for_contraction: %d K",
duke@435 118 result_aligned/K);
duke@435 119 gclog_or_tty->print_cr(" max_contraction %d K", max_contraction/K);
duke@435 120 gclog_or_tty->print_cr(" eden_avail %d K", eden_avail/K);
duke@435 121 gclog_or_tty->print_cr(" gen_avail %d K", gen_avail/K);
duke@435 122 }
duke@435 123 return result_aligned;
duke@435 124
duke@435 125 }
duke@435 126
duke@435 127 return 0;
duke@435 128 }
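// Illustration with hypothetical numbers: if nothing is uncommitted, eden
// is empty with an 8M capacity, the committed size is 32M and
// min_gen_size() is 16M, then eden_avail is roughly 8M minus one eden
// alignment unit (aligned down), gen_avail is 16M, max_contraction is the
// smaller of the two, and the returned value is the adaptive policy's
// eden-increment fraction of that, aligned down to the gen alignment.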
duke@435 129
duke@435 130 // The current implementation only considers space up to the end of eden.
duke@435 131 // If to_space is below from_space, to_space is not considered, although
duke@435 132 // it could be.
duke@435 133 size_t ASPSYoungGen::available_to_live() {
duke@435 134 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
jmasa@448 135 const size_t alignment = heap->intra_heap_alignment();
duke@435 136
duke@435 137 // Include any space that is committed but is not in eden.
duke@435 138 size_t available = pointer_delta(eden_space()->bottom(),
duke@435 139 virtual_space()->low(),
duke@435 140 sizeof(char));
duke@435 141
duke@435 142 const size_t eden_capacity = eden_space()->capacity_in_bytes();
duke@435 143 if (eden_space()->is_empty() && eden_capacity > alignment) {
duke@435 144 available += eden_capacity - alignment;
duke@435 145 }
duke@435 146 return available;
duke@435 147 }
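// Illustration with a hypothetical layout: if 2M of committed space lies
// between the low end of the virtual space and eden's bottom, and eden is
// empty with a 6M capacity, the value returned is 2M + (6M - alignment).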
duke@435 148
duke@435 149 // Similar to PSYoungGen::resize_generation() but
duke@435 150 // allows sum of eden_size and 2 * survivor_size to exceed _max_gen_size
duke@435 151 // expands at the low end of the virtual space
duke@435 152 // moves the boundary between the generations in order to expand
duke@435 153 // some additional diagnostics
duke@435 154 // If no additional changes are required, this can be deleted
duke@435 155 // and the changes factored back into PSYoungGen::resize_generation().
duke@435 156 bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
duke@435 157 const size_t alignment = virtual_space()->alignment();
duke@435 158 size_t orig_size = virtual_space()->committed_size();
duke@435 159 bool size_changed = false;
duke@435 160
duke@435 161 // There used to be a guarantee here that
duke@435 162 // (eden_size + 2*survivor_size) <= _max_gen_size
duke@435 163 // This requirement is enforced by the calculation of desired_size
duke@435 164 // below. It may not be true on entry since the requested
duke@435 165 // eden_size is not bounded by the generation size.
duke@435 166
duke@435 167 assert(max_size() == reserved().byte_size(), "max gen size problem?");
duke@435 168 assert(min_gen_size() <= orig_size && orig_size <= max_size(),
duke@435 169 "just checking");
duke@435 170
duke@435 171 // Adjust new generation size
duke@435 172 const size_t eden_plus_survivors =
duke@435 173 align_size_up(eden_size + 2 * survivor_size, alignment);
duke@435 174 size_t desired_size = MAX2(MIN2(eden_plus_survivors, gen_size_limit()),
duke@435 175 min_gen_size());
duke@435 176 assert(desired_size <= gen_size_limit(), "just checking");
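// For example (hypothetical sizes): with eden_size == 20M, survivor_size
// == 2M and a 64K alignment, eden_plus_survivors is 24M; desired_size is
// that value clamped into the range [min_gen_size(), gen_size_limit()].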
duke@435 177
duke@435 178 if (desired_size > orig_size) {
duke@435 179 // Grow the generation
duke@435 180 size_t change = desired_size - orig_size;
jmasa@698 181 HeapWord* prev_low = (HeapWord*) virtual_space()->low();
duke@435 182 if (!virtual_space()->expand_by(change)) {
duke@435 183 return false;
duke@435 184 }
jmasa@698 185 if (ZapUnusedHeapArea) {
jmasa@698 186 // Mangle newly committed space immediately because it
jmasa@698 187 // can be done here more simply than after the new
jmasa@698 188 // spaces have been computed.
jmasa@698 189 HeapWord* new_low = (HeapWord*) virtual_space()->low();
jmasa@698 190 assert(new_low < prev_low, "Did not grow");
jmasa@698 191
jmasa@698 192 MemRegion mangle_region(new_low, prev_low);
jmasa@698 193 SpaceMangler::mangle_region(mangle_region);
jmasa@698 194 }
duke@435 195 size_changed = true;
duke@435 196 } else if (desired_size < orig_size) {
duke@435 197 size_t desired_change = orig_size - desired_size;
duke@435 198
duke@435 199 // How much is available for shrinking.
duke@435 200 size_t available_bytes = limit_gen_shrink(desired_change);
duke@435 201 size_t change = MIN2(desired_change, available_bytes);
duke@435 202 virtual_space()->shrink_by(change);
duke@435 203 size_changed = true;
duke@435 204 } else {
duke@435 205 if (Verbose && PrintGC) {
duke@435 206 if (orig_size == gen_size_limit()) {
duke@435 207 gclog_or_tty->print_cr("ASPSYoung generation size at maximum: "
duke@435 208 SIZE_FORMAT "K", orig_size/K);
duke@435 209 } else if (orig_size == min_gen_size()) {
duke@435 210 gclog_or_tty->print_cr("ASPSYoung generation size at minium: "
duke@435 211 SIZE_FORMAT "K", orig_size/K);
duke@435 212 }
duke@435 213 }
duke@435 214 }
duke@435 215
duke@435 216 if (size_changed) {
duke@435 217 reset_after_change();
duke@435 218 if (Verbose && PrintGC) {
duke@435 219 size_t current_size = virtual_space()->committed_size();
duke@435 220 gclog_or_tty->print_cr("ASPSYoung generation size changed: "
duke@435 221 SIZE_FORMAT "K->" SIZE_FORMAT "K",
duke@435 222 orig_size/K, current_size/K);
duke@435 223 }
duke@435 224 }
duke@435 225
duke@435 226 guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
duke@435 227 virtual_space()->committed_size() == max_size(), "Sanity");
duke@435 228
duke@435 229 return true;
duke@435 230 }
duke@435 231
duke@435 232 // Similar to PSYoungGen::resize_spaces() but
duke@435 233 // eden always starts at the low end of the committed virtual space
duke@435 234 // current implementation does not allow holes between the spaces
duke@435 235 // _young_generation_boundary has to be reset because it changes,
duke@435 236 // so additional verification is done.
jmasa@698 237
duke@435 238 void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
duke@435 239 size_t requested_survivor_size) {
jmasa@698 240 assert(UseAdaptiveSizePolicy, "sanity check");
duke@435 241 assert(requested_eden_size > 0 && requested_survivor_size > 0,
duke@435 242 "just checking");
duke@435 243
duke@435 244 space_invariants();
duke@435 245
duke@435 246 // We require eden and to space to be empty
duke@435 247 if ((!eden_space()->is_empty()) || (!to_space()->is_empty())) {
duke@435 248 return;
duke@435 249 }
duke@435 250
duke@435 251 if (PrintAdaptiveSizePolicy && Verbose) {
duke@435 252 gclog_or_tty->print_cr("PSYoungGen::resize_spaces(requested_eden_size: "
duke@435 253 SIZE_FORMAT
duke@435 254 ", requested_survivor_size: " SIZE_FORMAT ")",
duke@435 255 requested_eden_size, requested_survivor_size);
duke@435 256 gclog_or_tty->print_cr(" eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
duke@435 257 SIZE_FORMAT,
duke@435 258 eden_space()->bottom(),
duke@435 259 eden_space()->end(),
duke@435 260 pointer_delta(eden_space()->end(),
duke@435 261 eden_space()->bottom(),
duke@435 262 sizeof(char)));
duke@435 263 gclog_or_tty->print_cr(" from: [" PTR_FORMAT ".." PTR_FORMAT ") "
duke@435 264 SIZE_FORMAT,
duke@435 265 from_space()->bottom(),
duke@435 266 from_space()->end(),
duke@435 267 pointer_delta(from_space()->end(),
duke@435 268 from_space()->bottom(),
duke@435 269 sizeof(char)));
duke@435 270 gclog_or_tty->print_cr(" to: [" PTR_FORMAT ".." PTR_FORMAT ") "
duke@435 271 SIZE_FORMAT,
duke@435 272 to_space()->bottom(),
duke@435 273 to_space()->end(),
duke@435 274 pointer_delta( to_space()->end(),
duke@435 275 to_space()->bottom(),
duke@435 276 sizeof(char)));
duke@435 277 }
duke@435 278
duke@435 279 // There's nothing to do if the new sizes are the same as the current
duke@435 280 if (requested_survivor_size == to_space()->capacity_in_bytes() &&
duke@435 281 requested_survivor_size == from_space()->capacity_in_bytes() &&
duke@435 282 requested_eden_size == eden_space()->capacity_in_bytes()) {
duke@435 283 if (PrintAdaptiveSizePolicy && Verbose) {
duke@435 284 gclog_or_tty->print_cr(" capacities are the right sizes, returning");
duke@435 285 }
duke@435 286 return;
duke@435 287 }
duke@435 288
duke@435 289 char* eden_start = (char*)virtual_space()->low();
duke@435 290 char* eden_end = (char*)eden_space()->end();
duke@435 291 char* from_start = (char*)from_space()->bottom();
duke@435 292 char* from_end = (char*)from_space()->end();
duke@435 293 char* to_start = (char*)to_space()->bottom();
duke@435 294 char* to_end = (char*)to_space()->end();
duke@435 295
duke@435 296 assert(eden_start < from_start, "Cannot push into from_space");
duke@435 297
duke@435 298 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
jmasa@448 299 const size_t alignment = heap->intra_heap_alignment();
jmasa@698 300 const bool maintain_minimum =
jmasa@698 301 (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
duke@435 302
jmasa@698 303 bool eden_from_to_order = from_start < to_start;
duke@435 304 // Check whether from space is below to space
jmasa@698 305 if (eden_from_to_order) {
duke@435 306 // Eden, from, to
jmasa@698 307
duke@435 308 if (PrintAdaptiveSizePolicy && Verbose) {
duke@435 309 gclog_or_tty->print_cr(" Eden, from, to:");
duke@435 310 }
duke@435 311
duke@435 312 // Set eden
jmasa@698 313 // "requested_eden_size" is a goal for the size of eden
jmasa@698 314 // and may not be attainable. "eden_size" below is
jmasa@698 315 // calculated based on the location of from-space and
jmasa@698 316 // the goal for the size of eden. from-space is
jmasa@698 317 // fixed in place because it contains live data.
jmasa@698 318 // The calculation is done this way to avoid 32-bit
jmasa@698 319 // overflow (i.e., eden_start + requested_eden_size
jmasa@698 320 // may be too large to represent in 32 bits).
jmasa@698 321 size_t eden_size;
jmasa@698 322 if (maintain_minimum) {
jmasa@698 323 // Only make eden larger than the requested size if
jmasa@698 324 // the minimum size of the generation has to be maintained.
jmasa@698 325 // This could be done in general but policy at a higher
jmasa@698 326 // level is determining a requested size for eden and that
jmasa@698 327 // should be honored unless there is a fundamental reason.
jmasa@698 328 eden_size = pointer_delta(from_start,
jmasa@698 329 eden_start,
jmasa@698 330 sizeof(char));
jmasa@698 331 } else {
jmasa@698 332 eden_size = MIN2(requested_eden_size,
jmasa@698 333 pointer_delta(from_start, eden_start, sizeof(char)));
jmasa@698 334 }
jmasa@698 335
duke@435 336 eden_end = eden_start + eden_size;
jcoomes@1844 337 assert(eden_end >= eden_start, "addition overflowed");
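// A hypothetical 32-bit illustration of the overflow mentioned above:
// with eden_start at 0xF0000000 and a requested_eden_size of 0x20000000,
// eden_start + requested_eden_size would wrap around zero.  Taking the
// MIN2 with pointer_delta(from_start, eden_start) keeps eden_end at or
// below from_start, which is a valid address, so the addition above
// cannot overflow.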
duke@435 338
duke@435 339 // To may resize into from space as long as it is clear of live data.
duke@435 340 // From space must remain page aligned, though, so we need to do some
duke@435 341 // extra calculations.
duke@435 342
duke@435 343 // First calculate an optimal to-space
duke@435 344 to_end = (char*)virtual_space()->high();
duke@435 345 to_start = (char*)pointer_delta(to_end,
duke@435 346 (char*)requested_survivor_size,
duke@435 347 sizeof(char));
duke@435 348
duke@435 349 // Does the optimal to-space overlap from-space?
duke@435 350 if (to_start < (char*)from_space()->end()) {
duke@435 351 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 352
duke@435 353 // Calculate the minimum offset possible for from_end
duke@435 354 size_t from_size =
duke@435 355 pointer_delta(from_space()->top(), from_start, sizeof(char));
duke@435 356
duke@435 357 // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
duke@435 358 if (from_size == 0) {
duke@435 359 from_size = alignment;
duke@435 360 } else {
duke@435 361 from_size = align_size_up(from_size, alignment);
duke@435 362 }
duke@435 363
duke@435 364 from_end = from_start + from_size;
duke@435 365 assert(from_end > from_start, "addition overflow or from_size problem");
duke@435 366
duke@435 367 guarantee(from_end <= (char*)from_space()->end(),
duke@435 368 "from_end moved to the right");
duke@435 369
duke@435 370 // Now update to_start with the new from_end
duke@435 371 to_start = MAX2(from_end, to_start);
duke@435 372 }
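// In the non-overlapping case, to-space simply occupies the top
// requested_survivor_size bytes of the committed area; in the overlapping
// case handled above, to_start is pushed up to at least the (aligned) end
// of from-space's live data.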
duke@435 373
duke@435 374 guarantee(to_start != to_end, "to space is zero sized");
duke@435 375
duke@435 376 if (PrintAdaptiveSizePolicy && Verbose) {
duke@435 377 gclog_or_tty->print_cr(" [eden_start .. eden_end): "
duke@435 378 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@435 379 eden_start,
duke@435 380 eden_end,
duke@435 381 pointer_delta(eden_end, eden_start, sizeof(char)));
duke@435 382 gclog_or_tty->print_cr(" [from_start .. from_end): "
duke@435 383 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@435 384 from_start,
duke@435 385 from_end,
duke@435 386 pointer_delta(from_end, from_start, sizeof(char)));
duke@435 387 gclog_or_tty->print_cr(" [ to_start .. to_end): "
duke@435 388 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@435 389 to_start,
duke@435 390 to_end,
duke@435 391 pointer_delta( to_end, to_start, sizeof(char)));
duke@435 392 }
duke@435 393 } else {
duke@435 394 // Eden, to, from
duke@435 395 if (PrintAdaptiveSizePolicy && Verbose) {
duke@435 396 gclog_or_tty->print_cr(" Eden, to, from:");
duke@435 397 }
duke@435 398
duke@435 399 // To space gets priority over eden resizing. Note that we position
duke@435 400 // to space as if we were able to resize from space, even though from
duke@435 401 // space is not modified.
duke@435 402 // Giving eden priority was tried and gave poorer performance.
duke@435 403 to_end = (char*)pointer_delta(virtual_space()->high(),
duke@435 404 (char*)requested_survivor_size,
duke@435 405 sizeof(char));
duke@435 406 to_end = MIN2(to_end, from_start);
duke@435 407 to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
duke@435 408 sizeof(char));
duke@435 409 // If the space sizes are to be increased by several times, then
duke@435 410 // 'to_start' will point beyond the young generation. In this case
duke@435 411 // 'to_start' should be adjusted.
duke@435 412 to_start = MAX2(to_start, eden_start + alignment);
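// Sketch of the layout computed here (no concrete addresses assumed):
// to_end is placed requested_survivor_size below the high end of the
// committed space, but no higher than from_start; to_start is placed
// requested_survivor_size below to_end; and to_start is then clamped so
// that at least one alignment unit remains for eden.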
duke@435 413
duke@435 414 // Compute how big eden can be, then adjust end.
jmasa@698 415 // See comments above on calculating eden_end.
jmasa@698 416 size_t eden_size;
jmasa@698 417 if (maintain_minimum) {
jmasa@698 418 eden_size = pointer_delta(to_start, eden_start, sizeof(char));
jmasa@698 419 } else {
jmasa@698 420 eden_size = MIN2(requested_eden_size,
jmasa@698 421 pointer_delta(to_start, eden_start, sizeof(char)));
jmasa@698 422 }
duke@435 423 eden_end = eden_start + eden_size;
jcoomes@1844 424 assert(eden_end >= eden_start, "addition overflowed");
duke@435 425
duke@435 426 // Don't let eden shrink down to 0 or less.
duke@435 427 eden_end = MAX2(eden_end, eden_start + alignment);
duke@435 428 to_start = MAX2(to_start, eden_end);
duke@435 429
duke@435 430 if (PrintAdaptiveSizePolicy && Verbose) {
duke@435 431 gclog_or_tty->print_cr(" [eden_start .. eden_end): "
duke@435 432 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@435 433 eden_start,
duke@435 434 eden_end,
duke@435 435 pointer_delta(eden_end, eden_start, sizeof(char)));
duke@435 436 gclog_or_tty->print_cr(" [ to_start .. to_end): "
duke@435 437 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@435 438 to_start,
duke@435 439 to_end,
duke@435 440 pointer_delta( to_end, to_start, sizeof(char)));
duke@435 441 gclog_or_tty->print_cr(" [from_start .. from_end): "
duke@435 442 "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
duke@435 443 from_start,
duke@435 444 from_end,
duke@435 445 pointer_delta(from_end, from_start, sizeof(char)));
duke@435 446 }
duke@435 447 }
duke@435 448
duke@435 449
duke@435 450 guarantee((HeapWord*)from_start <= from_space()->bottom(),
duke@435 451 "from start moved to the right");
duke@435 452 guarantee((HeapWord*)from_end >= from_space()->top(),
duke@435 453 "from end moved into live data");
duke@435 454 assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
duke@435 455 assert(is_object_aligned((intptr_t)from_start), "checking alignment");
duke@435 456 assert(is_object_aligned((intptr_t)to_start), "checking alignment");
duke@435 457
duke@435 458 MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
duke@435 459 MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
duke@435 460 MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);
duke@435 461
duke@435 462 // Let's make sure the call to initialize doesn't reset "top"!
duke@435 463 DEBUG_ONLY(HeapWord* old_from_top = from_space()->top();)
duke@435 464
duke@435 465 // For PrintAdaptiveSizePolicy block below
duke@435 466 size_t old_from = from_space()->capacity_in_bytes();
duke@435 467 size_t old_to = to_space()->capacity_in_bytes();
duke@435 468
jmasa@698 469 if (ZapUnusedHeapArea) {
jmasa@698 470 // NUMA is a special case because a NUMA space is not mangled,
jmasa@698 471 // so that its pages are not prematurely bound to the wrong
jmasa@698 472 // memory (i.e., we don't want the GC thread to be the first to
jmasa@698 473 // touch the memory). The survivor spaces are not NUMA
jmasa@698 474 // spaces and are mangled.
jmasa@698 475 if (UseNUMA) {
jmasa@698 476 if (eden_from_to_order) {
jmasa@698 477 mangle_survivors(from_space(), fromMR, to_space(), toMR);
jmasa@698 478 } else {
jmasa@698 479 mangle_survivors(to_space(), toMR, from_space(), fromMR);
jmasa@698 480 }
jmasa@698 481 }
jmasa@698 482
jmasa@698 483 // If not mangling the spaces, do some checking to verify that
jmasa@698 484 // the spaces are already mangled.
jmasa@698 485 // The spaces should be correctly mangled at this point so
jmasa@698 486 // do some checking here. Note that they are not being mangled
jmasa@698 487 // in the calls to initialize().
jmasa@698 488 // Must check mangling before the spaces are reshaped. Otherwise,
jmasa@698 489 // the bottom or end of one space may have moved into an area
jmasa@698 490 // covered by another space and a failure of the check may
jmasa@698 491 // not correctly indicate which space is not properly mangled.
jmasa@698 492
jmasa@698 493 HeapWord* limit = (HeapWord*) virtual_space()->high();
jmasa@698 494 eden_space()->check_mangled_unused_area(limit);
jmasa@698 495 from_space()->check_mangled_unused_area(limit);
jmasa@698 496 to_space()->check_mangled_unused_area(limit);
jmasa@698 497 }
jmasa@698 498 // When an existing space is being initialized, it is not
jmasa@698 499 // mangled because the space has been previously mangled.
jmasa@698 500 eden_space()->initialize(edenMR,
jmasa@698 501 SpaceDecorator::Clear,
jmasa@698 502 SpaceDecorator::DontMangle);
jmasa@698 503 to_space()->initialize(toMR,
jmasa@698 504 SpaceDecorator::Clear,
jmasa@698 505 SpaceDecorator::DontMangle);
jmasa@698 506 from_space()->initialize(fromMR,
jmasa@698 507 SpaceDecorator::DontClear,
jmasa@698 508 SpaceDecorator::DontMangle);
jmasa@698 509
duke@435 510 PSScavenge::set_young_generation_boundary(eden_space()->bottom());
duke@435 511
duke@435 512 assert(from_space()->top() == old_from_top, "from top changed!");
duke@435 513
duke@435 514 if (PrintAdaptiveSizePolicy) {
duke@435 515 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 516 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 517
duke@435 518 gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
duke@435 519 "collection: %d "
duke@435 520 "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
duke@435 521 "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
duke@435 522 heap->total_collections(),
duke@435 523 old_from, old_to,
duke@435 524 from_space()->capacity_in_bytes(),
duke@435 525 to_space()->capacity_in_bytes());
duke@435 526 gclog_or_tty->cr();
duke@435 527 }
duke@435 528 space_invariants();
duke@435 529 }
duke@435 530 void ASPSYoungGen::reset_after_change() {
duke@435 531 assert_locked_or_safepoint(Heap_lock);
duke@435 532
duke@435 533 _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
duke@435 534 (HeapWord*)virtual_space()->high_boundary());
duke@435 535 PSScavenge::reference_processor()->set_span(_reserved);
duke@435 536
duke@435 537 HeapWord* new_eden_bottom = (HeapWord*)virtual_space()->low();
duke@435 538 HeapWord* eden_bottom = eden_space()->bottom();
duke@435 539 if (new_eden_bottom != eden_bottom) {
duke@435 540 MemRegion eden_mr(new_eden_bottom, eden_space()->end());
jmasa@698 541 eden_space()->initialize(eden_mr,
jmasa@698 542 SpaceDecorator::Clear,
jmasa@698 543 SpaceDecorator::Mangle);
duke@435 544 PSScavenge::set_young_generation_boundary(eden_space()->bottom());
duke@435 545 }
duke@435 546 MemRegion cmr((HeapWord*)virtual_space()->low(),
duke@435 547 (HeapWord*)virtual_space()->high());
duke@435 548 Universe::heap()->barrier_set()->resize_covered_region(cmr);
duke@435 549
duke@435 550 space_invariants();
duke@435 551 }
