src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp

author      jmasa
date        Mon, 28 Jul 2008 15:30:23 -0700
changeset   704:850fdf70db2b
parent      698:12eea04c8b06
child       706:818a18cd69a8
permissions -rw-r--r--

Merge

/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_psOldGen.cpp.incl"

inline const char* PSOldGen::select_name() {
  return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
}

PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
                   size_t initial_size, size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, alignment, perf_data_name, level);
}

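// Note: the following constructor defers virtual space setup; initialize()
// must be called separately (the adaptive-boundary subclasses, e.g.
// ASPSOldGen, appear to rely on this two-step construction).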
PSOldGen::PSOldGen(size_t initial_size,
                   size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{}

void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, alignment);
  initialize_work(perf_data_name, level);
  // The old gen can grow to gen_size_limit().  _reserved reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");
}

void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {

  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
                           heap_word_size(_max_gen_size));
  assert(limit_reserved.byte_size() == _max_gen_size,
         "word vs bytes confusion");
  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces.  Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  CardTableModRefBS* _ct = (CardTableModRefBS*)Universe::heap()->barrier_set();
  assert (_ct->kind() == BarrierSet::CardTableModRef, "Sanity");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }
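
  // For a concrete sense of the checks above: the card table divides the
  // heap into fixed-size cards (512 bytes with the usual 1 << 9 card
  // shift), so "card aligned" means the generation boundary is a multiple
  // of the card size and no single card can cover two generations.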

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace();

  if (_object_space == NULL)
    vm_exit_during_initialization("Could not allocate an old gen space");

  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);

  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

  if (_object_mark_sweep == NULL)
    vm_exit_during_initialization("Could not complete allocation of old generation");

  // Update the start_array
  start_array()->set_covered_region(cmr);

  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1,
                                           virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}

void PSOldGen::precompact() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Reset start array first.
  // In product builds the reset is unconditional; in debug builds it is
  // skipped when verifying parallel old GC against mark-sweep, presumably
  // because the verification pass needs the start array contents intact.
  debug_only(if (!UseParallelOldGC || !VerifyParallelOldWithMarkSweep) {)
  start_array()->reset();
  debug_only(})

  object_mark_sweep()->precompact();

  // Now compact the young gen
  heap->young_gen()->precompact();
}

void PSOldGen::adjust_pointers() {
  object_mark_sweep()->adjust_pointers();
}

void PSOldGen::compact() {
  object_mark_sweep()->compact(ZapUnusedHeapArea);
}

void PSOldGen::move_and_update(ParCompactionManager* cm) {
  PSParallelCompact::move_and_update(cm, PSParallelCompact::old_space_id);
}

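// Space available for allocation without a full collection: the free
// bytes in the committed part of the (single, contiguous) object space
// plus whatever is still uncommitted in the reserved virtual space.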
size_t PSOldGen::contiguous_available() const {
  return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
}

// Allocation.  We report all successful allocations to the size policy.
// Note that the perm gen does not use this method, and should not!
HeapWord* PSOldGen::allocate(size_t word_size, bool is_tlab) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = allocate_noexpand(word_size, is_tlab);

  if (res == NULL) {
    res = expand_and_allocate(word_size, is_tlab);
  }

  // Allocations in the old generation need to be reported
  if (res != NULL) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    heap->size_policy()->tenured_allocation(word_size);
  }

  return res;
}

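// GCExpandToAllocateDelayMillis (zero by default) inserts an artificial
// delay between expanding the generation and allocating from it,
// presumably to widen that window for stress testing.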
HeapWord* PSOldGen::expand_and_allocate(size_t word_size, bool is_tlab) {
  assert(!is_tlab, "TLAB's are not supported in PSOldGen");
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return allocate_noexpand(word_size, is_tlab);
}

HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return cas_allocate_noexpand(word_size);
}

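// Expansion policy: first try the larger MinHeapDeltaBytes-aligned step
// (to avoid repeated small expansions), then the requested size, and
// finally fall back to committing whatever remains of the reserve.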
void PSOldGen::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_size_up(bytes, alignment);
  size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);

  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }
}

bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area.  The memregion spans
      // end -> new_end, we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize();
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
             "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result && Verbose && PrintGC) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                           SIZE_FORMAT "K to "
                           SIZE_FORMAT "K",
                           name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = true;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) warning("grow to reserve failed"));
  }
  return result;
}

void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  size_t size = align_size_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    // Shrink by the aligned size, not the raw request, so the committed
    // boundary stays aligned with the virtual space.
    virtual_space()->shrink_by(size);
    post_resize();

    if (Verbose && PrintGC) {
      size_t new_mem_size = virtual_space()->committed_size();
      size_t old_mem_size = new_mem_size + size;
      gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by "
                             SIZE_FORMAT "K to "
                             SIZE_FORMAT "K",
                             name(), old_mem_size/K, size/K, new_mem_size/K);
    }
  }
}

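// Resize to hold used_in_bytes() + desired_free_space, clamped to
// [min_gen_size(), gen_size_limit()] and aligned up.  For example
// (hypothetical numbers), used = 300M and desired free = 100M yields a
// 400M target, which is then clamped, aligned, and compared with the
// current capacity to decide between expand() and shrink().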
void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = gen_size_limit();
  }
  // Adjust according to our min and max
  new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());

  assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
  new_size = align_size_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
      "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
      " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
      " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
      desired_free_space, used_in_bytes(), new_size, current_size,
      gen_size_limit(), min_gen_size());
  }

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // shrink doesn't grab this lock, expand does.  Is that right?
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
                           "collection: %d "
                           "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                           heap->total_collections(),
                           size_before, virtual_space()->committed_size());
  }
}

// NOTE! We need to be careful about resizing. During a GC, multiple
// allocators may be active during heap expansion. If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
                          (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  Universe::heap()->barrier_set()->resize_covered_region(new_memregion);

  HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();

  // ALWAYS do this last!!
  object_space()->set_end(virtual_space_high);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
         "Sanity");
}

size_t PSOldGen::gen_size_limit() {
  return _max_gen_size;
}

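// The hooks below are only meaningful when the generation boundary can
// move; the adaptive-size subclass (ASPSOldGen) presumably overrides
// them, so reaching these base-class bodies indicates a bug.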
void PSOldGen::reset_after_change() {
  ShouldNotReachHere();
  return;
}

size_t PSOldGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}

size_t PSOldGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}

void PSOldGen::print() const { print_on(tty); }
void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  if (PrintGCDetails && Verbose) {
    st->print(" total " SIZE_FORMAT ", used " SIZE_FORMAT,
              capacity_in_bytes(), used_in_bytes());
  } else {
    st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
              capacity_in_bytes()/K, used_in_bytes()/K);
  }
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               virtual_space()->low_boundary(),
               virtual_space()->high(),
               virtual_space()->high_boundary());

  st->print(" object"); object_space()->print_on(st);
}

void PSOldGen::print_used_change(size_t prev_used) const {
  gclog_or_tty->print(" [%s:", name());
  gclog_or_tty->print(" "  SIZE_FORMAT "K"
                      "->" SIZE_FORMAT "K"
                      "("  SIZE_FORMAT "K)",
                      prev_used / K, used_in_bytes() / K,
                      capacity_in_bytes() / K);
  gclog_or_tty->print("]");
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

#ifndef PRODUCT

void PSOldGen::space_invariants() {
  assert(object_space()->end() == (HeapWord*) virtual_space()->high(),
         "Space invariant");
  assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->low_boundary() <= virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->high_boundary() >= virtual_space()->high(),
         "Space invariant");
  assert(virtual_space()->low_boundary() == (char*) _reserved.start(),
         "Space invariant");
  assert(virtual_space()->high_boundary() == (char*) _reserved.end(),
         "Space invariant");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
         "Space invariant");
}
#endif

void PSOldGen::verify(bool allow_dirty) {
  object_space()->verify(allow_dirty);
}

class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* gen, ObjectStartArray* start_array) :
    _gen(gen), _start_array(start_array) { }

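  // Probe one word past the object's start: object_start() must map any
  // interior address back to the object's header, and the start array
  // must show the block as allocated.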
  virtual void do_object(oop obj) {
    HeapWord* test_addr = (HeapWord*)obj + 1;
    guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj,
              "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated((HeapWord*)obj),
              "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check(this, &_start_array);
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif
