src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp

/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

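// The generation is named "ParOldGen" when the parallel compacting
// collector (UseParallelOldGC) is in use and "PSOldGen" otherwise;
// the name appears in GC log output below.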
inline const char* PSOldGen::select_name() {
  return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
}

PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
                   size_t initial_size, size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{
  initialize(rs, alignment, perf_data_name, level);
}

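// This constructor performs no virtual space setup; callers that use it
// are expected to invoke initialize() themselves once a ReservedSpace
// is available.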
PSOldGen::PSOldGen(size_t initial_size,
                   size_t min_size, size_t max_size,
                   const char* perf_data_name, int level):
  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
  _max_gen_size(max_size)
{}

void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
                          const char* perf_data_name, int level) {
  initialize_virtual_space(rs, alignment);
  initialize_work(perf_data_name, level);

  // The old gen can grow to gen_size_limit().  _reserve reflects only
  // the current maximum that can be committed.
  assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");

  initialize_performance_counters(perf_data_name, level);
}

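// Commit the initial generation size out of the reserved space.  Failing
// to commit it is fatal: the VM cannot start without an old gen.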
void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {

  _virtual_space = new PSVirtualSpace(rs, alignment);
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void PSOldGen::initialize_work(const char* perf_data_name, int level) {
  //
  // Basic memory initialization
  //

  MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
                           heap_word_size(_max_gen_size));
  assert(limit_reserved.byte_size() == _max_gen_size,
         "word vs bytes confusion");
  //
  // Object start stuff
  //

  start_array()->initialize(limit_reserved);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());

  //
  // Card table stuff
  //

  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  if (ZapUnusedHeapArea) {
    // Mangle newly committed space immediately rather than
    // waiting for the initialization of the space even though
    // mangling is related to spaces.  Doing it here eliminates
    // the need to carry along information that a complete mangling
    // (bottom to end) needs to be done.
    SpaceMangler::mangle_region(cmr);
  }

  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  CardTableModRefBS* ct = (CardTableModRefBS*)Universe::heap()->barrier_set();
  assert(ct->kind() == BarrierSet::CardTableModRef, "Sanity");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
  if (_reserved.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
  }

  //
  // ObjectSpace stuff
  //

  _object_space = new MutableSpace(virtual_space()->alignment());

  if (_object_space == NULL)
    vm_exit_during_initialization("Could not allocate an old gen space");

  object_space()->initialize(cmr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);

  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);

  if (_object_mark_sweep == NULL)
    vm_exit_during_initialization("Could not complete allocation of old generation");

  // Update the start_array
  start_array()->set_covered_region(cmr);
}

void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
  // Generation Counters, generation 'level', 1 subspace
  _gen_counters = new PSGenerationCounters(perf_data_name, level, 1,
                                           virtual_space());
  _space_counters = new SpaceCounters(perf_data_name, 0,
                                      virtual_space()->reserved_size(),
                                      _object_space, _gen_counters);
}

// Assume that the generation has been allocated if its
// reserved size is not 0.
bool PSOldGen::is_allocated() {
  return virtual_space()->reserved_size() != 0;
}

void PSOldGen::precompact() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Reset start array first.
  start_array()->reset();

  object_mark_sweep()->precompact();

  // Now compact the young gen
  heap->young_gen()->precompact();
}

void PSOldGen::adjust_pointers() {
  object_mark_sweep()->adjust_pointers();
}

void PSOldGen::compact() {
  object_mark_sweep()->compact(ZapUnusedHeapArea);
}

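// Free space in the committed object space plus whatever is still
// uncommitted in the reservation, i.e. the most the generation could
// provide without growing beyond its reserved limit.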
size_t PSOldGen::contiguous_available() const {
  return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
}

// Allocation.  We report all successful allocations to the size policy.
// Note that the perm gen does not use this method, and should not!
HeapWord* PSOldGen::allocate(size_t word_size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = allocate_noexpand(word_size);

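  // The no-expand fast path failed; take the expansion path, which may
  // grow the committed size of the generation.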
  if (res == NULL) {
    res = expand_and_allocate(word_size);
  }

  // Allocations in the old generation need to be reported
  if (res != NULL) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    heap->size_policy()->tenured_allocation(word_size);
  }

  return res;
}

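// GCExpandToAllocateDelayMillis (normally zero) inserts an artificial delay
// between the expansion and the retried allocation, widening the window for
// stress testing of concurrent allocation during expansion.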
HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return allocate_noexpand(word_size);
}

HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
    os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
  }
  return cas_allocate_noexpand(word_size);
}

void PSOldGen::expand(size_t bytes) {
  if (bytes == 0) {
    return;
  }
  MutexLocker x(ExpandHeap_lock);
  const size_t alignment = virtual_space()->alignment();
  size_t aligned_bytes = align_size_up(bytes, alignment);
  size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);

  if (UseNUMA) {
    // With NUMA we use round-robin page allocation for the old gen. Expand by at least
    // providing a page per lgroup. Alignment is larger or equal to the page size.
    aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
  }
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = align_size_down(bytes, alignment);
  }

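  // Try the larger of the two sizes first, fall back to the requested
  // (aligned) amount, and as a last resort commit whatever remains of
  // the reservation.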
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = expand_by(aligned_expand_bytes);
  }
  if (!success) {
    success = expand_by(aligned_bytes);
  }
  if (!success) {
    success = expand_to_reserved();
  }

  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active_and_needs_gc()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }
}

bool PSOldGen::expand_by(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what virtual_space()->expand_by(0) would return
  }
  bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area. The memregion spans
      // end -> new_end, we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
      // the space is available for allocation after post_resize();
      HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
      assert(object_space()->end() < virtual_space_high,
             "Should be true before post_resize()");
      MemRegion mangle_region(object_space()->end(), virtual_space_high);
      // Note that the object space has not yet been updated to
      // coincide with the new underlying virtual space.
      SpaceMangler::mangle_region(mangle_region);
    }
    post_resize();
    if (UsePerfData) {
      _space_counters->update_capacity();
      _gen_counters->update_all();
    }
  }

  if (result && Verbose && PrintGC) {
    size_t new_mem_size = virtual_space()->committed_size();
    size_t old_mem_size = new_mem_size - bytes;
    gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                           SIZE_FORMAT "K to "
                           SIZE_FORMAT "K",
                           name(), old_mem_size/K, bytes/K, new_mem_size/K);
  }

  return result;
}

bool PSOldGen::expand_to_reserved() {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  bool result = true;
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
  if (remaining_bytes > 0) {
    result = expand_by(remaining_bytes);
    DEBUG_ONLY(if (!result) warning("grow to reserve failed"));
  }
  return result;
}

void PSOldGen::shrink(size_t bytes) {
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);

  size_t size = align_size_down(bytes, virtual_space()->alignment());
  if (size > 0) {
    assert_lock_strong(ExpandHeap_lock);
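    // Shrink by the aligned "size", not the raw "bytes": the virtual space
    // expects an alignment-multiple, which is exactly what "size" was
    // computed for above.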
    virtual_space()->shrink_by(size);
    post_resize();

    if (Verbose && PrintGC) {
      size_t new_mem_size = virtual_space()->committed_size();
      size_t old_mem_size = new_mem_size + size;
      gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by "
                             SIZE_FORMAT "K to "
                             SIZE_FORMAT "K",
                             name(), old_mem_size/K, size/K, new_mem_size/K);
    }
  }
}

void PSOldGen::resize(size_t desired_free_space) {
  const size_t alignment = virtual_space()->alignment();
  const size_t size_before = virtual_space()->committed_size();
  size_t new_size = used_in_bytes() + desired_free_space;
  if (new_size < used_in_bytes()) {
    // Overflowed the addition.
    new_size = gen_size_limit();
  }
  // Adjust according to our min and max
  new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());

  assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
  new_size = align_size_up(new_size, alignment);

  const size_t current_size = capacity_in_bytes();

  if (PrintAdaptiveSizePolicy && Verbose) {
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
                           "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
                           " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
                           " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
                           desired_free_space, used_in_bytes(), new_size, current_size,
                           gen_size_limit(), min_gen_size());
  }

  if (new_size == current_size) {
    // No change requested
    return;
  }
  if (new_size > current_size) {
    size_t change_bytes = new_size - current_size;
    expand(change_bytes);
  } else {
    size_t change_bytes = current_size - new_size;
    // shrink doesn't grab this lock, expand does.  Is that right?
    MutexLocker x(ExpandHeap_lock);
    shrink(change_bytes);
  }

  if (PrintAdaptiveSizePolicy) {
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
                           "collection: %d "
                           "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
                           heap->total_collections(),
                           size_before, virtual_space()->committed_size());
  }
}

// NOTE! We need to be careful about resizing. During a GC, multiple
// allocators may be active during heap expansion. If we allow the
// heap resizing to become visible before we have correctly resized
// all heap related data structures, we may cause program failures.
void PSOldGen::post_resize() {
  // First construct a memregion representing the new size
  MemRegion new_memregion((HeapWord*)virtual_space()->low(),
                          (HeapWord*)virtual_space()->high());
  size_t new_word_size = new_memregion.word_size();

  start_array()->set_covered_region(new_memregion);
  Universe::heap()->barrier_set()->resize_covered_region(new_memregion);

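  // The start array and card table above now cover the resized region; only
  // after that is it safe for the object space to publish its new boundaries,
  // since allocators may use the space as soon as initialize() returns.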
  // ALWAYS do this last!!
  object_space()->initialize(new_memregion,
                             SpaceDecorator::DontClear,
                             SpaceDecorator::DontMangle);

  assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
         "Sanity");
}

size_t PSOldGen::gen_size_limit() {
  return _max_gen_size;
}

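// The operations below are meaningful only for the adaptive-size variant of
// this generation (ASPSOldGen, used with UseAdaptiveGCBoundary), which
// overrides them; the base class versions must never be reached.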
void PSOldGen::reset_after_change() {
  ShouldNotReachHere();
  return;
}

size_t PSOldGen::available_for_expansion() {
  ShouldNotReachHere();
  return 0;
}

size_t PSOldGen::available_for_contraction() {
  ShouldNotReachHere();
  return 0;
}

void PSOldGen::print() const { print_on(tty); }
void PSOldGen::print_on(outputStream* st) const {
  st->print(" %-15s", name());
  if (PrintGCDetails && Verbose) {
    st->print(" total " SIZE_FORMAT ", used " SIZE_FORMAT,
              capacity_in_bytes(), used_in_bytes());
  } else {
    st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
              capacity_in_bytes()/K, used_in_bytes()/K);
  }
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               virtual_space()->low_boundary(),
               virtual_space()->high(),
               virtual_space()->high_boundary());

  st->print(" object"); object_space()->print_on(st);
}

void PSOldGen::print_used_change(size_t prev_used) const {
  gclog_or_tty->print(" [%s:", name());
  gclog_or_tty->print(" " SIZE_FORMAT "K"
                      "->" SIZE_FORMAT "K"
                      "(" SIZE_FORMAT "K)",
                      prev_used / K, used_in_bytes() / K,
                      capacity_in_bytes() / K);
  gclog_or_tty->print("]");
}

void PSOldGen::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

#ifndef PRODUCT

void PSOldGen::space_invariants() {
  assert(object_space()->end() == (HeapWord*) virtual_space()->high(),
         "Space invariant");
  assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->low_boundary() <= virtual_space()->low(),
         "Space invariant");
  assert(virtual_space()->high_boundary() >= virtual_space()->high(),
         "Space invariant");
  assert(virtual_space()->low_boundary() == (char*) _reserved.start(),
         "Space invariant");
  assert(virtual_space()->high_boundary() == (char*) _reserved.end(),
         "Space invariant");
  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
         "Space invariant");
}
#endif

void PSOldGen::verify() {
  object_space()->verify();
}
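
// Checks that the ObjectStartArray can map an interior address (one word past
// each object's start) back to the object's start, and that every object's
// start has been recorded as an allocated block.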
class VerifyObjectStartArrayClosure : public ObjectClosure {
  PSOldGen* _gen;
  ObjectStartArray* _start_array;

 public:
  VerifyObjectStartArrayClosure(PSOldGen* gen, ObjectStartArray* start_array) :
    _gen(gen), _start_array(start_array) { }

  virtual void do_object(oop obj) {
    HeapWord* test_addr = (HeapWord*)obj + 1;
    guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj,
              "ObjectStartArray cannot find start of object");
    guarantee(_start_array->is_block_allocated((HeapWord*)obj),
              "ObjectStartArray missing block allocation");
  }
};

void PSOldGen::verify_object_start_array() {
  VerifyObjectStartArrayClosure check(this, &_start_array);
  object_iterate(&check);
}

#ifndef PRODUCT
void PSOldGen::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  object_space()->set_top_for_allocations();
}
#endif
