src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp

Fri, 29 Apr 2016 00:06:10 +0800

author:      aoqi
date:        Fri, 29 Apr 2016 00:06:10 +0800
changeset:   1 (2d8a650513c2)
parent:      0 (f90c822e73f8)
child:       25 (873fd82b133d)
permissions: -rw-r--r--

Added MIPS 64-bit port.

aoqi@0 1 /*
aoqi@0 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24
aoqi@1 25 /*
aoqi@1 26 * This file has been modified by Loongson Technology in 2015. These
aoqi@1 27 * modifications are Copyright (c) 2015 Loongson Technology, and are made
aoqi@1 28 * available on the same license terms set forth above.
aoqi@1 29 */
aoqi@1 30
aoqi@0 31 #include "precompiled.hpp"
aoqi@0 32 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
aoqi@0 33 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
aoqi@0 34 #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
aoqi@0 35 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
aoqi@1 36 #include "gc_implementation/shared/mutableNUMASpace.hpp"
aoqi@0 37 #include "gc_implementation/shared/spaceDecorator.hpp"
aoqi@0 38 #include "memory/cardTableModRefBS.hpp"
aoqi@0 39 #include "memory/gcLocker.inline.hpp"
aoqi@0 40 #include "oops/oop.inline.hpp"
aoqi@0 41 #include "runtime/java.hpp"
aoqi@0 42
aoqi@0 43 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
aoqi@0 44
aoqi@0 45 inline const char* PSOldGen::select_name() {
aoqi@0 46 return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
aoqi@0 47 }
aoqi@0 48
aoqi@0 49 PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
aoqi@0 50 size_t initial_size, size_t min_size, size_t max_size,
aoqi@0 51 const char* perf_data_name, int level):
aoqi@0 52 _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
aoqi@0 53 _max_gen_size(max_size)
aoqi@0 54 {
aoqi@0 55 initialize(rs, alignment, perf_data_name, level);
aoqi@0 56 }
aoqi@0 57
aoqi@0 58 PSOldGen::PSOldGen(size_t initial_size,
aoqi@0 59 size_t min_size, size_t max_size,
aoqi@0 60 const char* perf_data_name, int level):
aoqi@0 61 _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
aoqi@0 62 _max_gen_size(max_size)
aoqi@0 63 {}
aoqi@0 64
aoqi@0 65 void PSOldGen::initialize(ReservedSpace rs, size_t alignment,
aoqi@0 66 const char* perf_data_name, int level) {
aoqi@0 67 initialize_virtual_space(rs, alignment);
aoqi@0 68 initialize_work(perf_data_name, level);
aoqi@0 69
aoqi@0 70 // The old gen can grow to gen_size_limit(). _reserve reflects only
aoqi@0 71 // the current maximum that can be committed.
aoqi@0 72 assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check");
aoqi@0 73
aoqi@0 74 initialize_performance_counters(perf_data_name, level);
aoqi@0 75 }
aoqi@0 76
aoqi@0 77 void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
aoqi@0 78
aoqi@0 79 _virtual_space = new PSVirtualSpace(rs, alignment);
aoqi@0 80 if (!_virtual_space->expand_by(_init_gen_size)) {
aoqi@0 81 vm_exit_during_initialization("Could not reserve enough space for "
aoqi@0 82 "object heap");
aoqi@0 83 }
aoqi@0 84 }
aoqi@0 85
aoqi@0 86 void PSOldGen::initialize_work(const char* perf_data_name, int level) {
aoqi@0 87 //
aoqi@0 88 // Basic memory initialization
aoqi@0 89 //
aoqi@0 90
aoqi@0 91 MemRegion limit_reserved((HeapWord*)virtual_space()->low_boundary(),
aoqi@0 92 heap_word_size(_max_gen_size));
aoqi@0 93 assert(limit_reserved.byte_size() == _max_gen_size,
aoqi@0 94 "word vs bytes confusion");
aoqi@0 95 //
aoqi@0 96 // Object start stuff
aoqi@0 97 //
aoqi@0 98
aoqi@0 99 start_array()->initialize(limit_reserved);
aoqi@0 100
aoqi@0 101 _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
aoqi@0 102 (HeapWord*)virtual_space()->high_boundary());
aoqi@0 103
aoqi@0 104 //
aoqi@0 105 // Card table stuff
aoqi@0 106 //
aoqi@0 107
aoqi@0 108 MemRegion cmr((HeapWord*)virtual_space()->low(),
aoqi@0 109 (HeapWord*)virtual_space()->high());
aoqi@0 110 if (ZapUnusedHeapArea) {
aoqi@0 111 // Mangle newly committed space immediately rather than
aoqi@0 112 // waiting for the initialization of the space even though
aoqi@0 113 // mangling is related to spaces. Doing it here eliminates
aoqi@0 114 // the need to carry along information that a complete mangling
aoqi@0 115 // (bottom to end) needs to be done.
aoqi@0 116 SpaceMangler::mangle_region(cmr);
aoqi@0 117 }
aoqi@0 118
aoqi@0 119 Universe::heap()->barrier_set()->resize_covered_region(cmr);
aoqi@0 120
aoqi@0 121 CardTableModRefBS* _ct = (CardTableModRefBS*)Universe::heap()->barrier_set();
aoqi@0 122 assert (_ct->kind() == BarrierSet::CardTableModRef, "Sanity");
aoqi@0 123
aoqi@0 124 // Verify that the start and end of this generation is the start of a card.
aoqi@0 125 // If this wasn't true, a single card could span more than one generation,
aoqi@0 126 // which would cause problems when we commit/uncommit memory, and when we
aoqi@0 127 // clear and dirty cards.
aoqi@0 128 guarantee(_ct->is_card_aligned(_reserved.start()), "generation must be card aligned");
aoqi@0 129 if (_reserved.end() != Universe::heap()->reserved_region().end()) {
aoqi@0 130 // Don't check at the very end of the heap as we'll assert that we're probing off
aoqi@0 131 // the end if we try.
aoqi@0 132 guarantee(_ct->is_card_aligned(_reserved.end()), "generation must be card aligned");
aoqi@0 133 }
aoqi@0 134
aoqi@0 135 //
aoqi@0 136 // ObjectSpace stuff
aoqi@0 137 //
aoqi@1 138 if(UseOldNUMA) {
aoqi@1 139 _object_space = new MutableNUMASpace(virtual_space()->alignment());
aoqi@1 140 }
aoqi@1 141 else {
aoqi@1 142 _object_space = new MutableSpace(virtual_space()->alignment());
aoqi@1 143 }
aoqi@0 144
aoqi@0 145 if (_object_space == NULL)
aoqi@0 146 vm_exit_during_initialization("Could not allocate an old gen space");
aoqi@0 147
aoqi@0 148 object_space()->initialize(cmr,
aoqi@0 149 SpaceDecorator::Clear,
aoqi@0 150 SpaceDecorator::Mangle);
aoqi@0 151
aoqi@0 152 _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);
aoqi@0 153
aoqi@0 154 if (_object_mark_sweep == NULL)
aoqi@0 155 vm_exit_during_initialization("Could not complete allocation of old generation");
aoqi@0 156
aoqi@0 157 // Update the start_array
aoqi@0 158 start_array()->set_covered_region(cmr);
aoqi@0 159 }
aoqi@0 160
aoqi@0 161 void PSOldGen::initialize_performance_counters(const char* perf_data_name, int level) {
aoqi@0 162 // Generation Counters, generation 'level', 1 subspace
aoqi@0 163 _gen_counters = new PSGenerationCounters(perf_data_name, level, 1,
aoqi@0 164 virtual_space());
aoqi@0 165 _space_counters = new SpaceCounters(perf_data_name, 0,
aoqi@0 166 virtual_space()->reserved_size(),
aoqi@0 167 _object_space, _gen_counters);
aoqi@0 168 }
aoqi@0 169
aoqi@0 170 // Assume that the generation has been allocated if its
aoqi@0 171 // reserved size is not 0.
aoqi@0 172 bool PSOldGen::is_allocated() {
aoqi@0 173 return virtual_space()->reserved_size() != 0;
aoqi@0 174 }
aoqi@0 175
aoqi@0 176 void PSOldGen::precompact() {
aoqi@0 177 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
aoqi@0 178 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
aoqi@0 179
aoqi@0 180 // Reset start array first.
aoqi@0 181 start_array()->reset();
aoqi@0 182
aoqi@0 183 object_mark_sweep()->precompact();
aoqi@0 184
aoqi@0 185 // Now compact the young gen
aoqi@0 186 heap->young_gen()->precompact();
aoqi@0 187 }
aoqi@0 188
aoqi@0 189 void PSOldGen::adjust_pointers() {
aoqi@0 190 object_mark_sweep()->adjust_pointers();
aoqi@0 191 }
aoqi@0 192
aoqi@0 193 void PSOldGen::compact() {
aoqi@0 194 object_mark_sweep()->compact(ZapUnusedHeapArea);
aoqi@0 195 }
aoqi@0 196
aoqi@0 197 size_t PSOldGen::contiguous_available() const {
aoqi@0 198 return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
aoqi@0 199 }
aoqi@0 200
aoqi@0 201 // Allocation. We report all successful allocations to the size policy
aoqi@0 202 // Note that the perm gen does not use this method, and should not!
aoqi@0 203 HeapWord* PSOldGen::allocate(size_t word_size) {
aoqi@0 204 assert_locked_or_safepoint(Heap_lock);
aoqi@0 205 HeapWord* res = allocate_noexpand(word_size);
aoqi@0 206
aoqi@0 207 if (res == NULL) {
aoqi@0 208 res = expand_and_allocate(word_size);
aoqi@0 209 }
aoqi@0 210
aoqi@0 211 // Allocations in the old generation need to be reported
aoqi@0 212 if (res != NULL) {
aoqi@0 213 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
aoqi@0 214 heap->size_policy()->tenured_allocation(word_size);
aoqi@0 215 }
aoqi@0 216
aoqi@0 217 return res;
aoqi@0 218 }
aoqi@0 219
aoqi@0 220 HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
aoqi@0 221 expand(word_size*HeapWordSize);
aoqi@0 222 if (GCExpandToAllocateDelayMillis > 0) {
aoqi@0 223 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
aoqi@0 224 }
aoqi@0 225 return allocate_noexpand(word_size);
aoqi@0 226 }
aoqi@0 227
aoqi@1 228 HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size, int node) {
aoqi@0 229 expand(word_size*HeapWordSize);
aoqi@0 230 if (GCExpandToAllocateDelayMillis > 0) {
aoqi@0 231 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
aoqi@0 232 }
aoqi@1 233 return cas_allocate_noexpand(word_size, node);
aoqi@0 234 }
aoqi@0 235
aoqi@0 236 void PSOldGen::expand(size_t bytes) {
aoqi@0 237 if (bytes == 0) {
aoqi@0 238 return;
aoqi@0 239 }
aoqi@0 240 MutexLocker x(ExpandHeap_lock);
aoqi@0 241 const size_t alignment = virtual_space()->alignment();
aoqi@0 242 size_t aligned_bytes = align_size_up(bytes, alignment);
aoqi@0 243 size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment);
aoqi@0 244
aoqi@0 245 if (UseNUMA) {
aoqi@0 246 // With NUMA we use round-robin page allocation for the old gen. Expand by at least
aoqi@0 247 // providing a page per lgroup. Alignment is larger or equal to the page size.
aoqi@0 248 aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
aoqi@0 249 }
aoqi@0 250 if (aligned_bytes == 0){
aoqi@0 251 // The alignment caused the number of bytes to wrap. An expand_by(0) will
aoqi@0 252 // return true with the implication that and expansion was done when it
aoqi@0 253 // was not. A call to expand implies a best effort to expand by "bytes"
aoqi@0 254 // but not a guarantee. Align down to give a best effort. This is likely
aoqi@0 255 // the most that the generation can expand since it has some capacity to
aoqi@0 256 // start with.
aoqi@0 257 aligned_bytes = align_size_down(bytes, alignment);
aoqi@0 258 }
aoqi@0 259
aoqi@0 260 bool success = false;
aoqi@0 261 if (aligned_expand_bytes > aligned_bytes) {
aoqi@0 262 success = expand_by(aligned_expand_bytes);
aoqi@0 263 }
aoqi@0 264 if (!success) {
aoqi@0 265 success = expand_by(aligned_bytes);
aoqi@0 266 }
aoqi@0 267 if (!success) {
aoqi@0 268 success = expand_to_reserved();
aoqi@0 269 }
aoqi@0 270
aoqi@0 271 if (PrintGC && Verbose) {
aoqi@0 272 if (success && GC_locker::is_active_and_needs_gc()) {
aoqi@0 273 gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
aoqi@0 274 }
aoqi@0 275 }
aoqi@0 276 }
aoqi@0 277
aoqi@0 278 bool PSOldGen::expand_by(size_t bytes) {
aoqi@0 279 assert_lock_strong(ExpandHeap_lock);
aoqi@0 280 assert_locked_or_safepoint(Heap_lock);
aoqi@0 281 if (bytes == 0) {
aoqi@0 282 return true; // That's what virtual_space()->expand_by(0) would return
aoqi@0 283 }
aoqi@0 284 bool result = virtual_space()->expand_by(bytes);
aoqi@0 285 if (result) {
aoqi@0 286 if (ZapUnusedHeapArea) {
aoqi@0 287 // We need to mangle the newly expanded area. The memregion spans
aoqi@0 288 // end -> new_end, we assume that top -> end is already mangled.
aoqi@0 289 // Do the mangling before post_resize() is called because
aoqi@0 290 // the space is available for allocation after post_resize();
aoqi@0 291 HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
aoqi@0 292 assert(object_space()->end() < virtual_space_high,
aoqi@0 293 "Should be true before post_resize()");
aoqi@0 294 MemRegion mangle_region(object_space()->end(), virtual_space_high);
aoqi@0 295 // Note that the object space has not yet been updated to
aoqi@0 296 // coincede with the new underlying virtual space.
aoqi@0 297 SpaceMangler::mangle_region(mangle_region);
aoqi@0 298 }
aoqi@0 299 post_resize();
aoqi@0 300 if (UsePerfData) {
aoqi@0 301 _space_counters->update_capacity();
aoqi@0 302 _gen_counters->update_all();
aoqi@0 303 }
aoqi@0 304 }
aoqi@0 305
aoqi@0 306 if (result && Verbose && PrintGC) {
aoqi@0 307 size_t new_mem_size = virtual_space()->committed_size();
aoqi@0 308 size_t old_mem_size = new_mem_size - bytes;
aoqi@0 309 gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
aoqi@0 310 SIZE_FORMAT "K to "
aoqi@0 311 SIZE_FORMAT "K",
aoqi@0 312 name(), old_mem_size/K, bytes/K, new_mem_size/K);
aoqi@0 313 }
aoqi@0 314
aoqi@0 315 return result;
aoqi@0 316 }
aoqi@0 317
aoqi@0 318 bool PSOldGen::expand_to_reserved() {
aoqi@0 319 assert_lock_strong(ExpandHeap_lock);
aoqi@0 320 assert_locked_or_safepoint(Heap_lock);
aoqi@0 321
aoqi@0 322 bool result = true;
aoqi@0 323 const size_t remaining_bytes = virtual_space()->uncommitted_size();
aoqi@0 324 if (remaining_bytes > 0) {
aoqi@0 325 result = expand_by(remaining_bytes);
aoqi@0 326 DEBUG_ONLY(if (!result) warning("grow to reserve failed"));
aoqi@0 327 }
aoqi@0 328 return result;
aoqi@0 329 }
aoqi@0 330
aoqi@0 331 void PSOldGen::shrink(size_t bytes) {
aoqi@0 332 assert_lock_strong(ExpandHeap_lock);
aoqi@0 333 assert_locked_or_safepoint(Heap_lock);
aoqi@0 334
aoqi@0 335 size_t size = align_size_down(bytes, virtual_space()->alignment());
aoqi@0 336 if (size > 0) {
aoqi@0 337 assert_lock_strong(ExpandHeap_lock);
aoqi@0 338 virtual_space()->shrink_by(bytes);
aoqi@0 339 post_resize();
aoqi@0 340
aoqi@0 341 if (Verbose && PrintGC) {
aoqi@0 342 size_t new_mem_size = virtual_space()->committed_size();
aoqi@0 343 size_t old_mem_size = new_mem_size + bytes;
aoqi@0 344 gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by "
aoqi@0 345 SIZE_FORMAT "K to "
aoqi@0 346 SIZE_FORMAT "K",
aoqi@0 347 name(), old_mem_size/K, bytes/K, new_mem_size/K);
aoqi@0 348 }
aoqi@0 349 }
aoqi@0 350 }
aoqi@0 351
aoqi@0 352 void PSOldGen::resize(size_t desired_free_space) {
aoqi@0 353 const size_t alignment = virtual_space()->alignment();
aoqi@0 354 const size_t size_before = virtual_space()->committed_size();
aoqi@0 355 size_t new_size = used_in_bytes() + desired_free_space;
aoqi@0 356 if (new_size < used_in_bytes()) {
aoqi@0 357 // Overflowed the addition.
aoqi@0 358 new_size = gen_size_limit();
aoqi@0 359 }
aoqi@0 360 // Adjust according to our min and max
aoqi@0 361 new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size());
aoqi@0 362
aoqi@0 363 assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?");
aoqi@0 364 new_size = align_size_up(new_size, alignment);
aoqi@0 365
aoqi@0 366 const size_t current_size = capacity_in_bytes();
aoqi@0 367
aoqi@0 368 if (PrintAdaptiveSizePolicy && Verbose) {
aoqi@0 369 gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
aoqi@0 370 "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
aoqi@0 371 " new size: " SIZE_FORMAT " current size " SIZE_FORMAT
aoqi@0 372 " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
aoqi@0 373 desired_free_space, used_in_bytes(), new_size, current_size,
aoqi@0 374 gen_size_limit(), min_gen_size());
aoqi@0 375 }
aoqi@0 376
aoqi@0 377 if (new_size == current_size) {
aoqi@0 378 // No change requested
aoqi@0 379 return;
aoqi@0 380 }
aoqi@0 381 if (new_size > current_size) {
aoqi@0 382 size_t change_bytes = new_size - current_size;
aoqi@0 383 expand(change_bytes);
aoqi@0 384 } else {
aoqi@0 385 size_t change_bytes = current_size - new_size;
aoqi@0 386 // shrink doesn't grab this lock, expand does. Is that right?
aoqi@0 387 MutexLocker x(ExpandHeap_lock);
aoqi@0 388 shrink(change_bytes);
aoqi@0 389 }
aoqi@0 390
aoqi@0 391 if (PrintAdaptiveSizePolicy) {
aoqi@0 392 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
aoqi@0 393 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
aoqi@0 394 gclog_or_tty->print_cr("AdaptiveSizePolicy::old generation size: "
aoqi@0 395 "collection: %d "
aoqi@0 396 "(" SIZE_FORMAT ") -> (" SIZE_FORMAT ") ",
aoqi@0 397 heap->total_collections(),
aoqi@0 398 size_before, virtual_space()->committed_size());
aoqi@0 399 }
aoqi@0 400 }
aoqi@0 401
aoqi@0 402 // NOTE! We need to be careful about resizing. During a GC, multiple
aoqi@0 403 // allocators may be active during heap expansion. If we allow the
aoqi@0 404 // heap resizing to become visible before we have correctly resized
aoqi@0 405 // all heap related data structures, we may cause program failures.
aoqi@0 406 void PSOldGen::post_resize() {
aoqi@0 407 // First construct a memregion representing the new size
aoqi@0 408 MemRegion new_memregion((HeapWord*)virtual_space()->low(),
aoqi@0 409 (HeapWord*)virtual_space()->high());
aoqi@0 410 size_t new_word_size = new_memregion.word_size();
aoqi@0 411
aoqi@0 412 start_array()->set_covered_region(new_memregion);
aoqi@0 413 Universe::heap()->barrier_set()->resize_covered_region(new_memregion);
aoqi@0 414
aoqi@0 415 // ALWAYS do this last!!
aoqi@1 416 if(UseOldNUMA) {
aoqi@1 417 HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
aoqi@1 418 object_space()->set_end(virtual_space_high);
aoqi@1 419 }
aoqi@1 420 else {
aoqi@1 421 object_space()->initialize(new_memregion,
aoqi@1 422 SpaceDecorator::DontClear,
aoqi@1 423 SpaceDecorator::DontMangle);
aoqi@1 424 }
aoqi@1 425
aoqi@0 426 assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
aoqi@0 427 "Sanity");
aoqi@0 428 }
aoqi@0 429
aoqi@0 430 size_t PSOldGen::gen_size_limit() {
aoqi@0 431 return _max_gen_size;
aoqi@0 432 }
aoqi@0 433
aoqi@0 434 void PSOldGen::reset_after_change() {
aoqi@0 435 ShouldNotReachHere();
aoqi@0 436 return;
aoqi@0 437 }
aoqi@0 438
aoqi@0 439 size_t PSOldGen::available_for_expansion() {
aoqi@0 440 ShouldNotReachHere();
aoqi@0 441 return 0;
aoqi@0 442 }
aoqi@0 443
aoqi@0 444 size_t PSOldGen::available_for_contraction() {
aoqi@0 445 ShouldNotReachHere();
aoqi@0 446 return 0;
aoqi@0 447 }
aoqi@0 448
aoqi@0 449 void PSOldGen::print() const { print_on(tty);}
aoqi@0 450 void PSOldGen::print_on(outputStream* st) const {
aoqi@0 451 st->print(" %-15s", name());
aoqi@0 452 if (PrintGCDetails && Verbose) {
aoqi@0 453 st->print(" total " SIZE_FORMAT ", used " SIZE_FORMAT,
aoqi@0 454 capacity_in_bytes(), used_in_bytes());
aoqi@0 455 } else {
aoqi@0 456 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
aoqi@0 457 capacity_in_bytes()/K, used_in_bytes()/K);
aoqi@0 458 }
aoqi@0 459 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
aoqi@0 460 virtual_space()->low_boundary(),
aoqi@0 461 virtual_space()->high(),
aoqi@0 462 virtual_space()->high_boundary());
aoqi@0 463
aoqi@0 464 st->print(" object"); object_space()->print_on(st);
aoqi@0 465 }
aoqi@0 466
aoqi@0 467 void PSOldGen::print_used_change(size_t prev_used) const {
aoqi@0 468 gclog_or_tty->print(" [%s:", name());
aoqi@0 469 gclog_or_tty->print(" " SIZE_FORMAT "K"
aoqi@0 470 "->" SIZE_FORMAT "K"
aoqi@0 471 "(" SIZE_FORMAT "K)",
aoqi@0 472 prev_used / K, used_in_bytes() / K,
aoqi@0 473 capacity_in_bytes() / K);
aoqi@0 474 gclog_or_tty->print("]");
aoqi@0 475 }
aoqi@0 476
aoqi@0 477 void PSOldGen::update_counters() {
aoqi@0 478 if (UsePerfData) {
aoqi@0 479 _space_counters->update_all();
aoqi@0 480 _gen_counters->update_all();
aoqi@0 481 }
aoqi@0 482 }
aoqi@0 483
aoqi@0 484 #ifndef PRODUCT
aoqi@0 485
aoqi@0 486 void PSOldGen::space_invariants() {
aoqi@0 487 assert(object_space()->end() == (HeapWord*) virtual_space()->high(),
aoqi@0 488 "Space invariant");
aoqi@0 489 assert(object_space()->bottom() == (HeapWord*) virtual_space()->low(),
aoqi@0 490 "Space invariant");
aoqi@0 491 assert(virtual_space()->low_boundary() <= virtual_space()->low(),
aoqi@0 492 "Space invariant");
aoqi@0 493 assert(virtual_space()->high_boundary() >= virtual_space()->high(),
aoqi@0 494 "Space invariant");
aoqi@0 495 assert(virtual_space()->low_boundary() == (char*) _reserved.start(),
aoqi@0 496 "Space invariant");
aoqi@0 497 assert(virtual_space()->high_boundary() == (char*) _reserved.end(),
aoqi@0 498 "Space invariant");
aoqi@0 499 assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
aoqi@0 500 "Space invariant");
aoqi@0 501 }
aoqi@0 502 #endif
aoqi@0 503
aoqi@0 504 void PSOldGen::verify() {
aoqi@0 505 object_space()->verify();
aoqi@0 506 }
aoqi@0 507 class VerifyObjectStartArrayClosure : public ObjectClosure {
aoqi@0 508 PSOldGen* _gen;
aoqi@0 509 ObjectStartArray* _start_array;
aoqi@0 510
aoqi@0 511 public:
aoqi@0 512 VerifyObjectStartArrayClosure(PSOldGen* gen, ObjectStartArray* start_array) :
aoqi@0 513 _gen(gen), _start_array(start_array) { }
aoqi@0 514
aoqi@0 515 virtual void do_object(oop obj) {
aoqi@0 516 HeapWord* test_addr = (HeapWord*)obj + 1;
aoqi@0 517 guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj, "ObjectStartArray cannot find start of object");
aoqi@0 518 guarantee(_start_array->is_block_allocated((HeapWord*)obj), "ObjectStartArray missing block allocation");
aoqi@0 519 }
aoqi@0 520 };
aoqi@0 521
aoqi@0 522 void PSOldGen::verify_object_start_array() {
aoqi@0 523 VerifyObjectStartArrayClosure check( this, &_start_array );
aoqi@0 524 object_iterate(&check);
aoqi@0 525 }
aoqi@0 526
aoqi@0 527 #ifndef PRODUCT
aoqi@0 528 void PSOldGen::record_spaces_top() {
aoqi@0 529 assert(ZapUnusedHeapArea, "Not mangling unused space");
aoqi@0 530 object_space()->set_top_for_allocations();
aoqi@0 531 }
aoqi@0 532 #endif

mercurial