src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp

author:      johnc
date:        Thu, 22 Sep 2011 10:57:37 -0700
changeset:   3175:4dfb2df418f2
parent:      3115:c2bf0120ee5d
child:       3269:53074c2c4600
permissions: -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
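
As a rough illustration of the mechanism the summary describes, the sketch below embeds a reference processor in a scan closure: reference objects encountered while scanning are "discovered" as a side effect, and the discovered set is processed at the end of the pause. This is only a minimal, self-contained sketch; the names used here (Oop, RefProcessor, ScanClosure, discover, process_discovered) are illustrative placeholders, not the actual HotSpot/G1 types or APIs.

    #include <vector>

    // Illustrative stand-ins only; the real HotSpot types and signatures differ.
    struct Oop {
      bool is_reference_object;   // e.g. an instance of java.lang.ref.Reference
      Oop* referent;              // the object the Reference points to
    };

    // Records ("discovers") reference objects encountered during the pause.
    class RefProcessor {
      std::vector<Oop*> _discovered;
    public:
      void discover(Oop* ref) { _discovered.push_back(ref); }

      // At the end of the pause, keep referents alive (or clear them) as needed.
      template <typename KeepAlive>
      void process_discovered(KeepAlive keep_alive) {
        for (Oop* ref : _discovered) {
          if (ref->referent != nullptr) {
            keep_alive(ref->referent);
          }
        }
        _discovered.clear();
      }
    };

    // Scan closure with the reference processor embedded, as the summary describes:
    // discovery happens as a side effect of scanning objects.
    class ScanClosure {
      RefProcessor* _rp;
    public:
      explicit ScanClosure(RefProcessor* rp) : _rp(rp) {}
      void do_oop(Oop* obj) {
        if (obj->is_reference_object) {
          _rp->discover(obj);   // processed later, at the end of the pause
        }
        // ... evacuate/scan the object as usual ...
      }
    };

In the real change the closures also evacuate (copy) the objects they scan; the point illustrated is only that discovery rides along with scanning, and processing is deferred until the end of the evacuation pause.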

duke@435 1 /*
kvn@2558 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
stefank@2314 27 #include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
stefank@2314 28 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
stefank@2314 29 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
stefank@2314 30 #include "gc_implementation/parallelScavenge/generationSizer.hpp"
stefank@2314 31 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
stefank@2314 32 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
stefank@2314 33 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
stefank@2314 34 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
stefank@2314 35 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
stefank@2314 36 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
stefank@2314 37 #include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
stefank@2314 38 #include "memory/gcLocker.inline.hpp"
stefank@2314 39 #include "oops/oop.inline.hpp"
stefank@2314 40 #include "runtime/handles.inline.hpp"
stefank@2314 41 #include "runtime/java.hpp"
stefank@2314 42 #include "runtime/vmThread.hpp"
stefank@2314 43 #include "utilities/vmError.hpp"
duke@435 44
duke@435 45 PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
duke@435 46 PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
duke@435 47 PSPermGen* ParallelScavengeHeap::_perm_gen = NULL;
duke@435 48 PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
duke@435 49 PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
duke@435 50 ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
duke@435 51 GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;
duke@435 52
duke@435 53 static void trace_gen_sizes(const char* const str,
duke@435 54 size_t pg_min, size_t pg_max,
duke@435 55 size_t og_min, size_t og_max,
duke@435 56 size_t yg_min, size_t yg_max)
duke@435 57 {
duke@435 58 if (TracePageSizes) {
duke@435 59 tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
duke@435 60 SIZE_FORMAT "," SIZE_FORMAT " "
duke@435 61 SIZE_FORMAT "," SIZE_FORMAT " "
duke@435 62 SIZE_FORMAT,
duke@435 63 str, pg_min / K, pg_max / K,
duke@435 64 og_min / K, og_max / K,
duke@435 65 yg_min / K, yg_max / K,
duke@435 66 (pg_max + og_max + yg_max) / K);
duke@435 67 }
duke@435 68 }
duke@435 69
duke@435 70 jint ParallelScavengeHeap::initialize() {
ysr@1601 71 CollectedHeap::pre_initialize();
ysr@1601 72
duke@435 73 // Cannot be initialized until after the flags are parsed
jmasa@1822 74 // GenerationSizer flag_parser;
jmasa@1822 75 _collector_policy = new GenerationSizer();
duke@435 76
jmasa@1822 77 size_t yg_min_size = _collector_policy->min_young_gen_size();
jmasa@1822 78 size_t yg_max_size = _collector_policy->max_young_gen_size();
jmasa@1822 79 size_t og_min_size = _collector_policy->min_old_gen_size();
jmasa@1822 80 size_t og_max_size = _collector_policy->max_old_gen_size();
duke@435 81 // Why isn't there a min_perm_gen_size()?
jmasa@1822 82 size_t pg_min_size = _collector_policy->perm_gen_size();
jmasa@1822 83 size_t pg_max_size = _collector_policy->max_perm_gen_size();
duke@435 84
duke@435 85 trace_gen_sizes("ps heap raw",
duke@435 86 pg_min_size, pg_max_size,
duke@435 87 og_min_size, og_max_size,
duke@435 88 yg_min_size, yg_max_size);
duke@435 89
duke@435 90 // The ReservedSpace ctor used below requires that the page size for the perm
duke@435 91 // gen is <= the page size for the rest of the heap (young + old gens).
duke@435 92 const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
duke@435 93 yg_max_size + og_max_size,
duke@435 94 8);
duke@435 95 const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
duke@435 96 pg_max_size, 16),
duke@435 97 og_page_sz);
duke@435 98
duke@435 99 const size_t pg_align = set_alignment(_perm_gen_alignment, pg_page_sz);
duke@435 100 const size_t og_align = set_alignment(_old_gen_alignment, og_page_sz);
duke@435 101 const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);
duke@435 102
duke@435 103 // Update sizes to reflect the selected page size(s).
duke@435 104 //
duke@435 105 // NEEDS_CLEANUP. The default TwoGenerationCollectorPolicy uses NewRatio; it
duke@435 106 // should check UseAdaptiveSizePolicy. Changes from generationSizer could
duke@435 107 // move to the common code.
duke@435 108 yg_min_size = align_size_up(yg_min_size, yg_align);
duke@435 109 yg_max_size = align_size_up(yg_max_size, yg_align);
jmasa@1822 110 size_t yg_cur_size =
jmasa@1822 111 align_size_up(_collector_policy->young_gen_size(), yg_align);
duke@435 112 yg_cur_size = MAX2(yg_cur_size, yg_min_size);
duke@435 113
duke@435 114 og_min_size = align_size_up(og_min_size, og_align);
kvn@2558 115 // Align old gen size down to preserve specified heap size.
kvn@2558 116 assert(og_align == yg_align, "sanity");
kvn@2558 117 og_max_size = align_size_down(og_max_size, og_align);
kvn@2558 118 og_max_size = MAX2(og_max_size, og_min_size);
jmasa@1822 119 size_t og_cur_size =
kvn@2558 120 align_size_down(_collector_policy->old_gen_size(), og_align);
duke@435 121 og_cur_size = MAX2(og_cur_size, og_min_size);
duke@435 122
duke@435 123 pg_min_size = align_size_up(pg_min_size, pg_align);
duke@435 124 pg_max_size = align_size_up(pg_max_size, pg_align);
duke@435 125 size_t pg_cur_size = pg_min_size;
duke@435 126
duke@435 127 trace_gen_sizes("ps heap rnd",
duke@435 128 pg_min_size, pg_max_size,
duke@435 129 og_min_size, og_max_size,
duke@435 130 yg_min_size, yg_max_size);
duke@435 131
kvn@1077 132 const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
kvn@1077 133 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
kvn@1077 134
duke@435 135 // The main part of the heap (old gen + young gen) can often use a larger page
duke@435 136 // size than is needed or wanted for the perm gen. Use the "compound
duke@435 137 // alignment" ReservedSpace ctor to avoid having to use the same page size for
duke@435 138 // all gens.
kvn@1077 139
coleenp@672 140 ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
kvn@1077 141 og_align, addr);
kvn@1077 142
kvn@1077 143 if (UseCompressedOops) {
kvn@1077 144 if (addr != NULL && !heap_rs.is_reserved()) {
kvn@1077 145 // Failed to reserve at specified address - the requested memory
kvn@1077 146 // region is already taken, for example, by the 'java' launcher.
kvn@1077 147 // Try again to reserve the heap at a higher address.
kvn@1077 148 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
kvn@1077 149 ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
kvn@1077 150 og_align, addr);
kvn@1077 151 if (addr != NULL && !heap_rs0.is_reserved()) {
kvn@1077 152 // Failed to reserve at specified address again - give up.
kvn@1077 153 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
kvn@1077 154 assert(addr == NULL, "");
kvn@1077 155 ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
kvn@1077 156 og_align, addr);
kvn@1077 157 heap_rs = heap_rs1;
kvn@1077 158 } else {
kvn@1077 159 heap_rs = heap_rs0;
kvn@1077 160 }
kvn@1077 161 }
kvn@1077 162 }
kvn@1077 163
duke@435 164 os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
duke@435 165 heap_rs.base(), pg_max_size);
duke@435 166 os::trace_page_sizes("ps main", og_min_size + yg_min_size,
duke@435 167 og_max_size + yg_max_size, og_page_sz,
duke@435 168 heap_rs.base() + pg_max_size,
duke@435 169 heap_rs.size() - pg_max_size);
duke@435 170 if (!heap_rs.is_reserved()) {
duke@435 171 vm_shutdown_during_initialization(
duke@435 172 "Could not reserve enough space for object heap");
duke@435 173 return JNI_ENOMEM;
duke@435 174 }
duke@435 175
duke@435 176 _reserved = MemRegion((HeapWord*)heap_rs.base(),
duke@435 177 (HeapWord*)(heap_rs.base() + heap_rs.size()));
duke@435 178
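// Create the card table barrier set for the whole reserved region; the second
// argument (3) is the maximum number of covered regions.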
duke@435 179 CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
duke@435 180 _barrier_set = barrier_set;
duke@435 181 oopDesc::set_bs(_barrier_set);
duke@435 182 if (_barrier_set == NULL) {
duke@435 183 vm_shutdown_during_initialization(
duke@435 184 "Could not reserve enough space for barrier set");
duke@435 185 return JNI_ENOMEM;
duke@435 186 }
duke@435 187
duke@435 188 // Initial young gen size is 4 Mb
duke@435 189 //
duke@435 190 // XXX - what about flag_parser.young_gen_size()?
duke@435 191 const size_t init_young_size = align_size_up(4 * M, yg_align);
duke@435 192 yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);
duke@435 193
duke@435 194 // Split the reserved space into perm gen and the main heap (everything else).
duke@435 195 // The main heap uses a different alignment.
duke@435 196 ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
duke@435 197 ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);
duke@435 198
duke@435 199 // Make up the generations
duke@435 200 // Calculate the maximum size that a generation can grow. This
duke@435 201 // includes growth into the other generation. Note that the
duke@435 202 // parameter _max_gen_size is kept as the maximum
duke@435 203 // size of the generation as the boundaries currently stand.
duke@435 204 // _max_gen_size is still used as that value.
duke@435 205 double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
duke@435 206 double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
duke@435 207
duke@435 208 _gens = new AdjoiningGenerations(main_rs,
duke@435 209 og_cur_size,
duke@435 210 og_min_size,
duke@435 211 og_max_size,
duke@435 212 yg_cur_size,
duke@435 213 yg_min_size,
duke@435 214 yg_max_size,
duke@435 215 yg_align);
duke@435 216
duke@435 217 _old_gen = _gens->old_gen();
duke@435 218 _young_gen = _gens->young_gen();
duke@435 219
duke@435 220 const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
duke@435 221 const size_t old_capacity = _old_gen->capacity_in_bytes();
duke@435 222 const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
duke@435 223 _size_policy =
duke@435 224 new PSAdaptiveSizePolicy(eden_capacity,
duke@435 225 initial_promo_size,
duke@435 226 young_gen()->to_space()->capacity_in_bytes(),
jmasa@448 227 intra_heap_alignment(),
duke@435 228 max_gc_pause_sec,
duke@435 229 max_gc_minor_pause_sec,
duke@435 230 GCTimeRatio
duke@435 231 );
duke@435 232
duke@435 233 _perm_gen = new PSPermGen(perm_rs,
duke@435 234 pg_align,
duke@435 235 pg_cur_size,
duke@435 236 pg_cur_size,
duke@435 237 pg_max_size,
duke@435 238 "perm", 2);
duke@435 239
duke@435 240 assert(!UseAdaptiveGCBoundary ||
duke@435 241 (old_gen()->virtual_space()->high_boundary() ==
duke@435 242 young_gen()->virtual_space()->low_boundary()),
duke@435 243 "Boundaries must meet");
duke@435 244 // initialize the policy counters - 2 collectors, 3 generations
duke@435 245 _gc_policy_counters =
duke@435 246 new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
duke@435 247 _psh = this;
duke@435 248
duke@435 249 // Set up the GCTaskManager
duke@435 250 _gc_task_manager = GCTaskManager::create(ParallelGCThreads);
duke@435 251
duke@435 252 if (UseParallelOldGC && !PSParallelCompact::initialize()) {
duke@435 253 return JNI_ENOMEM;
duke@435 254 }
duke@435 255
duke@435 256 return JNI_OK;
duke@435 257 }
duke@435 258
duke@435 259 void ParallelScavengeHeap::post_initialize() {
duke@435 260 // Need to init the tenuring threshold
duke@435 261 PSScavenge::initialize();
duke@435 262 if (UseParallelOldGC) {
duke@435 263 PSParallelCompact::post_initialize();
duke@435 264 } else {
duke@435 265 PSMarkSweep::initialize();
duke@435 266 }
duke@435 267 PSPromotionManager::initialize();
duke@435 268 }
duke@435 269
duke@435 270 void ParallelScavengeHeap::update_counters() {
duke@435 271 young_gen()->update_counters();
duke@435 272 old_gen()->update_counters();
duke@435 273 perm_gen()->update_counters();
duke@435 274 }
duke@435 275
duke@435 276 size_t ParallelScavengeHeap::capacity() const {
duke@435 277 size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
duke@435 278 return value;
duke@435 279 }
duke@435 280
duke@435 281 size_t ParallelScavengeHeap::used() const {
duke@435 282 size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
duke@435 283 return value;
duke@435 284 }
duke@435 285
duke@435 286 bool ParallelScavengeHeap::is_maximal_no_gc() const {
duke@435 287 return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
duke@435 288 }
duke@435 289
duke@435 290
duke@435 291 size_t ParallelScavengeHeap::permanent_capacity() const {
duke@435 292 return perm_gen()->capacity_in_bytes();
duke@435 293 }
duke@435 294
duke@435 295 size_t ParallelScavengeHeap::permanent_used() const {
duke@435 296 return perm_gen()->used_in_bytes();
duke@435 297 }
duke@435 298
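// Estimate of the maximum usable heap size: the reserved region minus the perm
// gen and minus one survivor space (only one survivor space holds objects at a time).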
duke@435 299 size_t ParallelScavengeHeap::max_capacity() const {
duke@435 300 size_t estimated = reserved_region().byte_size();
duke@435 301 estimated -= perm_gen()->reserved().byte_size();
duke@435 302 if (UseAdaptiveSizePolicy) {
duke@435 303 estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
duke@435 304 } else {
duke@435 305 estimated -= young_gen()->to_space()->capacity_in_bytes();
duke@435 306 }
duke@435 307 return MAX2(estimated, capacity());
duke@435 308 }
duke@435 309
duke@435 310 bool ParallelScavengeHeap::is_in(const void* p) const {
duke@435 311 if (young_gen()->is_in(p)) {
duke@435 312 return true;
duke@435 313 }
duke@435 314
duke@435 315 if (old_gen()->is_in(p)) {
duke@435 316 return true;
duke@435 317 }
duke@435 318
duke@435 319 if (perm_gen()->is_in(p)) {
duke@435 320 return true;
duke@435 321 }
duke@435 322
duke@435 323 return false;
duke@435 324 }
duke@435 325
duke@435 326 bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
duke@435 327 if (young_gen()->is_in_reserved(p)) {
duke@435 328 return true;
duke@435 329 }
duke@435 330
duke@435 331 if (old_gen()->is_in_reserved(p)) {
duke@435 332 return true;
duke@435 333 }
duke@435 334
duke@435 335 if (perm_gen()->is_in_reserved(p)) {
duke@435 336 return true;
duke@435 337 }
duke@435 338
duke@435 339 return false;
duke@435 340 }
duke@435 341
jmasa@2909 342 bool ParallelScavengeHeap::is_scavengable(const void* addr) {
jmasa@2909 343 return is_in_young((oop)addr);
jmasa@2909 344 }
jmasa@2909 345
jmasa@2909 346 #ifdef ASSERT
jmasa@2909 347 // Don't implement this by using is_in_young(). This method is used
jmasa@2909 348 // in some cases to check that is_in_young() is correct.
jmasa@2909 349 bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
jmasa@2909 350 assert(is_in_reserved(p) || p == NULL,
jmasa@2909 351 "Does not work if address is non-null and outside of the heap");
jmasa@2909 352 // The order of the generations is perm (low addr), old, young (high addr)
jmasa@2909 353 return p >= old_gen()->reserved().end();
jmasa@2909 354 }
jmasa@2909 355 #endif
jmasa@2909 356
duke@435 357 // There are two levels of allocation policy here.
duke@435 358 //
duke@435 359 // When an allocation request fails, the requesting thread must invoke a VM
duke@435 360 // operation, transfer control to the VM thread, and await the results of a
duke@435 361 // garbage collection. That is quite expensive, and we should avoid doing it
duke@435 362 // multiple times if possible.
duke@435 363 //
duke@435 364 // To accomplish this, we have a basic allocation policy, and also a
duke@435 365 // failed allocation policy.
duke@435 366 //
duke@435 367 // The basic allocation policy controls how you allocate memory without
duke@435 368 // attempting garbage collection. It is okay to grab locks and
duke@435 369 // expand the heap, if that can be done without coming to a safepoint.
duke@435 370 // It is likely that the basic allocation policy will not be very
duke@435 371 // aggressive.
duke@435 372 //
duke@435 373 // The failed allocation policy is invoked from the VM thread after
duke@435 374 // the basic allocation policy is unable to satisfy a mem_allocate
duke@435 375 // request. This policy needs to cover the entire range of collection,
duke@435 376 // heap expansion, and out-of-memory conditions. It should make every
duke@435 377 // attempt to allocate the requested memory.
duke@435 378
duke@435 379 // Basic allocation policy. Should never be called at a safepoint, or
duke@435 380 // from the VM thread.
duke@435 381 //
duke@435 382 // This method must handle cases where many mem_allocate requests fail
duke@435 383 // simultaneously. When that happens, only one VM operation will succeed,
duke@435 384 // and the rest will not be executed. For that reason, this method loops
duke@435 385 // during failed allocation attempts. If the java heap becomes exhausted,
duke@435 386 // we rely on the size_policy object to force a bail out.
duke@435 387 HeapWord* ParallelScavengeHeap::mem_allocate(
duke@435 388 size_t size,
duke@435 389 bool* gc_overhead_limit_was_exceeded) {
duke@435 390 assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
duke@435 391 assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
duke@435 392 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
duke@435 393
jmasa@1822 394 // In general gc_overhead_limit_was_exceeded should be false so
jmasa@1822 395 // set it so here and reset it to true only if the gc time
jmasa@1822 396 // limit is being exceeded as checked below.
jmasa@1822 397 *gc_overhead_limit_was_exceeded = false;
jmasa@1822 398
tonyp@2971 399 HeapWord* result = young_gen()->allocate(size);
duke@435 400
duke@435 401 uint loop_count = 0;
duke@435 402 uint gc_count = 0;
duke@435 403
duke@435 404 while (result == NULL) {
duke@435 405 // We don't want to have multiple collections for a single filled generation.
duke@435 406 // To prevent this, each thread tracks the total_collections() value, and if
duke@435 407 // the count has changed, does not do a new collection.
duke@435 408 //
duke@435 409 // The collection count must be read only while holding the heap lock. VM
duke@435 410 // operations also hold the heap lock during collections. There is a lock
duke@435 411 // contention case where thread A blocks waiting on the Heap_lock, while
duke@435 412 // thread B is holding it doing a collection. When thread A gets the lock,
duke@435 413 // the collection count has already changed. To prevent duplicate collections,
duke@435 414 // the policy MUST attempt allocations during the same period it reads the
duke@435 415 // total_collections() value!
duke@435 416 {
duke@435 417 MutexLocker ml(Heap_lock);
duke@435 418 gc_count = Universe::heap()->total_collections();
duke@435 419
tonyp@2971 420 result = young_gen()->allocate(size);
duke@435 421
duke@435 422 // (1) If the requested object is too large to easily fit in the
duke@435 423 // young_gen, or
duke@435 424 // (2) If GC is locked out via GCLocker, young gen is full and
duke@435 425 // the need for a GC already signalled to GCLocker (done
duke@435 426 // at a safepoint),
duke@435 427 // ... then, rather than force a safepoint and (a potentially futile)
duke@435 428 // collection (attempt) for each allocation, try allocation directly
duke@435 429 // in old_gen. For case (2) above, we may in the future allow
duke@435 430 // TLAB allocation directly in the old gen.
duke@435 431 if (result != NULL) {
duke@435 432 return result;
duke@435 433 }
tonyp@2971 434 if (size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
tonyp@2971 435 result = old_gen()->allocate(size);
duke@435 436 if (result != NULL) {
duke@435 437 return result;
duke@435 438 }
duke@435 439 }
duke@435 440 if (GC_locker::is_active_and_needs_gc()) {
duke@435 441 // If this thread is not in a jni critical section, we stall
duke@435 442 // the requestor until the critical section has cleared and
duke@435 443 // GC is allowed. When the critical section clears, a GC is
duke@435 444 // initiated by the last thread exiting the critical section; so
duke@435 445 // we retry the allocation sequence from the beginning of the loop,
duke@435 446 // rather than causing more, now probably unnecessary, GC attempts.
duke@435 447 JavaThread* jthr = JavaThread::current();
duke@435 448 if (!jthr->in_critical()) {
duke@435 449 MutexUnlocker mul(Heap_lock);
duke@435 450 GC_locker::stall_until_clear();
duke@435 451 continue;
duke@435 452 } else {
duke@435 453 if (CheckJNICalls) {
duke@435 454 fatal("Possible deadlock due to allocating while"
duke@435 455 " in jni critical section");
duke@435 456 }
duke@435 457 return NULL;
duke@435 458 }
duke@435 459 }
duke@435 460 }
duke@435 461
duke@435 462 if (result == NULL) {
duke@435 463
duke@435 464 // Generate a VM operation
tonyp@2971 465 VM_ParallelGCFailedAllocation op(size, gc_count);
duke@435 466 VMThread::execute(&op);
duke@435 467
duke@435 468 // Did the VM operation execute? If so, return the result directly.
duke@435 469 // This prevents us from looping until time out on requests that can
duke@435 470 // not be satisfied.
duke@435 471 if (op.prologue_succeeded()) {
duke@435 472 assert(Universe::heap()->is_in_or_null(op.result()),
duke@435 473 "result not in heap");
duke@435 474
duke@435 475 // If GC was locked out during VM operation then retry allocation
duke@435 476 // and/or stall as necessary.
duke@435 477 if (op.gc_locked()) {
duke@435 478 assert(op.result() == NULL, "must be NULL if gc_locked() is true");
duke@435 479 continue; // retry and/or stall as necessary
duke@435 480 }
jmasa@1822 481
jmasa@1822 482 // Exit the loop if the gc time limit has been exceeded.
jmasa@1822 483 // The allocation must have failed above ("result" guarding
jmasa@1822 484 // this path is NULL) and the most recent collection has exceeded the
jmasa@1822 485 // gc overhead limit (although enough may have been collected to
jmasa@1822 486 // satisfy the allocation). Exit the loop so that an out-of-memory
jmasa@1822 487 // will be thrown (return a NULL ignoring the contents of
jmasa@1822 488 // op.result()),
jmasa@1822 489 // but clear gc_overhead_limit_exceeded so that the next collection
jmasa@1822 490 // starts with a clean slate (i.e., forgets about previous overhead
jmasa@1822 491 // excesses). Fill op.result() with a filler object so that the
jmasa@1822 492 // heap remains parsable.
jmasa@1822 493 const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
jmasa@1822 494 const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
jmasa@1822 495 assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
jmasa@1822 496 if (limit_exceeded && softrefs_clear) {
jmasa@1822 497 *gc_overhead_limit_was_exceeded = true;
jmasa@1822 498 size_policy()->set_gc_overhead_limit_exceeded(false);
jmasa@1822 499 if (PrintGCDetails && Verbose) {
jmasa@1822 500 gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
jmasa@1822 501 "return NULL because gc_overhead_limit_exceeded is set");
jmasa@1822 502 }
jmasa@1822 503 if (op.result() != NULL) {
jmasa@1822 504 CollectedHeap::fill_with_object(op.result(), size);
jmasa@1822 505 }
jmasa@1822 506 return NULL;
duke@435 507 }
jmasa@1822 508
duke@435 509 return op.result();
duke@435 510 }
duke@435 511 }
duke@435 512
duke@435 513 // The policy object will prevent us from looping forever. If the
duke@435 514 // time spent in gc crosses a threshold, we will bail out.
duke@435 515 loop_count++;
duke@435 516 if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
duke@435 517 (loop_count % QueuedAllocationWarningCount == 0)) {
duke@435 518 warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
tonyp@2971 519 " size=%d", loop_count, size);
duke@435 520 }
duke@435 521 }
duke@435 522
duke@435 523 return result;
duke@435 524 }
duke@435 525
duke@435 526 // Failed allocation policy. Must be called from the VM thread, and
duke@435 527 // only at a safepoint! Note that this method has policy for allocation
duke@435 528 // flow, and NOT collection policy. So we do not check for GC time
duke@435 529 // over the limit here; that is the responsibility of the heap-specific
duke@435 530 // collection methods. This method decides where to attempt allocations,
duke@435 531 // and when to attempt collections, but no collection specific policy.
tonyp@2971 532 HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
duke@435 533 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
duke@435 534 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
duke@435 535 assert(!Universe::heap()->is_gc_active(), "not reentrant");
duke@435 536 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
duke@435 537
duke@435 538 size_t mark_sweep_invocation_count = total_invocations();
duke@435 539
duke@435 540 // We assume (and assert!) that an allocation at this point will fail
duke@435 541 // unless we collect.
duke@435 542
duke@435 543 // First level allocation failure, scavenge and allocate in young gen.
duke@435 544 GCCauseSetter gccs(this, GCCause::_allocation_failure);
duke@435 545 PSScavenge::invoke();
tonyp@2971 546 HeapWord* result = young_gen()->allocate(size);
duke@435 547
duke@435 548 // Second level allocation failure.
duke@435 549 // Mark sweep and allocate in young generation.
duke@435 550 if (result == NULL) {
duke@435 551 // There is some chance the scavenge method decided to invoke mark_sweep.
duke@435 552 // Don't mark sweep twice if so.
duke@435 553 if (mark_sweep_invocation_count == total_invocations()) {
duke@435 554 invoke_full_gc(false);
tonyp@2971 555 result = young_gen()->allocate(size);
duke@435 556 }
duke@435 557 }
duke@435 558
duke@435 559 // Third level allocation failure.
duke@435 560 // After mark sweep and young generation allocation failure,
duke@435 561 // allocate in old generation.
tonyp@2971 562 if (result == NULL) {
tonyp@2971 563 result = old_gen()->allocate(size);
duke@435 564 }
duke@435 565
duke@435 566 // Fourth level allocation failure. We're running out of memory.
duke@435 567 // More complete mark sweep and allocate in young generation.
duke@435 568 if (result == NULL) {
duke@435 569 invoke_full_gc(true);
tonyp@2971 570 result = young_gen()->allocate(size);
duke@435 571 }
duke@435 572
duke@435 573 // Fifth level allocation failure.
duke@435 574 // After more complete mark sweep, allocate in old generation.
tonyp@2971 575 if (result == NULL) {
tonyp@2971 576 result = old_gen()->allocate(size);
duke@435 577 }
duke@435 578
duke@435 579 return result;
duke@435 580 }
duke@435 581
duke@435 582 //
duke@435 583 // This is the policy loop for allocating in the permanent generation.
duke@435 584 // If the initial allocation fails, we create a vm operation which will
duke@435 585 // cause a collection.
duke@435 586 HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
duke@435 587 assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
duke@435 588 assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
duke@435 589 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
duke@435 590
duke@435 591 HeapWord* result;
duke@435 592
duke@435 593 uint loop_count = 0;
duke@435 594 uint gc_count = 0;
duke@435 595 uint full_gc_count = 0;
duke@435 596
duke@435 597 do {
duke@435 598 // We don't want to have multiple collections for a single filled generation.
duke@435 599 // To prevent this, each thread tracks the total_collections() value, and if
duke@435 600 // the count has changed, does not do a new collection.
duke@435 601 //
duke@435 602 // The collection count must be read only while holding the heap lock. VM
duke@435 603 // operations also hold the heap lock during collections. There is a lock
duke@435 604 // contention case where thread A blocks waiting on the Heap_lock, while
duke@435 605 // thread B is holding it doing a collection. When thread A gets the lock,
duke@435 606 // the collection count has already changed. To prevent duplicate collections,
duke@435 607 // the policy MUST attempt allocations during the same period it reads the
duke@435 608 // total_collections() value!
duke@435 609 {
duke@435 610 MutexLocker ml(Heap_lock);
duke@435 611 gc_count = Universe::heap()->total_collections();
duke@435 612 full_gc_count = Universe::heap()->total_full_collections();
duke@435 613
duke@435 614 result = perm_gen()->allocate_permanent(size);
apetrusenko@574 615
apetrusenko@574 616 if (result != NULL) {
apetrusenko@574 617 return result;
apetrusenko@574 618 }
apetrusenko@574 619
apetrusenko@574 620 if (GC_locker::is_active_and_needs_gc()) {
apetrusenko@574 621 // If this thread is not in a jni critical section, we stall
apetrusenko@574 622 // the requestor until the critical section has cleared and
apetrusenko@574 623 // GC is allowed. When the critical section clears, a GC is
apetrusenko@574 624 // initiated by the last thread exiting the critical section; so
apetrusenko@574 625 // we retry the allocation sequence from the beginning of the loop,
apetrusenko@574 626 // rather than causing more, now probably unnecessary, GC attempts.
apetrusenko@574 627 JavaThread* jthr = JavaThread::current();
apetrusenko@574 628 if (!jthr->in_critical()) {
apetrusenko@574 629 MutexUnlocker mul(Heap_lock);
apetrusenko@574 630 GC_locker::stall_until_clear();
apetrusenko@574 631 continue;
apetrusenko@574 632 } else {
apetrusenko@574 633 if (CheckJNICalls) {
apetrusenko@574 634 fatal("Possible deadlock due to allocating while"
apetrusenko@574 635 " in jni critical section");
apetrusenko@574 636 }
apetrusenko@574 637 return NULL;
apetrusenko@574 638 }
apetrusenko@574 639 }
duke@435 640 }
duke@435 641
duke@435 642 if (result == NULL) {
duke@435 643
duke@435 644 // Exit the loop if the gc time limit has been exceeded.
duke@435 645 // The allocation must have failed above (result must be NULL),
duke@435 646 // and the most recent collection must have exceeded the
duke@435 647 // gc time limit. Exit the loop so that an out-of-memory
duke@435 648 // will be thrown (returning a NULL will do that), but
jmasa@1822 649 // clear gc_overhead_limit_exceeded so that the next collection
duke@435 650 // will succeed if the application decides to handle the
duke@435 651 // out-of-memory and tries to go on.
jmasa@1822 652 const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
jmasa@1822 653 if (limit_exceeded) {
jmasa@1822 654 size_policy()->set_gc_overhead_limit_exceeded(false);
duke@435 655 if (PrintGCDetails && Verbose) {
jmasa@1822 656 gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
jmasa@1822 657 " return NULL because gc_overhead_limit_exceeded is set");
duke@435 658 }
duke@435 659 assert(result == NULL, "Allocation did not fail");
duke@435 660 return NULL;
duke@435 661 }
duke@435 662
duke@435 663 // Generate a VM operation
duke@435 664 VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
duke@435 665 VMThread::execute(&op);
duke@435 666
duke@435 667 // Did the VM operation execute? If so, return the result directly.
duke@435 668 // This prevents us from looping until time out on requests that can
duke@435 669 // not be satisfied.
duke@435 670 if (op.prologue_succeeded()) {
duke@435 671 assert(Universe::heap()->is_in_permanent_or_null(op.result()),
duke@435 672 "result not in heap");
apetrusenko@574 673 // If GC was locked out during VM operation then retry allocation
apetrusenko@574 674 // and/or stall as necessary.
apetrusenko@574 675 if (op.gc_locked()) {
apetrusenko@574 676 assert(op.result() == NULL, "must be NULL if gc_locked() is true");
apetrusenko@574 677 continue; // retry and/or stall as necessary
apetrusenko@574 678 }
duke@435 679 // If a NULL result is being returned, an out-of-memory
jmasa@1822 680 // will be thrown now. Clear the gc_overhead_limit_exceeded
duke@435 681 // flag to avoid the following situation:
jmasa@1822 682 // gc_overhead_limit_exceeded is set during a collection;
duke@435 683 // the collection fails to return enough space and an OOM is thrown;
jmasa@1822 684 // a subsequent GC then prematurely throws an out-of-memory because
jmasa@1822 685 // the gc_overhead_limit_exceeded counts did not start
jmasa@1822 686 // again from 0.
duke@435 687 if (op.result() == NULL) {
jmasa@1822 688 size_policy()->reset_gc_overhead_limit_count();
duke@435 689 }
duke@435 690 return op.result();
duke@435 691 }
duke@435 692 }
duke@435 693
duke@435 694 // The policy object will prevent us from looping forever. If the
duke@435 695 // time spent in gc crosses a threshold, we will bail out.
duke@435 696 loop_count++;
duke@435 697 if ((QueuedAllocationWarningCount > 0) &&
duke@435 698 (loop_count % QueuedAllocationWarningCount == 0)) {
duke@435 699 warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
duke@435 700 " size=%d", loop_count, size);
duke@435 701 }
duke@435 702 } while (result == NULL);
duke@435 703
duke@435 704 return result;
duke@435 705 }
duke@435 706
duke@435 707 //
duke@435 708 // This is the policy code for permanent allocations which have failed
duke@435 709 // and require a collection. Note that just as in failed_mem_allocate,
duke@435 710 // we do not set collection policy, only where & when to allocate and
duke@435 711 // collect.
duke@435 712 HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
duke@435 713 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
duke@435 714 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
duke@435 715 assert(!Universe::heap()->is_gc_active(), "not reentrant");
duke@435 716 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
duke@435 717 assert(size > perm_gen()->free_in_words(), "Allocation should fail");
duke@435 718
duke@435 719 // We assume (and assert!) that an allocation at this point will fail
duke@435 720 // unless we collect.
duke@435 721
duke@435 722 // First level allocation failure. Mark-sweep and allocate in perm gen.
duke@435 723 GCCauseSetter gccs(this, GCCause::_allocation_failure);
duke@435 724 invoke_full_gc(false);
duke@435 725 HeapWord* result = perm_gen()->allocate_permanent(size);
duke@435 726
duke@435 727 // Second level allocation failure. We're running out of memory.
duke@435 728 if (result == NULL) {
duke@435 729 invoke_full_gc(true);
duke@435 730 result = perm_gen()->allocate_permanent(size);
duke@435 731 }
duke@435 732
duke@435 733 return result;
duke@435 734 }
duke@435 735
duke@435 736 void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
duke@435 737 CollectedHeap::ensure_parsability(retire_tlabs);
duke@435 738 young_gen()->eden_space()->ensure_parsability();
duke@435 739 }
duke@435 740
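// Largest allocation that could currently be satisfied from eden without a GC;
// "unsafe" because concurrent allocations can invalidate the value at any time.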
duke@435 741 size_t ParallelScavengeHeap::unsafe_max_alloc() {
duke@435 742 return young_gen()->eden_space()->free_in_bytes();
duke@435 743 }
duke@435 744
duke@435 745 size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
duke@435 746 return young_gen()->eden_space()->tlab_capacity(thr);
duke@435 747 }
duke@435 748
duke@435 749 size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
duke@435 750 return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
duke@435 751 }
duke@435 752
duke@435 753 HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
tonyp@2971 754 return young_gen()->allocate(size);
duke@435 755 }
duke@435 756
duke@435 757 void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
duke@435 758 CollectedHeap::accumulate_statistics_all_tlabs();
duke@435 759 }
duke@435 760
duke@435 761 void ParallelScavengeHeap::resize_all_tlabs() {
duke@435 762 CollectedHeap::resize_all_tlabs();
duke@435 763 }
duke@435 764
ysr@1462 765 bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
ysr@1462 766 // We don't need barriers for stores to objects in the
ysr@1462 767 // young gen and, a fortiori, for initializing stores to
ysr@1462 768 // objects therein.
ysr@1462 769 return is_in_young(new_obj);
ysr@1462 770 }
ysr@1462 771
duke@435 772 // This method is used by System.gc() and JVMTI.
duke@435 773 void ParallelScavengeHeap::collect(GCCause::Cause cause) {
duke@435 774 assert(!Heap_lock->owned_by_self(),
duke@435 775 "this thread should not own the Heap_lock");
duke@435 776
duke@435 777 unsigned int gc_count = 0;
duke@435 778 unsigned int full_gc_count = 0;
duke@435 779 {
duke@435 780 MutexLocker ml(Heap_lock);
duke@435 781 // This value is guarded by the Heap_lock
duke@435 782 gc_count = Universe::heap()->total_collections();
duke@435 783 full_gc_count = Universe::heap()->total_full_collections();
duke@435 784 }
duke@435 785
duke@435 786 VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
duke@435 787 VMThread::execute(&op);
duke@435 788 }
duke@435 789
duke@435 790 // This interface assumes that it's being called by the
duke@435 791 // vm thread. It collects the heap assuming that the
duke@435 792 // heap lock is already held and that we are executing in
duke@435 793 // the context of the vm thread.
duke@435 794 void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
duke@435 795 assert(Thread::current()->is_VM_thread(), "Precondition#1");
duke@435 796 assert(Heap_lock->is_locked(), "Precondition#2");
duke@435 797 GCCauseSetter gcs(this, cause);
duke@435 798 switch (cause) {
duke@435 799 case GCCause::_heap_inspection:
duke@435 800 case GCCause::_heap_dump: {
duke@435 801 HandleMark hm;
duke@435 802 invoke_full_gc(false);
duke@435 803 break;
duke@435 804 }
duke@435 805 default: // XXX FIX ME
duke@435 806 ShouldNotReachHere();
duke@435 807 }
duke@435 808 }
duke@435 809
duke@435 810
duke@435 811 void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
duke@435 812 Unimplemented();
duke@435 813 }
duke@435 814
duke@435 815 void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
duke@435 816 young_gen()->object_iterate(cl);
duke@435 817 old_gen()->object_iterate(cl);
duke@435 818 perm_gen()->object_iterate(cl);
duke@435 819 }
duke@435 820
duke@435 821 void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
duke@435 822 Unimplemented();
duke@435 823 }
duke@435 824
duke@435 825 void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
duke@435 826 perm_gen()->object_iterate(cl);
duke@435 827 }
duke@435 828
duke@435 829 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
duke@435 830 if (young_gen()->is_in_reserved(addr)) {
duke@435 831 assert(young_gen()->is_in(addr),
duke@435 832 "addr should be in allocated part of young gen");
never@2262 833 // called from os::print_location by find or VMError
never@2262 834 if (Debugging || VMError::fatal_error_in_progress()) return NULL;
duke@435 835 Unimplemented();
duke@435 836 } else if (old_gen()->is_in_reserved(addr)) {
duke@435 837 assert(old_gen()->is_in(addr),
duke@435 838 "addr should be in allocated part of old gen");
duke@435 839 return old_gen()->start_array()->object_start((HeapWord*)addr);
duke@435 840 } else if (perm_gen()->is_in_reserved(addr)) {
duke@435 841 assert(perm_gen()->is_in(addr),
duke@435 842 "addr should be in allocated part of perm gen");
duke@435 843 return perm_gen()->start_array()->object_start((HeapWord*)addr);
duke@435 844 }
duke@435 845 return 0;
duke@435 846 }
duke@435 847
duke@435 848 size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
duke@435 849 return oop(addr)->size();
duke@435 850 }
duke@435 851
duke@435 852 bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
duke@435 853 return block_start(addr) == addr;
duke@435 854 }
duke@435 855
duke@435 856 jlong ParallelScavengeHeap::millis_since_last_gc() {
duke@435 857 return UseParallelOldGC ?
duke@435 858 PSParallelCompact::millis_since_last_gc() :
duke@435 859 PSMarkSweep::millis_since_last_gc();
duke@435 860 }
duke@435 861
duke@435 862 void ParallelScavengeHeap::prepare_for_verify() {
duke@435 863 ensure_parsability(false); // no need to retire TLABs for verification
duke@435 864 }
duke@435 865
duke@435 866 void ParallelScavengeHeap::print() const { print_on(tty); }
duke@435 867
duke@435 868 void ParallelScavengeHeap::print_on(outputStream* st) const {
duke@435 869 young_gen()->print_on(st);
duke@435 870 old_gen()->print_on(st);
duke@435 871 perm_gen()->print_on(st);
duke@435 872 }
duke@435 873
duke@435 874 void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
duke@435 875 PSScavenge::gc_task_manager()->threads_do(tc);
duke@435 876 }
duke@435 877
duke@435 878 void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
duke@435 879 PSScavenge::gc_task_manager()->print_threads_on(st);
duke@435 880 }
duke@435 881
duke@435 882 void ParallelScavengeHeap::print_tracing_info() const {
duke@435 883 if (TraceGen0Time) {
duke@435 884 double time = PSScavenge::accumulated_time()->seconds();
duke@435 885 tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
duke@435 886 }
duke@435 887 if (TraceGen1Time) {
duke@435 888 double time = PSMarkSweep::accumulated_time()->seconds();
duke@435 889 tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
duke@435 890 }
duke@435 891 }
duke@435 892
duke@435 893
johnc@2969 894 void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, VerifyOption option /* ignored */) {
duke@435 895 // Why do we need the total_collections()-filter below?
duke@435 896 if (total_collections() > 0) {
duke@435 897 if (!silent) {
duke@435 898 gclog_or_tty->print("permanent ");
duke@435 899 }
duke@435 900 perm_gen()->verify(allow_dirty);
duke@435 901
duke@435 902 if (!silent) {
duke@435 903 gclog_or_tty->print("tenured ");
duke@435 904 }
duke@435 905 old_gen()->verify(allow_dirty);
duke@435 906
duke@435 907 if (!silent) {
duke@435 908 gclog_or_tty->print("eden ");
duke@435 909 }
duke@435 910 young_gen()->verify(allow_dirty);
duke@435 911 }
duke@435 912 }
duke@435 913
duke@435 914 void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
duke@435 915 if (PrintGCDetails && Verbose) {
duke@435 916 gclog_or_tty->print(" " SIZE_FORMAT
duke@435 917 "->" SIZE_FORMAT
duke@435 918 "(" SIZE_FORMAT ")",
duke@435 919 prev_used, used(), capacity());
duke@435 920 } else {
duke@435 921 gclog_or_tty->print(" " SIZE_FORMAT "K"
duke@435 922 "->" SIZE_FORMAT "K"
duke@435 923 "(" SIZE_FORMAT "K)",
duke@435 924 prev_used / K, used() / K, capacity() / K);
duke@435 925 }
duke@435 926 }
duke@435 927
duke@435 928 ParallelScavengeHeap* ParallelScavengeHeap::heap() {
duke@435 929 assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
duke@435 930 assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
duke@435 931 return _psh;
duke@435 932 }
duke@435 933
duke@435 934 // Before delegating the resize to the young generation,
duke@435 935 // the reserved space for the young and old generations
duke@435 936 // may be changed to accommodate the desired resize.
duke@435 937 void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
duke@435 938 size_t survivor_size) {
duke@435 939 if (UseAdaptiveGCBoundary) {
duke@435 940 if (size_policy()->bytes_absorbed_from_eden() != 0) {
duke@435 941 size_policy()->reset_bytes_absorbed_from_eden();
duke@435 942 return; // The generation changed size already.
duke@435 943 }
duke@435 944 gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
duke@435 945 }
duke@435 946
duke@435 947 // Delegate the resize to the generation.
duke@435 948 _young_gen->resize(eden_size, survivor_size);
duke@435 949 }
duke@435 950
duke@435 951 // Before delegating the resize to the old generation,
duke@435 952 // the reserved space for the young and old generations
duke@435 953 // may be changed to accommodate the desired resize.
duke@435 954 void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
duke@435 955 if (UseAdaptiveGCBoundary) {
duke@435 956 if (size_policy()->bytes_absorbed_from_eden() != 0) {
duke@435 957 size_policy()->reset_bytes_absorbed_from_eden();
duke@435 958 return; // The generation changed size already.
duke@435 959 }
duke@435 960 gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
duke@435 961 }
duke@435 962
duke@435 963 // Delegate the resize to the generation.
duke@435 964 _old_gen->resize(desired_free_space);
duke@435 965 }
jmasa@698 966
jrose@1424 967 ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
jrose@1424 968 // nothing particular
jrose@1424 969 }
jrose@1424 970
jrose@1424 971 ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
jrose@1424 972 // nothing particular
jrose@1424 973 }
jrose@1424 974
jmasa@698 975 #ifndef PRODUCT
jmasa@698 976 void ParallelScavengeHeap::record_gen_tops_before_GC() {
jmasa@698 977 if (ZapUnusedHeapArea) {
jmasa@698 978 young_gen()->record_spaces_top();
jmasa@698 979 old_gen()->record_spaces_top();
jmasa@698 980 perm_gen()->record_spaces_top();
jmasa@698 981 }
jmasa@698 982 }
jmasa@698 983
jmasa@698 984 void ParallelScavengeHeap::gen_mangle_unused_area() {
jmasa@698 985 if (ZapUnusedHeapArea) {
jmasa@698 986 young_gen()->eden_space()->mangle_unused_area();
jmasa@698 987 young_gen()->to_space()->mangle_unused_area();
jmasa@698 988 young_gen()->from_space()->mangle_unused_area();
jmasa@698 989 old_gen()->object_space()->mangle_unused_area();
jmasa@698 990 perm_gen()->object_space()->mangle_unused_area();
jmasa@698 991 }
jmasa@698 992 }
jmasa@698 993 #endif
