src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp

/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Initialize collector policy
  _collector_policy = new GenerationSizer();
  _collector_policy->initialize_all();

  const size_t heap_size = _collector_policy->max_heap_byte_size();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());
  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);

  os::trace_page_sizes("ps main", _collector_policy->min_heap_byte_size(),
                       heap_size, generation_alignment(),
                       heap_rs.base(),
                       heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  barrier_set->initialize();
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Make up the generations.
  // Calculate the maximum size that a generation can grow. This
  // includes growth into the other generation. Note that _max_gen_size
  // is still kept as the maximum size of the generation as the
  // boundaries currently stand.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             _collector_policy->gen_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // Initialize the policy counters - 2 collectors, 3 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

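// Second-phase initialization, run after the heap and generations exist:
// set up the scavenger, pick the full-gc implementation, and initialize
// the promotion machinery.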
void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

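// Push the current generation and metaspace figures into the performance
// counters (the ones exposed to monitoring tools such as jstat).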
void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

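// Capacity and usage are the sum of the two generations; metaspace is
// accounted for separately.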
size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

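// Estimate of the largest capacity the heap can reach: the reserved region
// minus one survivor space, since to-space is not usable for ordinary
// allocation.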
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

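// Only objects in the young gen are moved by a scavenge.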
bool ParallelScavengeHeap::is_scavengable(const void* addr) {
  return is_in_young((oop)addr);
}

#ifdef ASSERT
// Don't implement this by using is_in_young(). This method is used
// in some cases to check that is_in_young() is correct.
bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  // The order of the generations is old (low addr), young (high addr)
  return p >= old_gen()->reserved().end();
}
#endif

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(size_t size,
                                             bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  int gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
               "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // error will be thrown (return NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses). Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
              "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen. This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;  // death march has started
    }
  }
}

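// Try to satisfy an allocation from the old gen without triggering a
// collection; returns NULL when the request should instead go through
// the collection path.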
HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements allocation-flow
// policy, NOT collection policy. We do not check here whether the GC
// time limit has been exceeded; that is the responsibility of the
// heap-specific collection methods. This method decides where to attempt
// allocations and when to attempt collections, but contains no
// collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  // Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  // After mark sweep and young generation allocation failure,
  // allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  // More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  // After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

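// Make the heap walkable: the superclass retires/fills TLABs, then eden is
// given a chance to make any unallocated portion of itself parsable.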
void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

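// TLABs are carved out of eden only, so all TLAB sizing queries are
// delegated to the eden space.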
size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  unsigned int gc_count = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // These values are guarded by the Heap_lock
    gc_count = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

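// Whole-heap iteration: object iteration walks both generations; per-oop
// iteration is not supported by this heap.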
void ParallelScavengeHeap::oop_iterate(ExtendedOopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

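// Find the start of the block containing addr. Only the old gen maintains
// a block-start array; young-gen queries are unimplemented except during
// error reporting.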
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

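// Snapshot the reserved/committed/used state of each generation and space
// for GC tracing.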
PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceAux::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds()
                                   : PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}

void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

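// Report heap and metaspace summaries to the GC tracer; 'when' indicates
// whether this happens before or after a collection.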
void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
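// Debug-only support for ZapUnusedHeapArea: record the spaces' tops before
// a GC so unused areas can be mangled afterwards, making reads of stale
// heap memory easier to catch.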
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif
