Sat, 23 Nov 2013 12:25:13 +0100
8028128: Add a type safe alternative for working with counter based data
Reviewed-by: dholmes, egahlin

/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Initialize collector policy
  _collector_policy = new GenerationSizer();
  _collector_policy->initialize_all();

  const size_t heap_size = _collector_policy->max_heap_byte_size();

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment());
  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);

  os::trace_page_sizes("ps main", _collector_policy->min_heap_byte_size(),
                       heap_size, generation_alignment(),
                       heap_rs.base(),
                       heap_rs.size());

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  // Check for allocation failure before the barrier set is installed and used.
  if (barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);

  // Set up the generations.
  //
  // The maximum size a generation can grow to includes growth into the
  // other generation. Note that _max_gen_size still holds the maximum
  // size of the generation as the boundaries currently stand.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment());

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             _collector_policy->gen_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio);

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");

  // Initialize the policy counters - 2 collectors, 3 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  CompressedClassSpaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  return young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::used() const {
  return young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

bool ParallelScavengeHeap::is_scavengable(const void* addr) {
  return is_in_young((oop)addr);
}

#ifdef ASSERT
// Don't implement this by using is_in_young(). This method is used
// in some cases to check that is_in_young() is correct.
bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  // The order of the generations is old (low addr), young (high addr)
  return p >= old_gen()->reserved().end();
}
#endif
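
// For reference, the adjoining layout that the check above relies on
// (a sketch, assuming the usual AdjoiningGenerations arrangement):
//
//   low addresses                                          high addresses
//   +---------------- old gen ----------------+------- young gen -------+
//   ^                                          ^                        ^
//   _reserved.start()      old_gen()->reserved().end()     _reserved.end()
//
// so any heap address at or above old_gen()->reserved().end() lies in the
// young generation.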

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how memory is allocated without
// attempting a garbage collection. It is okay to grab locks and
// expand the heap, as long as that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.
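//
// In outline (a sketch of the flow implemented below, not extra behavior):
//
//   mem_allocate(size)                    // basic policy; any Java thread
//     loop:
//       young_gen()->allocate(size)       // fast path
//       mem_allocate_old_gen(size)        // under the Heap_lock
//       VM_ParallelGCFailedAllocation     // hand off to the VM thread,
//                                         // which runs failed_mem_allocate
//
//   failed_mem_allocate(size)             // failed policy; VM thread only,
//                                         // at a safepoint; collects and
//                                         // retries the allocation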

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the Java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  int gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
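    //
    // In sketch form (this mirrors the code below; the staleness check lives
    // in the VM operation, not here):
    //
    //   { MutexLocker ml(Heap_lock);
    //     gc_count = total_collections();         // read the count...
    //     result = young_gen()->allocate(size);   // ...and allocate while
    //   }                                         // still holding Heap_lock
    //   VM_ParallelGCFailedAllocation op(size, gc_count);
    //   VMThread::execute(&op);   // the op's prologue is expected to bail
    //                             // out if gc_count is stale, so a collection
    //                             // that already happened is not repeated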
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and a
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
               "result not in heap");

        // If GC was locked out during the VM operation, then retry the
        // allocation and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // will be thrown (return NULL, ignoring the contents of op.result()),
        // but clear gc_overhead_limit_exceeded so that the next collection
        // starts with a clean slate (i.e., forgets about previous overhead
        // excesses). Fill op.result() with a filler object so that the
        // heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
              "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen. This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;  // death march has started
    }
  }
}
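
// To see how the routines above and below cooperate: death_march_check()
// arms the counter when an eden-sized request still fails after a full gc,
// and mem_allocate_old_gen() then satisfies a bounded number of such
// requests (the counter runs up to 64) directly from the old gen before
// resetting it and returning NULL, which forces another collection.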

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements policy for the
// allocation flow, NOT collection policy. In particular, it does not check
// whether the gc time limit has been exceeded; that is the responsibility
// of the heap-specific collection methods. This method decides where to
// attempt allocations and when to attempt collections, nothing more.
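//
// The fallback sequence implemented below, in order:
//   1. scavenge, then allocate in the young gen
//   2. full collection (soft refs kept), then allocate in the young gen
//   3. allocate in the old gen
//   4. full collection with maximum compaction (soft refs cleared),
//      then allocate in the young gen
//   5. allocate in the old gen; if that also fails, return NULL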
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  // Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  // After mark sweep and young generation allocation failure,
  // allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  // More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  // After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  unsigned int gc_count = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}
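
// For example, an explicit Java-level request reaches this method roughly as
// follows (assuming -XX:+DisableExplicitGC is not set):
//
//   System.gc() -> Runtime.getRuntime().gc() -> JVM_GC()
//     -> Universe::heap()->collect(GCCause::_java_lang_system_gc)
//
// The counts snapshotted under the Heap_lock above let the VM operation's
// prologue detect whether another collection raced ahead of this request.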

void ParallelScavengeHeap::oop_iterate(ExtendedOopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceAux::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  if (UseParallelOldGC) {
    st->cr();
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds()
                                   : PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}

void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" " SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "(" SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" " SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "(" SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary, metaspace_summary);
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif