Thu, 28 Mar 2013 10:27:28 +0100
7014552: gc/lock/jni/jnilockXXX works too slow on 1-processor machine
Summary: Keep a counter of how many times we were stalled by the GC locker, add a diagnostic flag which sets the limit.
Reviewed-by: brutisso, ehelin, johnc
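In outline (condensed from mem_allocate() in the file below, not a verbatim hunk): each allocating thread counts how often it is stalled by the GC locker and gives up once the count passes the new diagnostic flag GCLockerRetryAllocationCount, instead of stalling indefinitely:

    int gclocker_stalled_count = 0;
    while (result == NULL) {
      ...
      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;  // stop retrying; the allocation is reported as failed
      }
      if (GC_locker::is_active_and_needs_gc() && !jthr->in_critical()) {
        GC_locker::stall_until_clear();
        gclocker_stalled_count += 1;
        continue;
      }
      ...
    }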
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

static void trace_gen_sizes(const char* const str,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (og_max + yg_max) / K);
  }
}

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Cannot be initialized until after the flags are parsed
  // GenerationSizer flag_parser;
  _collector_policy = new GenerationSizer();

  size_t yg_min_size = _collector_policy->min_young_gen_size();
  size_t yg_max_size = _collector_policy->max_young_gen_size();
  size_t og_min_size = _collector_policy->min_old_gen_size();
  size_t og_max_size = _collector_policy->max_old_gen_size();

  trace_gen_sizes("ps heap raw",
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);

  const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP. The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy. Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size =
    align_size_up(_collector_policy->young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  // Align old gen size down to preserve specified heap size.
  assert(og_align == yg_align, "sanity");
  og_max_size = align_size_down(og_max_size, og_align);
  og_max_size = MAX2(og_max_size, og_min_size);
  size_t og_cur_size =
    align_size_down(_collector_policy->old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  trace_gen_sizes("ps heap rnd",
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  const size_t heap_size = og_max_size + yg_max_size;

  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, og_align);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);

  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base(),
                       heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Initial young gen size is 4 MB
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);

  // Make up the generations.
  // Calculate the maximum size that a generation can grow. This
  // includes growth into the other generation. Note that the
  // parameter _max_gen_size is kept as the maximum size of the
  // generation as the boundaries currently stand, and is still
  // used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis) / 1000.0;

  _gens = new AdjoiningGenerations(heap_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio);

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // Initialize the policy counters - 2 collectors, 3 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_scavengable(const void* addr) {
  return is_in_young((oop)addr);
}

#ifdef ASSERT
// Don't implement this by using is_in_young(). This method is used
// in some cases to check that is_in_young() is correct.
bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  // The order of the generations is old (low addr), young (high addr)
  return p >= old_gen()->reserved().end();
}
#endif

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the GC time
  // limit is being exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
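  // Number of times this thread has been stalled by the GC locker in the
  // loop below; checked against the GCLockerRetryAllocationCount limit.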
  int gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

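      // Give up once this thread has already been stalled by the GC locker
      // more than GCLockerRetryAllocationCount times (a diagnostic limit);
      // returning NULL lets the caller report the allocation failure
      // instead of looping indefinitely.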
      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a GC.
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a JNI critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
               "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // error will be thrown (returning NULL and ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the next
        // collection starts with a clean slate (i.e., forgets about previous
        // overhead excesses). Fill op.result() with a filler object so that
        // the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
              "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen. This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen. The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;  // death march has started
    }
  }
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
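    // Allow a bounded number of old-gen allocations (64 here) while the
    // death march continues, then reset the count so the allocation
    // falls through to a collection instead.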
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseParallelOldGC) {
    // The do_full_collection() parameter clear_all_soft_refs
    // is interpreted here as maximum_compaction which will
    // cause SoftRefs to be cleared.
    bool maximum_compaction = clear_all_soft_refs;
    PSParallelCompact::invoke(maximum_compaction);
  } else {
    PSMarkSweep::invoke(clear_all_soft_refs);
  }
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements allocation-flow
// policy, NOT collection policy. So we do not check here whether GC time
// is over its limit; that is the responsibility of the heap-specific
// collection methods. This method decides where to attempt allocations
// and when to attempt collections, but applies no collection-specific
// policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  // Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    do_full_collection(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  // After mark sweep and young generation allocation failure,
  // allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  // More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    do_full_collection(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  // After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  unsigned int gc_count = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

void ParallelScavengeHeap::oop_iterate(ExtendedOopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // Called from os::print_location by find or VMError.
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  MetaspaceAux::print_on(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = UseParallelOldGC ?
      PSParallelCompact::accumulated_time()->seconds() :
      PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}

void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
  }
}
#endif