Tue, 08 Feb 2011 16:12:16 -0800
7014874: Incorrect COOPs modes on solaris-{sparcv9,amd64} with ParallelGC
Summary: Align old gen size down to keep specified heap size.
Reviewed-by: ysr
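
Background for the fix: the generation sizes are rounded to the generation alignment before the heap is reserved. Rounding the old gen's maximum size *up* could grow the total reservation (perm + old + young) beyond the heap size the user specified, which in turn could push the heap past a compressed-oops boundary and silently select a worse COOPs mode. Rounding *down* keeps the total within the specified size. A minimal sketch of the arithmetic follows, with hypothetical sizes and free-standing helpers standing in for HotSpot's align_size_up()/align_size_down():

#include <cstdio>
#include <cstddef>

// Power-of-two alignment helpers (same rounding behavior as HotSpot's
// align_size_up()/align_size_down()).
static size_t align_up(size_t size, size_t align)   { return (size + align - 1) & ~(align - 1); }
static size_t align_down(size_t size, size_t align) { return size & ~(align - 1); }

int main() {
  const size_t M = 1024 * 1024;
  const size_t og_align = 64 * M;             // hypothetical generation alignment
  const size_t og_max   = 20 * 1024 * M + M;  // requested old gen max, not aligned
  // Old behavior: rounding up exceeds the requested size by almost one
  // alignment unit, so perm + old + young can cross a COOPs boundary.
  std::printf("aligned up:   %zu MB\n", align_up(og_max, og_align) / M);
  // New behavior: rounding down stays within the requested size.
  std::printf("aligned down: %zu MB\n", align_down(og_max, og_align) / M);
  return 0;
}
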
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/vmError.hpp"

PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
PSPermGen* ParallelScavengeHeap::_perm_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

static void trace_gen_sizes(const char* const str,
                            size_t pg_min, size_t pg_max,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str, pg_min / K, pg_max / K,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (pg_max + og_max + yg_max) / K);
  }
}

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Cannot be initialized until after the flags are parsed
  // GenerationSizer flag_parser;
  _collector_policy = new GenerationSizer();

  size_t yg_min_size = _collector_policy->min_young_gen_size();
  size_t yg_max_size = _collector_policy->max_young_gen_size();
  size_t og_min_size = _collector_policy->min_old_gen_size();
  size_t og_max_size = _collector_policy->max_old_gen_size();
  // Why isn't there a min_perm_gen_size()?
  size_t pg_min_size = _collector_policy->perm_gen_size();
  size_t pg_max_size = _collector_policy->max_perm_gen_size();

  trace_gen_sizes("ps heap raw",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The ReservedSpace ctor used below requires that the page size for the perm
  // gen is <= the page size for the rest of the heap (young + old gens).
  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);
  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
                                                          pg_max_size, 16),
                                 og_page_sz);
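
  // Note (added for clarity): os::page_size_for_region() picks, roughly, the
  // largest supported page size that still yields at least the given minimum
  // number of pages (8 for the main heap, 16 for the perm gen) over the
  // region; the MIN2 enforces the perm-page <= heap-page constraint above.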

  const size_t pg_align = set_alignment(_perm_gen_alignment, pg_page_sz);
  const size_t og_align = set_alignment(_old_gen_alignment, og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP. The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy. Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size =
    align_size_up(_collector_policy->young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  // Align old gen size down to preserve specified heap size.
  assert(og_align == yg_align, "sanity");
  og_max_size = align_size_down(og_max_size, og_align);
  og_max_size = MAX2(og_max_size, og_min_size);
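  // Worked example of the fix (hypothetical numbers): with og_align = 256M
  // and a requested og_max_size of 1025M, aligning up would reserve 1280M
  // and grow the total reservation past the heap size the user asked for --
  // enough to push it over a compressed-oops boundary -- while aligning
  // down reserves 1024M and preserves the specified heap size.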
  size_t og_cur_size =
    align_size_down(_collector_policy->old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  pg_min_size = align_size_up(pg_min_size, pg_align);
  pg_max_size = align_size_up(pg_max_size, pg_align);
  size_t pg_cur_size = pg_min_size;

  trace_gen_sizes("ps heap rnd",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

  // The main part of the heap (old gen + young gen) can often use a larger page
  // size than is needed or wanted for the perm gen. Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size for
  // all gens.

  ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                            og_align, addr);
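
  // Compressed oops: the reservation above was attempted at the "unscaled"
  // base (narrow oops used as-is). If it failed, fall back below to a
  // zero-based reservation (decode by shift only), and finally to a
  // heap-based one (base + shift), for which no preferred address is
  // requested.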
  if (UseCompressedOops) {
    if (addr != NULL && !heap_rs.is_reserved()) {
      // Failed to reserve at specified address - the requested memory
      // region is taken already, for example, by 'java' launcher.
      // Try again to reserve the heap higher.
      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
      ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
                                 og_align, addr);
      if (addr != NULL && !heap_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");
        ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
                                   og_align, addr);
        heap_rs = heap_rs1;
      } else {
        heap_rs = heap_rs0;
      }
    }
  }

  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base() + pg_max_size,
                       heap_rs.size() - pg_max_size);
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Initial young gen size is 4 MB
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);

  // Split the reserved space into perm gen and the main heap (everything else).
  // The main heap uses a different alignment.
  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);

  // Make up the generations.
  // Calculate the maximum size that a generation can grow. This
  // includes growth into the other generation. Note that _max_gen_size
  // is kept as the maximum size of the generation as the boundaries
  // currently stand, and is still used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(main_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
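  // (Added for clarity) The initial promotion estimate is deliberately
  // conservative: a single scavenge cannot promote more than eden holds,
  // nor more than the old gen can currently accept, hence the MIN2 of the
  // two capacities.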
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  _perm_gen = new PSPermGen(perm_rs,
                            pg_align,
                            pg_cur_size,
                            pg_cur_size,
                            pg_max_size,
                            "perm", 2);

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  perm_gen()->update_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

size_t ParallelScavengeHeap::permanent_capacity() const {
  return perm_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::permanent_used() const {
  return perm_gen()->used_in_bytes();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  estimated -= perm_gen()->reserved().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  if (perm_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.
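
// In outline (a simplified sketch of mem_allocate() below, not its exact
// control flow):
//
//   while (result == NULL) {
//     {
//       MutexLocker ml(Heap_lock);            // basic policy: no safepoint
//       gc_count = total_collections();       // read under the Heap_lock
//       result = young_gen()->allocate(...);  // may expand, never collects
//       // possibly try old_gen(), or stall/bail if GC_locker is active
//     }
//     VM_ParallelGCFailedAllocation op(...);  // failed allocation policy:
//     VMThread::execute(&op);                 // collect, then allocate
//   }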

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool is_noref,
                                     bool is_tlab,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size, is_tlab);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size, is_tlab);

      // (1) If the requested object is too large to easily fit in the
      //     young_gen, or
      // (2) If GC is locked out via GCLocker, young gen is full and
      //     the need for a GC already signalled to GCLocker (done
      //     at a safepoint),
      // ... then, rather than force a safepoint and (a potentially futile)
      // collection (attempt) for each allocation, try allocation directly
      // in old_gen. For case (2) above, we may in the future allow
      // TLAB allocation directly in the old gen.
      if (result != NULL) {
        return result;
      }
      if (!is_tlab &&
          size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
        result = old_gen()->allocate(size, is_tlab);
        if (result != NULL) {
          return result;
        }
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // GC is locked out. If this is a TLAB allocation,
        // return NULL; the requestor will retry allocation
        // of an individual object at a time.
        if (is_tlab) {
          return NULL;
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
               "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // will be thrown (return NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses). Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
        assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
              "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
    }
  }

  return result;
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method contains policy for the
// allocation flow only, NOT collection policy. We therefore do not check
// here whether GC time has exceeded its limit; that is the responsibility
// of the heap-specific collection methods. This method decides where to
// attempt allocations and when to attempt collections, but applies no
// collection-specific policy.
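//
// In outline, the fallback levels below are: (1) scavenge, then allocate in
// the young gen; (2) full GC (unless the scavenge already invoked one), then
// the young gen again; (3) the old gen; (4) a more complete full GC; (5) the
// old gen once more.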
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  size_t mark_sweep_invocation_count = total_invocations();

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size, is_tlab);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL) {
    // There is some chance the scavenge method decided to invoke mark_sweep.
    // Don't mark sweep twice if so.
    if (mark_sweep_invocation_count == total_invocations()) {
      invoke_full_gc(false);
      result = young_gen()->allocate(size, is_tlab);
    }
  }

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  // Fourth level allocation failure. We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    invoke_full_gc(true);
    result = young_gen()->allocate(size, is_tlab);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  return result;
}

//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();

      result = perm_gen()->allocate_permanent(size);

      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit. Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_overhead_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory condition and tries to go on.
      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      if (limit_exceeded) {
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
            " return NULL because gc_overhead_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
               "result not in heap");
        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now. Clear the gc_overhead_limit_exceeded
        // flag to avoid the following situation:
        //   gc_overhead_limit_exceeded is set during a collection;
        //   the collection fails to return enough space and an OOM is thrown;
        //   a subsequent GC prematurely throws an out-of-memory because
        //   the gc_overhead_limit_exceeded counts did not start again
        //   from 0.
        if (op.result() == NULL) {
          size_policy()->reset_gc_overhead_limit_count();
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=%d", loop_count, size);
    }
  } while (result == NULL);

  return result;
}

//
// This is the policy code for permanent allocations which have failed
// and require a collection. Note that just as in failed_mem_allocate,
// we do not set collection policy, only where & when to allocate and
// collect.
HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  assert(size > perm_gen()->free_in_words(), "Allocation should fail");

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure. Mark-sweep and allocate in perm gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  invoke_full_gc(false);
  HeapWord* result = perm_gen()->allocate_permanent(size);

  // Second level allocation failure. We're running out of memory.
  if (result == NULL) {
    invoke_full_gc(true);
    result = perm_gen()->allocate_permanent(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size, true);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  unsigned int gc_count = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      invoke_full_gc(false);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere();
  }
}

void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}

void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
  perm_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

void ParallelScavengeHeap::print() const { print_on(tty); }

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}

void ParallelScavengeHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" " SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "(" SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" " SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "(" SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
    perm_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
    perm_gen()->object_space()->mangle_unused_area();
  }
}
#endif