Mon, 16 Apr 2012 08:57:18 +0200
4988100: oop_verify_old_oop appears to be dead
Summary: removed oop_verify_old_oop and allow_dirty. Also reviewed by: alexlamsl@gmail.com
Reviewed-by: jmasa, jwilhelm
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*                 ParallelScavengeHeap::_young_gen = NULL;
PSOldGen*                   ParallelScavengeHeap::_old_gen = NULL;
PSPermGen*                  ParallelScavengeHeap::_perm_gen = NULL;
PSAdaptiveSizePolicy*       ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap*       ParallelScavengeHeap::_psh = NULL;
GCTaskManager*              ParallelScavengeHeap::_gc_task_manager = NULL;
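
// Print each generation's min and max sizes (in KB), followed by the maximum
// total heap size, when +TracePageSizes is enabled.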
static void trace_gen_sizes(const char* const str,
                            size_t pg_min, size_t pg_max,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s:  " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str, pg_min / K, pg_max / K,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (pg_max + og_max + yg_max) / K);
  }
}

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Cannot be initialized until after the flags are parsed
  // GenerationSizer flag_parser;
  _collector_policy = new GenerationSizer();

  size_t yg_min_size = _collector_policy->min_young_gen_size();
  size_t yg_max_size = _collector_policy->max_young_gen_size();
  size_t og_min_size = _collector_policy->min_old_gen_size();
  size_t og_max_size = _collector_policy->max_old_gen_size();
  // Why isn't there a min_perm_gen_size()?
  size_t pg_min_size = _collector_policy->perm_gen_size();
  size_t pg_max_size = _collector_policy->max_perm_gen_size();

  trace_gen_sizes("ps heap raw",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The ReservedSpace ctor used below requires that the page size for the perm
  // gen is <= the page size for the rest of the heap (young + old gens).
  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);
  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
                                                          pg_max_size, 16),
                                 og_page_sz);

  const size_t pg_align = set_alignment(_perm_gen_alignment,  pg_page_sz);
  const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP. The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy. Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size =
    align_size_up(_collector_policy->young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  // Align old gen size down to preserve specified heap size.
  assert(og_align == yg_align, "sanity");
  og_max_size = align_size_down(og_max_size, og_align);
  og_max_size = MAX2(og_max_size, og_min_size);
  size_t og_cur_size =
    align_size_down(_collector_policy->old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  pg_min_size = align_size_up(pg_min_size, pg_align);
  pg_max_size = align_size_up(pg_max_size, pg_align);
  size_t pg_cur_size = pg_min_size;

  trace_gen_sizes("ps heap rnd",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

  // The main part of the heap (old gen + young gen) can often use a larger page
  // size than is needed or wanted for the perm gen. Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size for
  // all gens.

  ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                            og_align, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !heap_rs.is_reserved()) {
      // Failed to reserve at specified address - the requested memory
      // region is taken already, for example, by 'java' launcher.
      // Try again to reserve the heap at a higher address.
      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
      ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
                                 og_align, addr);
      if (addr != NULL && !heap_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");
        ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
                                   og_align, addr);
        heap_rs = heap_rs1;
      } else {
        heap_rs = heap_rs0;
      }
    }
  }

  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base() + pg_max_size,
                       heap_rs.size() - pg_max_size);
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Initial young gen size is 4 Mb
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);

  // Split the reserved space into perm gen and the main heap (everything else).
  // The main heap uses a different alignment.
  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);

  // Make up the generations
  // Calculate the maximum size that a generation can grow. This
  // includes growth into the other generation. Note that the
  // parameter _max_gen_size is kept as the maximum
  // size of the generation as the boundaries currently stand.
  // _max_gen_size is still used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(main_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  _perm_gen = new PSPermGen(perm_rs,
                            pg_align,
                            pg_cur_size,
                            pg_cur_size,
                            pg_max_size,
                            "perm", 2);

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  perm_gen()->update_counters();
}
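
// capacity() and used() cover only the young and old generations; the perm
// gen is reported separately via permanent_capacity() and permanent_used().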
size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

size_t ParallelScavengeHeap::permanent_capacity() const {
  return perm_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::permanent_used() const {
  return perm_gen()->used_in_bytes();
}
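
// Estimate the maximum amount of memory that can ever hold objects: the
// reserved size minus the perm gen and minus one survivor space, since only
// one survivor space holds objects at any given time.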
size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  estimated -= perm_gen()->reserved().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  if (perm_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}
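
// Only objects in the young gen can be scavenged; objects in the old and
// perm gens are moved only by full collections.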
bool ParallelScavengeHeap::is_scavengable(const void* addr) {
  return is_in_young((oop)addr);
}

#ifdef ASSERT
// Don't implement this by using is_in_young(). This method is used
// in some cases to check that is_in_young() is correct.
bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  // The order of the generations is perm (low addr), old, young (high addr)
  return p >= old_gen()->reserved().end();
}
#endif

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false so
  // set it so here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      // Failed to allocate without a gc.
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
               "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation). Exit the loop so that an out-of-memory
        // error will be thrown (return a NULL, ignoring the contents of
        // op.result()), but clear gc_overhead_limit_exceeded so that the
        // next collection starts with a clean slate (i.e., forgets about
        // previous overhead excesses). Fill op.result() with a filler
        // object so that the heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
        assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
              "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  }

  return result;
}
517 // A "death march" is a series of ultra-slow allocations in which a full gc is
518 // done before each allocation, and after the full gc the allocation still
519 // cannot be satisfied from the young gen. This routine detects that condition;
520 // it should be called after a full gc has been done and the allocation
521 // attempted from the young gen. The parameter 'addr' should be the result of
522 // that young gen allocation attempt.
523 void
524 ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
525 if (addr != NULL) {
526 _death_march_count = 0; // death march has ended
527 } else if (_death_march_count == 0) {
528 if (should_alloc_in_eden(size)) {
529 _death_march_count = 1; // death march has started
530 }
531 }
532 }
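
// Allocate directly in the old gen when the request cannot go in eden (too
// large, or GC is locked out); during a death march, also satisfy a bounded
// number of ordinary requests from the old gen before returning NULL so that
// the caller falls back to a collection.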
HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method implements policy for
// allocation flow, NOT collection policy, so we do not check here whether
// gc time has exceeded its limit; that is the responsibility of the
// heap-specific collection methods. This method decides where to attempt
// allocations and when to attempt collections, but applies no
// collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  // Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    invoke_full_gc(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  // After mark sweep and young generation allocation failure,
  // allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure. We're running out of memory.
  // More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    invoke_full_gc(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  // After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();

      result = perm_gen()->allocate_permanent(size);

      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit. Exit the loop so that an out-of-memory
      // error will be thrown (returning a NULL will do that), but
      // clear gc_overhead_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory error and tries to go on.
      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      if (limit_exceeded) {
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
            " return NULL because gc_overhead_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
               "result not in heap");
        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory error
        // will be thrown now. Clear the gc_overhead_limit_exceeded
        // flag to avoid the following situation:
        //   gc_overhead_limit_exceeded is set during a collection;
        //   the collection fails to return enough space and an OOM is thrown;
        //   a subsequent GC prematurely throws an out-of-memory error because
        //   the gc_overhead_limit_exceeded counts did not start
        //   again from 0.
        if (op.result() == NULL) {
          size_policy()->reset_gc_overhead_limit_count();
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  } while (result == NULL);

  return result;
}

//
// This is the policy code for permanent allocations which have failed
// and require a collection. Note that just as in failed_mem_allocate,
// we do not set collection policy, only where & when to allocate and
// collect.
HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  assert(size > perm_gen()->free_in_words(), "Allocation should fail");

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure. Mark-sweep and allocate in perm gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  invoke_full_gc(false);
  HeapWord* result = perm_gen()->allocate_permanent(size);

  // Second level allocation failure. We're running out of memory.
  if (result == NULL) {
    invoke_full_gc(true);
    result = perm_gen()->allocate_permanent(size);
  }

  return result;
}
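
// Make the heap parsable before iteration or verification: let the shared
// CollectedHeap code retire TLABs, then give eden a chance to ensure its own
// parsability as well.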
void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  unsigned int gc_count      = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count      = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      invoke_full_gc(false);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere();
  }
}

void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}

void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
  perm_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}
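
// block_size() assumes that addr is the start of an object, so it simply
// asks the object for its size; block_is_obj() checks that assumption by
// comparing addr against the block start computed above.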
size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}

void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify();
  }
}
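
// Log heap usage as "prev_used->used(capacity)": raw byte counts when both
// PrintGCDetails and Verbose are set, otherwise rounded to KB.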
void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
    perm_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
    perm_gen()->object_space()->mangle_unused_area();
  }
}
#endif