Mon, 09 Mar 2009 13:28:46 -0700
6814575: Update copyright year
Summary: Update copyright for files that have been modified in 2009, up to 03/09
Reviewed-by: katleman, tbell, ohair

/*
 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_parallelScavengeHeap.cpp.incl"

PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
PSPermGen* ParallelScavengeHeap::_perm_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

static void trace_gen_sizes(const char* const str,
                            size_t pg_min, size_t pg_max,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str, pg_min / K, pg_max / K,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (pg_max + og_max + yg_max) / K);
  }
}
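
// For example (an illustrative sketch, not output from a real run): with
// pg_min/pg_max = 1M/2M, og_min/og_max = 4M/8M and yg_min/yg_max = 2M/4M,
// trace_gen_sizes("ps heap raw", ...) prints the sizes in kilobytes as
//
//   ps heap raw: 1024,2048 4096,8192 2048,4096 14336
//
// i.e. a min,max pair for the perm, old and young gens, followed by the sum
// of the maxima.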

jint ParallelScavengeHeap::initialize() {
  // Cannot be initialized until after the flags are parsed
  GenerationSizer flag_parser;

  size_t yg_min_size = flag_parser.min_young_gen_size();
  size_t yg_max_size = flag_parser.max_young_gen_size();
  size_t og_min_size = flag_parser.min_old_gen_size();
  size_t og_max_size = flag_parser.max_old_gen_size();
  // Why isn't there a min_perm_gen_size()?
  size_t pg_min_size = flag_parser.perm_gen_size();
  size_t pg_max_size = flag_parser.max_perm_gen_size();

  trace_gen_sizes("ps heap raw",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The ReservedSpace ctor used below requires that the page size for the perm
  // gen is <= the page size for the rest of the heap (young + old gens).
  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);
  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
                                                          pg_max_size, 16),
                                 og_page_sz);
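
  // An illustrative note on the MIN2 clamp above (assumed page sizes, not
  // from any particular platform): if os::page_size_for_region() picks a 2M
  // large page for the young + old region but only 64K for the smaller perm
  // region, pg_page_sz stays at 64K; if it instead picked 4M for the perm
  // gen, the MIN2 clamps it back down to the 2M og_page_sz, preserving the
  // ReservedSpace requirement stated above.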

  const size_t pg_align = set_alignment(_perm_gen_alignment, pg_page_sz);
  const size_t og_align = set_alignment(_old_gen_alignment, og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP. The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy. Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size = align_size_up(flag_parser.young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  og_max_size = align_size_up(og_max_size, og_align);
  size_t og_cur_size = align_size_up(flag_parser.old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  pg_min_size = align_size_up(pg_min_size, pg_align);
  pg_max_size = align_size_up(pg_max_size, pg_align);
  size_t pg_cur_size = pg_min_size;

  trace_gen_sizes("ps heap rnd",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);
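
  // A worked example of the rounding above (assumed values): with a 64K
  // alignment, align_size_up() leaves a 5M minimum unchanged (5120K is a
  // multiple of 64K) but rounds 5M + 1K up to 5M + 64K. The MAX2 calls then
  // keep the current sizes from dropping below the rounded minimums.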

  // The main part of the heap (old gen + young gen) can often use a larger page
  // size than is needed or wanted for the perm gen. Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size for
  // all gens.
  ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                            og_align);
  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base() + pg_max_size,
                       heap_rs.size() - pg_max_size);
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Initial young gen size is 4 Mb
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);
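
  // A worked example of the clamp above (assumed values): with yg_max_size of
  // 8M, MIN2 leaves the 4M default intact and MAX2 keeps the larger of that
  // and yg_cur_size, so a 6M yg_cur_size (e.g. from NewSize) survives while a
  // 2M one is raised to 4M. With yg_max_size of 2M, the initial size is
  // instead capped at 2M.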

  // Split the reserved space into perm gen and the main heap (everything else).
  // The main heap uses a different alignment.
  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);

  // Make up the generations
  // Calculate the maximum size that a generation can grow. This
  // includes growth into the other generation. Note that the
  // parameter _max_gen_size is kept as the maximum size of the
  // generation as the boundaries currently stand.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(main_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  _perm_gen = new PSPermGen(perm_rs,
                            pg_align,
                            pg_cur_size,
                            pg_cur_size,
                            pg_max_size,
                            "perm", 2);

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  perm_gen()->update_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

size_t ParallelScavengeHeap::permanent_capacity() const {
  return perm_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::permanent_used() const {
  return perm_gen()->used_in_bytes();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  estimated -= perm_gen()->reserved().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}
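
// A worked example of the estimate above (assumed sizes, not defaults): with
// a 64M reserved region, an 8M perm gen reservation and a 1M to-space,
// -XX:-UseAdaptiveSizePolicy would report 64M - 8M - 1M = 55M, unless the
// current capacity() is already larger.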

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  if (perm_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// Static method
bool ParallelScavengeHeap::is_in_young(oop* p) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
         "Must be ParallelScavengeHeap");

  PSYoungGen* young_gen = heap->young_gen();

  if (young_gen->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// Static method
bool ParallelScavengeHeap::is_in_old_or_perm(oop* p) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
         "Must be ParallelScavengeHeap");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  if (old_gen->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.
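
// A minimal sketch of the resulting control flow (illustrative only; the
// real logic, including the GC-locker and gc-count handshakes, lives in
// mem_allocate() and failed_mem_allocate() below):
//
//   loop:
//     result = basic allocation (young, maybe old)    // no safepoint
//     if (result != NULL) return result;
//     submit VM_ParallelGCFailedAllocation            // VM thread runs the
//                                                     // failed allocation policy
//     if (the VM op actually ran) return op.result(); // NULL here means OOM
//     // otherwise another thread collected first; retry the loop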

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the Java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool is_noref,
                                     bool is_tlab,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result = young_gen()->allocate(size, is_tlab);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size, is_tlab);

      // (1) If the requested object is too large to easily fit in the
      //     young_gen, or
      // (2) If GC is locked out via GCLocker, young gen is full and
      //     the need for a GC already signalled to GCLocker (done
      //     at a safepoint),
      // ... then, rather than force a safepoint and (a potentially futile)
      // collection (attempt) for each allocation, try allocation directly
      // in old_gen. For case (2) above, we may in the future allow
      // TLAB allocation directly in the old gen.
      if (result != NULL) {
        return result;
      }
      if (!is_tlab &&
          size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
        result = old_gen()->allocate(size, is_tlab);
        if (result != NULL) {
          return result;
        }
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // GC is locked out. If this is a TLAB allocation,
        // return NULL; the requestor will retry allocation
        // of an individual object at a time.
        if (is_tlab) {
          return NULL;
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit. Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_time_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      *gc_overhead_limit_was_exceeded = size_policy()->gc_time_limit_exceeded();
      if (size_policy()->gc_time_limit_exceeded()) {
        size_policy()->set_gc_time_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
            "return NULL because gc_time_limit_exceeded is set");
        }
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that cannot
      // be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
               "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now. Clear the gc_time_limit_exceeded
        // flag to avoid the following situation.
        // gc_time_limit_exceeded is set during a collection
        // the collection fails to return enough space and an OOM is thrown
        // the next GC is skipped because the gc_time_limit_exceeded
        // flag is set and another OOM is thrown
        if (op.result() == NULL) {
          size_policy()->set_gc_time_limit_exceeded(false);
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
    }
  }

  return result;
}
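
// A sketch of the expected caller-side use of mem_allocate() (illustrative
// only; the real callers are in the shared allocation paths, and the
// variable names here are hypothetical):
//
//   bool limit_exceeded = false;
//   HeapWord* obj = heap->mem_allocate(word_size,
//                                      false /* is_noref */,
//                                      false /* is_tlab */,
//                                      &limit_exceeded);
//   if (obj == NULL) {
//     // The heap and the size policy have given up; the caller is
//     // responsible for throwing OutOfMemoryError.
//   }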

// Failed allocation policy. Must be called from the VM thread, and
// only at a safepoint! Note that this method sets policy for the
// allocation flow only, NOT for collection itself, so we do not check
// for GC time over limit here; that is the responsibility of the
// heap-specific collection methods. This method decides where to attempt
// allocations and when to attempt collections, but contains no
// collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  size_t mark_sweep_invocation_count = total_invocations();

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size, is_tlab);

  // Second level allocation failure.
  // Mark sweep and allocate in young generation.
  if (result == NULL) {
    // There is some chance the scavenge method decided to invoke mark_sweep.
    // Don't mark sweep twice if so.
    if (mark_sweep_invocation_count == total_invocations()) {
      invoke_full_gc(false);
      result = young_gen()->allocate(size, is_tlab);
    }
  }

  // Third level allocation failure.
  // After mark sweep and young generation allocation failure,
  // allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  // Fourth level allocation failure. We're running out of memory.
  // More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    invoke_full_gc(true);
    result = young_gen()->allocate(size, is_tlab);
  }

  // Fifth level allocation failure.
  // After more complete mark sweep, allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  return result;
}
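
// The escalation above, in outline (a restatement of the code, not new
// policy):
//
//   1. PSScavenge::invoke();   then young_gen()->allocate()
//   2. invoke_full_gc(false);  then young_gen()->allocate()  (skipped if the
//                              scavenge already fell back to a mark sweep)
//   3.                         old_gen()->allocate()         (non-TLAB only)
//   4. invoke_full_gc(true);   then young_gen()->allocate()
//   5.                         old_gen()->allocate()         (non-TLAB only)
//
// Only after all five levels fail does the NULL propagate back to
// mem_allocate(), where it becomes an OutOfMemoryError in the caller.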

//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();

      result = perm_gen()->allocate_permanent(size);

      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit. Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_time_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      if (size_policy()->gc_time_limit_exceeded()) {
        size_policy()->set_gc_time_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate: "
            "return NULL because gc_time_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that cannot
      // be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
               "result not in heap");
        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now. Clear the gc_time_limit_exceeded
        // flag to avoid the following situation.
        // gc_time_limit_exceeded is set during a collection
        // the collection fails to return enough space and an OOM is thrown
        // the next GC is skipped because the gc_time_limit_exceeded
        // flag is set and another OOM is thrown
        if (op.result() == NULL) {
          size_policy()->set_gc_time_limit_exceeded(false);
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=%d", loop_count, size);
    }
  } while (result == NULL);

  return result;
}

//
// This is the policy code for permanent allocations which have failed
// and require a collection. Note that just as in failed_mem_allocate,
// we do not set collection policy, only where & when to allocate and
// collect.
HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  assert(size > perm_gen()->free_in_words(), "Allocation should fail");

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure. Mark-sweep and allocate in perm gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  invoke_full_gc(false);
  HeapWord* result = perm_gen()->allocate_permanent(size);

  // Second level allocation failure. We're running out of memory.
  if (result == NULL) {
    invoke_full_gc(true);
    result = perm_gen()->allocate_permanent(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size, true);
}

void ParallelScavengeHeap::fill_all_tlabs(bool retire) {
  CollectedHeap::fill_all_tlabs(retire);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  unsigned int gc_count = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}
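
// A note on why the counts are captured first (a sketch; the check itself is
// in the VM operation's prologue, which is not in this file): if another
// thread's collection completes between the Heap_lock release above and the
// VM operation running, the operation can observe
//
//   total_collections() != gc_count || total_full_collections() != full_gc_count
//
// and skip the now-redundant GC instead of collecting twice for one request.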

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      invoke_full_gc(false);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere();
  }
}

void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}

void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
  perm_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

void ParallelScavengeHeap::print() const { print_on(tty); }

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}

void ParallelScavengeHeap::verify(bool allow_dirty, bool silent) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" " SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "(" SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" " SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "(" SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}
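
// Typical use of the accessor above (a sketch; equivalent to the
// Universe::heap() + kind() assertion pattern used by the static methods
// earlier in this file):
//
//   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
//   size_t used_bytes = heap->used();  // young + old, as defined above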

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
    perm_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
    perm_gen()->object_space()->mangle_unused_area();
  }
}
#endif