Thu, 28 Jun 2012 17:03:16 -0400
6995781: Native Memory Tracking (Phase 1)
7151532: DCmd for hotspot native memory tracking
Summary: Implementation of native memory tracking phase 1, which tracks VM native memory usage, and related DCmd
Reviewed-by: acorn, coleenp, fparain
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp"
#include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
#include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
#include "memory/gcLocker.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memTracker.hpp"
#include "utilities/vmError.hpp"

PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
PSPermGen* ParallelScavengeHeap::_perm_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

static void trace_gen_sizes(const char* const str,
                            size_t pg_min, size_t pg_max,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str, pg_min / K, pg_max / K,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (pg_max + og_max + yg_max) / K);
  }
}
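
// For illustration only (hypothetical -XX:+TracePageSizes output; the values
// below are made up, in KB):
//   ps heap raw: 16384,65536 16384,262144 16384,131072 458752
// i.e. perm min,max  old min,max  young min,max  total of the maxes.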

jint ParallelScavengeHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Cannot be initialized until after the flags are parsed
  // GenerationSizer flag_parser;
  _collector_policy = new GenerationSizer();

  size_t yg_min_size = _collector_policy->min_young_gen_size();
  size_t yg_max_size = _collector_policy->max_young_gen_size();
  size_t og_min_size = _collector_policy->min_old_gen_size();
  size_t og_max_size = _collector_policy->max_old_gen_size();
  // Why isn't there a min_perm_gen_size()?
  size_t pg_min_size = _collector_policy->perm_gen_size();
  size_t pg_max_size = _collector_policy->max_perm_gen_size();

  trace_gen_sizes("ps heap raw",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The ReservedSpace ctor used below requires that the page size for the perm
  // gen is <= the page size for the rest of the heap (young + old gens).
  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);
  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
                                                          pg_max_size, 16),
                                 og_page_sz);
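
  // Reading aid (our understanding, not part of the original comments):
  // os::page_size_for_region(min, max, N) returns roughly the largest
  // supported page size that still allows at least N pages in the region,
  // so the main heap is sized for >= 8 pages and the perm gen for >= 16.
  // Taking MIN2 with og_page_sz enforces the ReservedSpace requirement
  // stated above (perm gen page size <= main heap page size).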

  const size_t pg_align = set_alignment(_perm_gen_alignment, pg_page_sz);
  const size_t og_align = set_alignment(_old_gen_alignment, og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP.  The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy.  Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size =
    align_size_up(_collector_policy->young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  // Align old gen size down to preserve specified heap size.
  assert(og_align == yg_align, "sanity");
  og_max_size = align_size_down(og_max_size, og_align);
  og_max_size = MAX2(og_max_size, og_min_size);
  size_t og_cur_size =
    align_size_down(_collector_policy->old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  pg_min_size = align_size_up(pg_min_size, pg_align);
  pg_max_size = align_size_up(pg_max_size, pg_align);
  size_t pg_cur_size = pg_min_size;

  trace_gen_sizes("ps heap rnd",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

  // The main part of the heap (old gen + young gen) can often use a larger page
  // size than is needed or wanted for the perm gen.  Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size for
  // all gens.

  ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                            og_align, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !heap_rs.is_reserved()) {
      // Failed to reserve at specified address - the requested memory
      // region is taken already, for example, by 'java' launcher.
      // Try again to reserve the heap at a higher address.
      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
      ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
                                 og_align, addr);
      if (addr != NULL && !heap_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");
        ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
                                   og_align, addr);
        heap_rs = heap_rs1;
      } else {
        heap_rs = heap_rs0;
      }
    }
  }
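
  // Background (our gloss, not in the original source): the three narrow-oop
  // modes tried above trade decode cost for placement constraints.
  // UnscaledNarrowOop needs the heap to fit below 4GB (no base, no shift),
  // ZeroBasedNarrowOop below 32GB (shift only, zero base), and
  // HeapBasedNarrowOop works anywhere but decodes with both base and shift.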

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);
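  // The call above is part of this changeset's Native Memory Tracking work:
  // it tags the reserved region with mtJavaHeap so NMT attributes it to the
  // Java heap rather than to untracked native memory.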

  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base() + pg_max_size,
                       heap_rs.size() - pg_max_size);
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }
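
  // Aside (our reading of the CardTableExtension ctor, hedged): the second
  // argument is max_covered_regions; 3 matches the three generations (perm,
  // old, young) whose spaces the card table may be asked to cover.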

  // Initial young gen size is 4 MB
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);

  // Split the reserved space into perm gen and the main heap (everything else).
  // The main heap uses a different alignment.
  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);

  // Make up the generations.
  // Calculate the maximum size that a generation can grow.  This
  // includes growth into the other generation.  Note that the
  // parameter _max_gen_size is kept as the maximum size of the
  // generation as the boundaries currently stand, and is still
  // used as that value.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(main_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio);

  _perm_gen = new PSPermGen(perm_rs,
                            pg_align,
                            pg_cur_size,
                            pg_cur_size,
                            pg_max_size,
                            "perm", 2);

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  perm_gen()->update_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}

size_t ParallelScavengeHeap::permanent_capacity() const {
  return perm_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::permanent_used() const {
  return perm_gen()->used_in_bytes();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  estimated -= perm_gen()->reserved().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}
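
// Worked example for max_capacity() (illustrative numbers, not from the
// source): with a 96M reserved region, a 16M reserved perm gen and an 8M
// to-space, the estimate is 96M - 16M - 8M = 72M, reflecting that one
// survivor space is always kept empty and so never holds application data.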

bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  if (perm_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_scavengable(const void* addr) {
  return is_in_young((oop)addr);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  // The order of the generations is perm (low addr), old, young (high addr)
  return p >= old_gen()->reserved().end();
}
#endif

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection.  That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection.  It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request.  This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions.  It should make every
// attempt to allocate the requested memory.
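
// Informal sketch of the two levels as implemented below (our summary):
//
//   mem_allocate()                        requesting thread, no safepoint
//     young_gen()->allocate() /
//     mem_allocate_old_gen()              -- basic policy
//     VM_ParallelGCFailedAllocation       handed to the VM thread
//       failed_mem_allocate()             VM thread, at a safepoint
//         scavenge / full gc, then
//         retry the allocation            -- failed policy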

// Basic allocation policy.  Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously.  When that happens, only one VM operation will succeed,
// and the rest will not be executed.  For that reason, this method loops
// during failed allocation attempts.  If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      // Failed to allocate without a gc.
      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
               "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // will be thrown (return NULL, ignoring the contents of op.result()),
        // but clear gc_overhead_limit_exceeded so that the next collection
        // starts with a clean slate (i.e., forgets about previous overhead
        // excesses).  Fill op.result() with a filler object so that the
        // heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
        assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
                                   "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  }

  return result;
}

// A "death march" is a series of ultra-slow allocations in which a full gc is
// done before each allocation, and after the full gc the allocation still
// cannot be satisfied from the young gen.  This routine detects that condition;
// it should be called after a full gc has been done and the allocation
// attempted from the young gen.  The parameter 'addr' should be the result of
// that young gen allocation attempt.
void
ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
  if (addr != NULL) {
    _death_march_count = 0;  // death march has ended
  } else if (_death_march_count == 0) {
    if (should_alloc_in_eden(size)) {
      _death_march_count = 1;  // death march has started
    }
  }
}
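
// Hypothetical scenario to make the mechanism concrete: a full gc completes,
// a small (eden-sized) request still fails in the young gen, so the count
// moves from 0 to 1; mem_allocate_old_gen() below then serves up to 64 such
// requests straight from the old gen before resetting the count and letting
// a GC happen again.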

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
    // Size is too big for eden, or gc is locked out.
    return old_gen()->allocate(size);
  }

  // If a "death march" is in progress, allocate from the old gen a limited
  // number of times before doing a GC.
  if (_death_march_count > 0) {
    if (_death_march_count < 64) {
      ++_death_march_count;
      return old_gen()->allocate(size);
    } else {
      _death_march_count = 0;
    }
  }
  return NULL;
}
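
// Note (based on our reading of parallelScavengeHeap.inline.hpp, hedged):
// should_alloc_in_eden(size) is true when the request is small relative to
// eden's capacity (roughly, smaller than half of eden), so oversized requests
// bypass eden and go straight to the old gen above.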

// Failed allocation policy.  Must be called from the VM thread, and
// only at a safepoint!  Note that this method implements policy for the
// allocation flow, NOT collection policy.  So we do not check here whether
// the gc time limit has been exceeded; that is the responsibility of the
// heap-specific collection methods.  This method decides where to attempt
// allocations, and when to attempt collections, but contains no
// collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // We assume that allocation in eden will fail unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  const bool invoked_full_gc = PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL && !invoked_full_gc) {
    invoke_full_gc(false);
    result = young_gen()->allocate(size);
  }

  death_march_check(result, size);

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  // Fourth level allocation failure.  We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    invoke_full_gc(true);
    result = young_gen()->allocate(size);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL) {
    result = old_gen()->allocate(size);
  }

  return result;
}

//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();

      result = perm_gen()->allocate_permanent(size);

      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit.  Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_overhead_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      if (limit_exceeded) {
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
                                 " return NULL because gc_overhead_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
               "result not in heap");
        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now.  Clear the gc_overhead_limit_exceeded
        // flag to avoid the following situation:
        //   gc_overhead_limit_exceeded is set during a collection;
        //   the collection fails to return enough space and an OOM is thrown;
        //   a subsequent GC prematurely throws an out-of-memory because
        //   the gc_overhead_limit_exceeded counts did not start
        //   again from 0.
        if (op.result() == NULL) {
          size_policy()->reset_gc_overhead_limit_count();
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=" SIZE_FORMAT, loop_count, size);
    }
  } while (result == NULL);

  return result;
}

//
// This is the policy code for permanent allocations which have failed
// and require a collection.  Note that just as in failed_mem_allocate,
// we do not set collection policy, only where & when to allocate and
// collect.
HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  assert(size > perm_gen()->free_in_words(), "Allocation should fail");

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure.  Mark-sweep and allocate in perm gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  invoke_full_gc(false);
  HeapWord* result = perm_gen()->allocate_permanent(size);

  // Second level allocation failure.  We're running out of memory.
  if (result == NULL) {
    invoke_full_gc(true);
    result = perm_gen()->allocate_permanent(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // We don't need barriers for stores to objects in the
  // young gen and, a fortiori, for initializing stores to
  // objects therein.
  return is_in_young(new_obj);
}
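
// Rationale (our gloss): the card-table barrier only exists to record
// old-to-young pointers for the next scavenge.  A newly allocated object in
// the young gen can never be the source of such a pointer, so initializing
// stores into it need no card marks.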

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  unsigned int gc_count = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // This value is guarded by the Heap_lock
    gc_count = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      invoke_full_gc(false);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere();
  }
}

void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}

void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
  perm_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}

void ParallelScavengeHeap::verify(bool silent, VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify();

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify();
  }
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}
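
// Example of the non-verbose form above, with illustrative values:
//   " 8192K->4096K(16384K)" -- previous used, current used, current capacity.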

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

ParallelScavengeHeap::ParStrongRootsScope::ParStrongRootsScope() {
  // nothing particular
}

ParallelScavengeHeap::ParStrongRootsScope::~ParStrongRootsScope() {
  // nothing particular
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
    perm_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
    perm_gen()->object_space()->mangle_unused_area();
  }
}
#endif