/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/defNewGeneration.inline.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/space.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/stack.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

//
// DefNewGeneration functions.

// Methods of protected closure types.
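// For orientation: the scan closures below all make the same basic decision
// when they visit a reference during a scavenge.  Roughly (an illustrative
// sketch, not the real implementation, which lives in the genOopClosures
// inline code):
//
//   oop obj = *p;
//   if (obj != NULL && (HeapWord*)obj < _boundary) {   // points into young gen
//     *p = obj->is_forwarded() ? obj->forwardee()
//                              : _g->copy_to_survivor_space(obj);
//   }
//
// References at or above _boundary (the end of this generation's reserved
// space) are outside the young generation and are left alone.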

DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* g) : _g(g) {
  assert(g->level() == 0, "Optimized for youngest gen.");
}
bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
  return (HeapWord*)p >= _g->reserved().end() || p->is_forwarded();
}

DefNewGeneration::KeepAliveClosure::
KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
  _rs = (CardTableRS*)rs;
}

void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::KeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }


DefNewGeneration::FastKeepAliveClosure::
FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {
  _boundary = g->reserved().end();
}

void DefNewGeneration::FastKeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }
void DefNewGeneration::FastKeepAliveClosure::do_oop(narrowOop* p) { DefNewGeneration::FastKeepAliveClosure::do_oop_work(p); }

DefNewGeneration::EvacuateFollowersClosure::
EvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                         ScanClosure* cur, ScanClosure* older) :
  _gch(gch), _level(level),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::EvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}

DefNewGeneration::FastEvacuateFollowersClosure::
FastEvacuateFollowersClosure(GenCollectedHeap* gch, int level,
                             DefNewGeneration* gen,
                             FastScanClosure* cur, FastScanClosure* older) :
  _gch(gch), _level(level), _gen(gen),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{}

void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
  do {
    _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
  guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan");
}

ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanClosure::do_oop(oop* p)       { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }

FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
    OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}
void FastScanClosure::do_oop(oop* p)       { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }

void KlassScanClosure::do_klass(Klass* klass) {
#ifndef PRODUCT
  if (TraceScavenge) {
    ResourceMark rm;
    gclog_or_tty->print_cr("KlassScanClosure::do_klass %p, %s, dirty: %s",
                           klass,
                           klass->external_name(),
                           klass->has_modified_oops() ? "true" : "false");
  }
#endif

  // If the klass has not been dirtied we know that there are
  // no references into the young gen and we can skip it.
  if (klass->has_modified_oops()) {
    if (_accumulate_modified_oops) {
      klass->accumulate_modified_oops();
    }

    // Clear this state since we're going to scavenge all the metadata.
    klass->clear_modified_oops();

    // Tell the closure which Klass is being scanned so that it can be dirtied
    // if oops are left pointing into the young gen.
    _scavenge_closure->set_scanned_klass(klass);

    klass->oops_do(_scavenge_closure);

    _scavenge_closure->set_scanned_klass(NULL);
  }
}

ScanWeakRefClosure::ScanWeakRefClosure(DefNewGeneration* g) :
  _g(g)
{
  assert(_g->level() == 0, "Optimized for youngest generation");
  _boundary = _g->reserved().end();
}

void ScanWeakRefClosure::do_oop(oop* p)       { ScanWeakRefClosure::do_oop_work(p); }
void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(p); }

void FilteringClosure::do_oop(oop* p)       { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }

KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
                                   KlassRemSet* klass_rem_set)
    : _scavenge_closure(scavenge_closure),
      _accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}


DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
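  // A worked example, for illustration only (the sizes are made up): with a
  // 64M reserved new generation and a computed survivor size of 8M, the
  // formulas below give
  //   _max_survivor_size = 8M          (each of the two survivor spaces)
  //   _max_eden_size     = 64M - 2*8M = 48M
  // so eden receives whatever remains after carving out two equally sized
  // survivor spaces.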
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  uintx alignment =
    GenCollectedHeap::heap()->collector_policy()->space_alignment();

  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
         "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, alignment);
  uintx eden_size = size - (2*survivor_size);
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_size_up(minimum_eden_size, alignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_size_down(maximum_survivor_size, alignment);
    survivor_size = MAX2(unaligned_survivor_size, alignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(Space::is_aligned((HeapWord*)eden_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)from_start), "checking alignment");
  assert(Space::is_aligned((HeapWord*)to_start),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another
    // space; a failure of the check would then not correctly indicate
    // which space is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);

  // Set next compaction spaces.
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;
  eden()->set_next_compaction_space(from());
  // The to-space is normally empty before a compaction so need
  // not be considered.  The exception is during promotion
  // failure handling when to-space can contain live objects.
  from()->set_next_compaction_space(NULL);

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  MutexLocker x(ExpandHeap_lock);
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt to expand to the reserved size.  The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary.  Also, a second expand-to-reserve call
  // can potentially cause an undue expansion: for example,
  // if the first expand fails for unknown reasons but the
  // second succeeds and expands the heap to its maximum
  // value.
  if (GC_locker::is_active()) {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("Garbage collection disabled, "
        "expanded heap instead");
    }
  }

  return success;
}


void DefNewGeneration::compute_new_size() {
  // This is called after a gc that includes the following generation
  // (which is required to exist.)  So from-space will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If not, we bail out (otherwise we would have to relocate the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  int next_level = level() + 1;
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(next_level < gch->_n_gens,
         "DefNewGeneration cannot be an oldest gen");

  Generation* next_gen = gch->_gens[next_level];
  size_t old_size = next_gen->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = spec()->init_size();
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  // Compute desired new generation size based on NewRatio and
  // NewSizeThreadIncrease
  size_t desired_new_size = old_size/NewRatio;
  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = threads_count * NewSizeThreadIncrease;
  desired_new_size = align_size_up(desired_new_size + thread_increase_size, alignment);

  // Adjust new generation size
  desired_new_size = MAX2(MIN2(desired_new_size, max_new_size), min_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // bail out of shrinking if objects in eden
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
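    // Note: eden()->used() is passed as the minimum eden size so that the
    // recomputed boundaries never cut into data still sitting in eden; the
    // shrink case above is only taken when eden is empty.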
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    Universe::heap()->barrier_set()->resize_covered_region(cmr);
    if (Verbose && PrintGC) {
      size_t new_size_after  = _virtual_space.committed_size();
      size_t eden_size_after = eden()->capacity();
      size_t survivor_size_after = from()->capacity();
      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
        SIZE_FORMAT "K [eden="
        SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, new_size_after/K,
        eden_size_after/K, survivor_size_after/K);
      if (WizardMode) {
        gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
      }
      gclog_or_tty->cr();
    }
  }
}

void DefNewGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
  assert(false, "NYI -- are you sure you want to call this?");
}


size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment();
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, alignment);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


HeapWord** DefNewGeneration::top_addr() const { return eden()->top_addr(); }
HeapWord** DefNewGeneration::end_addr() const { return eden()->end_addr(); }

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}


void DefNewGeneration::space_iterate(SpaceClosure* blk,
                                     bool usedOnly) {
  blk->do_space(eden());
  blk->do_space(from());
  blk->do_space(to());
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate the from-space, too.
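// (should_allocate_from_space() is set in gc_epilogue() below, when a full
// collection has completed but the next scavenge still does not look safe;
// the GC_locker check covers allocations made while garbage collection is
// temporarily disabled.  Both are last-resort paths.)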
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  HeapWord* result = NULL;
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
                        " will_fail: %s"
                        " heap_lock: %s"
                        " free: " SIZE_FORMAT,
                        size,
                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free());
  }
  if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
    if (Heap_lock->owned_by_self() ||
        (SafepointSynchronize::is_at_safepoint() &&
         Thread::current()->is_VM_thread())) {
      // If the Heap_lock is not locked by this thread, this will be called
      // again later with the Heap_lock held.
      result = from()->allocate(size);
    } else if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  Heap_lock is not owned by self");
    }
  } else if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  should_allocate_from_space: NOT");
  }
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr("  returns %s", result == NULL ? "NULL" : "object");
  }
  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size,
                                                bool   is_tlab,
                                                bool   parallel) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  _tenuring_threshold =
    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
}

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();
  DefNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());

  _next_gen = gch->next_gen(this);

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: Collection attempt not safe :: ");
    }
    gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  init_assuming_no_promotion_failure();

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  gch->trace_heap_before_gc(&gc_tracer);

  SpecializationStats::clear();

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->rem_set()->prepare_for_younger_refs_iterate(false);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  // Not very pretty.
  CollectorPolicy* cp = gch->collector_policy();

  FastScanClosure fsc_with_no_gc_barrier(this, false);
  FastScanClosure fsc_with_gc_barrier(this, true);

  KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
                                      gch->rem_set()->klass_rem_set());
  CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
                                           &fsc_with_no_gc_barrier,
                                           false);

  set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
  FastEvacuateFollowersClosure evacuate_followers(gch, _level, this,
                                                  &fsc_with_no_gc_barrier,
                                                  &fsc_with_gc_barrier);

  assert(gch->no_allocs_since_save_marks(0),
         "save marks have not been newly set.");

  gch->gen_process_roots(_level,
                         true,  // Process younger gens, if any,
                                // as strong roots.
                         true,  // activate StrongRootsScope
                         SharedHeap::SO_ScavengeCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &fsc_with_no_gc_barrier,
                         &fsc_with_gc_barrier,
                         &cld_scan_closure);

  // "evacuate followers".
  evacuate_followers.do_void();

  FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  ReferenceProcessor* rp = ref_processor();
  rp->setup_policy(clear_all_soft_refs);
  const ReferenceProcessorStats& stats =
    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                      NULL, _gc_timer, gc_tracer.gc_id());
  gc_tracer.report_gc_reference_stats(stats);

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
    size_policy->reset_gc_overhead_limit_count();
    if (PrintGC && !PrintGCDetails) {
      gch->print_heap_change(gch_prev_used);
    }
    assert(!gch->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    if (PrintGCDetails) {
      gclog_or_tty->print(" (promotion failed) ");
    }
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    from()->set_next_compaction_space(to());
    gch->set_incremental_collection_failed();

    // Inform the next generation that a promotion failure occurred.
    _next_gen->promotion_failure_occurred();
    gc_tracer.report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  }
  // set new iteration safe limit for the survivor spaces
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());
  SpecializationStats::print();

  // We need to use a monotonically non-decreasing time in ms, or we will see
  // time-warp warnings; os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

class RemoveForwardPointerClosure: public ObjectClosure {
public:
  void do_object(oop obj) {
    obj->init_mark();
  }
};

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
  from()->set_next_compaction_space(NULL);
}

void DefNewGeneration::remove_forwarding_pointers() {
  RemoveForwardPointerClosure rspc;
  eden()->object_iterate(&rspc);
  from()->object_iterate(&rspc);

  // Now restore saved marks, if any.
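  // Only marks that must_be_preserved_for_promotion_failure() reported as
  // non-reconstructible were saved by preserve_mark_if_necessary(); every
  // other object has simply had its header reset to the default mark by
  // init_mark() above.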
  assert(_objs_with_preserved_marks.size() == _preserved_marks_of_objs.size(),
         "should be the same");
  while (!_objs_with_preserved_marks.is_empty()) {
    oop obj   = _objs_with_preserved_marks.pop();
    markOop m = _preserved_marks_of_objs.pop();
    obj->set_mark(m);
  }
  _objs_with_preserved_marks.clear(true);
  _preserved_marks_of_objs.clear(true);
}

void DefNewGeneration::preserve_mark(oop obj, markOop m) {
  assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
         "Oversaving!");
  _objs_with_preserved_marks.push(obj);
  _preserved_marks_of_objs.push(m);
}

void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    preserve_mark(obj, m);
  }
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  if (PrintPromotionFailure && !_promotion_failed) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }
  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(_promo_failure_scan_stack_closure);
  }
}

void DefNewGeneration::save_marks() {
  eden()->set_saved_mark();
  to()->set_saved_mark();
  from()->set_saved_mark();
}
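
// The save-marks protocol that drives the (Fast)EvacuateFollowersClosure
// loop above works roughly like this (an illustrative sketch only):
//
//   save_marks();                              // remember top of each space
//   do {
//     oop_since_save_marks_iterate(cl);        // scan only objects that
//                                              // arrived since the last save
//                                              // (and then save_marks() again)
//   } while (!no_allocs_since_save_marks());   // stop once a pass copies
//                                              // nothing new
//
// Each pass can copy more objects into to-space or promote them, which is why
// the iteration repeats until the spaces stop growing.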

void DefNewGeneration::reset_saved_marks() {
  eden()->reset_saved_mark();
  to()->reset_saved_mark();
  from()->reset_saved_mark();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
  assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
  return to()->saved_mark_at_top();
}

#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                \
void DefNewGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
  cl->set_generation(this);                                     \
  eden()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  to()->oop_since_save_marks_iterate##nv_suffix(cl);            \
  from()->oop_since_save_marks_iterate##nv_suffix(cl);          \
  cl->reset_generation();                                       \
  save_marks();                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN)

#undef DefNew_SINCE_SAVE_MARKS_DEFN

void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                          size_t max_alloc_words) {
  if (requestor == this || _promotion_failed) return;
  assert(requestor->level() > level(), "DefNewGeneration must be youngest");

  /* $$$ Assert this?  "trace" is a "MarkSweep" function so that's not appropriate.
  if (to_space->top() > to_space->bottom()) {
    trace("to_space not empty when contribute_scratch called");
  }
  */

  ContiguousSpace* to_space = to();
  assert(to_space->end() >= to_space->top(), "pointers out of order");
  size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    ScratchBlock* sb = (ScratchBlock*)to_space->top();
    sb->num_words = free_words;
    sb->next = list;
    list = sb;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(" :: to is not empty :: ");
    }
    return false;
  }
  if (_next_gen == NULL) {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    _next_gen = gch->next_gen(this);
  }
  return _next_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GC_locker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at
  // a minimum at the end of a collection.  If it is not, then
  // the heap is approaching full.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      }
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      if (Verbose && PrintGCDetails) {
        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      }
      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
             (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
             !gch->incremental_collection_failed(),
             "Twice in a row");
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  // update the generation and space performance counters
  update_counters();
  gch->collector_policy()->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::ref_processor_init() {
  Generation::ref_processor_init();
}


void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print(" eden");
  eden()->print_on(st);
  st->print(" from");
  from()->print_on(st);
  st->print(" to ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

// Moved from inline file as they are not called inline
CompactibleSpace* DefNewGeneration::first_compaction_space() const {
  return eden();
}

HeapWord* DefNewGeneration::allocate(size_t word_size,
                                     bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result != NULL) {
    if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
      _next_gen->sample_eden_chunk();
    }
    return result;
  }
  do {
    HeapWord* old_limit = eden()->soft_end();
    if (old_limit < eden()->end()) {
      // Tell the next generation we reached a limit.
      HeapWord* new_limit =
        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
      if (new_limit != NULL) {
        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
      } else {
        assert(eden()->soft_end() == eden()->end(),
               "invalid state after allocation_limit_reached returned null");
      }
    } else {
      // The allocation failed and the soft limit is equal to the hard limit,
      // so there is no reason to retry the allocation.
      assert(old_limit == eden()->end(), "sanity check");
      break;
    }
    // Try to allocate until succeeded or the soft limit can't be adjusted
    result = eden()->par_allocate(word_size);
  } while (result == NULL);

  // If the eden is full and the last collection bailed out, we are running
  // out of heap space, and we try to allocate the from-space, too.
  // allocate_from_space can't be inlined because that would introduce a
  // circular dependency at compile time.
  if (result == NULL) {
    result = allocate_from_space(word_size);
  } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
    _next_gen->sample_eden_chunk();
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  HeapWord* res = eden()->par_allocate(word_size);
  if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
    _next_gen->sample_eden_chunk();
  }
  return res;
}

void DefNewGeneration::gc_prologue(bool full) {
  // Ensure that _end and _soft_end are the same in eden space.
  eden()->set_soft_end(eden()->end());
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}
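
// For reference, the mutator-facing allocation paths above compose roughly as
// follows (an informal summary of this file, not additional behavior):
//
//   par_allocate()         - lock-free bump allocation in eden
//   allocate()             - slow path: eden, then the soft-end /
//                            allocation_limit_reached() handshake with the
//                            next generation, then allocate_from_space()
//   allocate_from_space()  - last resort, only when a previous collection
//                            bailed out or GC is locked out
//
// Expansion and collection themselves are requested by GenCollectedHeap and
// the collector policy; this file only implements the per-generation pieces.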