/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP

#include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/shared/gcUtil.hpp"
#include "memory/defNewGeneration.hpp"

inline void CMSBitMap::clear_all() {
  assert_locked();
  // CMS bitmaps usually cover large memory regions.
  _bm.clear_large();
  return;
}

inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
  return (pointer_delta(addr, _bmStartWord)) >> _shifter;
}

inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
  return _bmStartWord + (offset << _shifter);
}

inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return diff >> _shifter;
}

inline void CMSBitMap::mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.set_bit(heapWordToOffset(addr));
}

inline bool CMSBitMap::par_mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.par_at_put(heapWordToOffset(addr), true);
}

inline void CMSBitMap::par_clear(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
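  // Atomically clear the bit for addr (cf. par_mark() above, which
  // atomically sets it).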
  _bm.par_at_put(heapWordToOffset(addr), false);
}

inline void CMSBitMap::mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::small_range);
}

inline void CMSBitMap::clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::small_range);
}

inline void CMSBitMap::par_mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::small_range);
}

inline void CMSBitMap::par_clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::small_range);
}

inline void CMSBitMap::mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::large_range);
}

inline void CMSBitMap::clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::large_range);
}

inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::large_range);
}

inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::large_range);
}

// Starting at "addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
  return getAndClearMarkedRegion(addr, endWord());
}

// Starting at "start_addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region
// lying strictly below end_addr.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
                                                    HeapWord* end_addr) {
  HeapWord *start, *end;
  assert_locked();
  start = getNextMarkedWordAddress  (start_addr, end_addr);
  end   = getNextUnmarkedWordAddress(start, end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clear_range(mr);
  }
  return mr;
}

inline bool CMSBitMap::isMarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}

// The same as isMarked() but without a lock check.
inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}


inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return !_bm.at(heapWordToOffset(addr));
}

// Return the HeapWord address corresponding to the next "1" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
  return getNextMarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "1" bit
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
    HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_one_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_one postcondition");
  assert((nextAddr == end_addr) ||
         isMarked(nextAddr), "get_next_one postcondition");
  return nextAddr;
}


// Return the HeapWord address corresponding to the next "0" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
  return getNextUnmarkedWordAddress(addr, endWord());
}

// Return the HeapWord address corresponding to the next "0" bit, starting
// at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
    HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_zero_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_zero postcondition");
  assert((nextAddr == end_addr) ||
         isUnmarked(nextAddr), "get_next_zero postcondition");
  return nextAddr;
}

inline bool CMSBitMap::isAllClear() const {
  assert_locked();
  return getNextMarkedWordAddress(startWord()) >= endWord();
}

inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
                               HeapWord* right) {
  assert_locked();
  left = MAX2(_bmStartWord, left);
  right = MIN2(_bmStartWord + _bmWordSize, right);
  if (right > left) {
    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
  }
}

inline void CMSCollector::start_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::start_icms();
  }
}

inline void CMSCollector::stop_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::stop_icms();
  }
}

inline void CMSCollector::disable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::disable_icms();
  }
}

inline void CMSCollector::enable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::enable_icms();
  }
}

inline void CMSCollector::icms_wait() {
  if (CMSIncrementalMode) {
    cmsThread()->icms_wait();
  }
}

inline void CMSCollector::save_sweep_limits() {
  _cmsGen->save_sweep_limit();
  _permGen->save_sweep_limit();
}

inline bool CMSCollector::is_dead_obj(oop obj) const {
  HeapWord* addr = (HeapWord*)obj;
  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
          && _cmsGen->cmsSpace()->block_is_obj(addr))
         ||
         (_permGen->cmsSpace()->is_in_reserved(addr)
          && _permGen->cmsSpace()->block_is_obj(addr)),
         "must be object");
  return  should_unload_classes() &&
          _collectorState == Sweeping &&
         !_markBitMap.isMarked(addr);
}

inline bool CMSCollector::should_abort_preclean() const {
  // We are in the midst of an "abortable preclean" and either
  // scavenge is done or foreground GC wants to take over collection
  return _collectorState == AbortablePreclean &&
         (_abort_preclean || _foregroundGCIsActive ||
          GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
}

inline size_t CMSCollector::get_eden_used() const {
  return _young_gen->as_DefNewGeneration()->eden()->used();
}

inline size_t CMSCollector::get_eden_capacity() const {
  return _young_gen->as_DefNewGeneration()->eden()->capacity();
}

inline bool CMSStats::valid() const {
  return _valid_bits == _ALL_VALID;
}
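
// Note: the record_* methods below fold each new sample into exponentially
// decaying averages via AdaptiveWeightedAverage::exp_avg() (see gcUtil.hpp),
// weighted by the corresponding _gc0_alpha / _cms_alpha factors. These
// averages back the promotion, allocation and consumption rate accessors
// further down.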

inline void CMSStats::record_gc0_begin() {
  if (_gc0_begin_time.is_updated()) {
    float last_gc0_period = _gc0_begin_time.seconds();
    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
                                                   last_gc0_period, _gc0_alpha);
    _gc0_alpha = _saved_alpha;
    _valid_bits |= _GC0_VALID;
  }
  _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();

  _gc0_begin_time.update();
}

inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
  float last_gc0_duration = _gc0_begin_time.seconds();
  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
                                                   last_gc0_duration, _gc0_alpha);

  // Amount promoted.
  _cms_used_at_gc0_end = cms_gen_bytes_used;

  size_t promoted_bytes = 0;
  if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
    promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
  }

  // If the younger gen collections were skipped, then the
  // number of promoted bytes will be 0 and adding it to the
  // average will incorrectly lessen the average. It is, however,
  // also possible that no promotion was needed.
  //
  // _gc0_promoted used to be calculated as
  // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
  //     promoted_bytes, _gc0_alpha);
  _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
  _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();

  // Amount directly allocated.
  size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
  _cms_gen->reset_direct_allocated_words();
  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
                                                    allocated_bytes, _gc0_alpha);
}

inline void CMSStats::record_cms_begin() {
  _cms_timer.stop();

  // This is just an approximate value, but is good enough.
  _cms_used_at_cms_begin = _cms_used_at_gc0_end;

  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
                                                 (float) _cms_timer.seconds(), _cms_alpha);
  _cms_begin_time.update();

  _cms_timer.reset();
  _cms_timer.start();
}

inline void CMSStats::record_cms_end() {
  _cms_timer.stop();

  float cur_duration = _cms_timer.seconds();
  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
                                                   cur_duration, _cms_alpha);

  // Avoid division by 0.
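  // Clamping the footprint to at least 1 MB keeps the per-MB duration sample
  // below finite even when the CMS generation is nearly empty.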
  const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
  _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
                                                          cur_duration / cms_used_mb,
                                                          _cms_alpha);

  _cms_end_time.update();
  _cms_alpha = _saved_alpha;
  _allow_duty_cycle_reduction = true;
  _valid_bits |= _CMS_VALID;

  _cms_timer.start();
}

inline double CMSStats::cms_time_since_begin() const {
  return _cms_begin_time.seconds();
}

inline double CMSStats::cms_time_since_end() const {
  return _cms_end_time.seconds();
}

inline double CMSStats::promotion_rate() const {
  assert(valid(), "statistics not valid yet");
  return gc0_promoted() / gc0_period();
}

inline double CMSStats::cms_allocation_rate() const {
  assert(valid(), "statistics not valid yet");
  return cms_allocated() / gc0_period();
}

inline double CMSStats::cms_consumption_rate() const {
  assert(valid(), "statistics not valid yet");
  return (gc0_promoted() + cms_allocated()) / gc0_period();
}

inline unsigned int CMSStats::icms_update_duty_cycle() {
  // Update the duty cycle only if pacing is enabled and the stats are valid
  // (after at least one young gen gc and one cms cycle have completed).
  if (CMSIncrementalPacing && valid()) {
    return icms_update_duty_cycle_impl();
  }
  return _icms_duty_cycle;
}

inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
  cmsSpace()->save_sweep_limit();
}

inline size_t ConcurrentMarkSweepGeneration::capacity() const {
  return _cmsSpace->capacity();
}

inline size_t ConcurrentMarkSweepGeneration::used() const {
  return _cmsSpace->used();
}

inline size_t ConcurrentMarkSweepGeneration::free() const {
  return _cmsSpace->free();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
  return _cmsSpace->used_region();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
  return _cmsSpace->used_region_at_save_marks();
}

inline void MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

inline void Par_MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

// A return value of "true" indicates that the ongoing preclean
// should be aborted.
inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
    return _collector->should_abort_preclean();
  }
  return false;
}

inline void SurvivorSpacePrecleanClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
  }
}

inline void SweepClosure::do_yield_check(HeapWord* addr) {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work(addr);
  }
}

inline void MarkRefsIntoAndScanClosure::do_yield_check() {
  // The conditions are ordered for the remarking phase
  // when _yield is false.
  if (_yield &&
      !_collector->foregroundGCIsActive() &&
      ConcurrentMarkSweepThread::should_yield()) {
    do_yield_work();
  }
}


inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                CardTableModRefBS::card_size /* bytes */));
  _t->mark_range(mr2);
}

inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                CardTableModRefBS::card_size /* bytes */));
  _t->par_mark_range(mr2);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP