src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp

author:      xdono
date:        Wed, 02 Jul 2008 12:55:16 -0700
changeset:   631:d1605aabd0a1
parent:      529:0834225a7916
child:       1907:c18cbe5936b8
permissions: -rw-r--r--

6719955: Update copyright year
Summary: Update copyright year for files that have been modified in 2008
Reviewed-by: ohair, tbell

/*
 * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
inline void CMSBitMap::clear_all() {
  assert_locked();
  // CMS bitmaps usually cover large memory regions
  _bm.clear_large();
  return;
}
inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
  return (pointer_delta(addr, _bmStartWord)) >> _shifter;
}

inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
  return _bmStartWord + (offset << _shifter);
}
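// Worked example (assuming a _shifter of 0, i.e. one bit per HeapWord, as
// is the case for the CMS mark bitmap): addr == _bmStartWord + 17 maps to
// offset 17, and offsetToHeapWord(17) maps back to _bmStartWord + 17.
// With a _shifter of k, each bit covers 2^k heap words, so only every
// 2^k-th word address is representable (see the alignment assert below).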
inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return diff >> _shifter;
}
inline void CMSBitMap::mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.set_bit(heapWordToOffset(addr));
}

inline bool CMSBitMap::par_mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.par_at_put(heapWordToOffset(addr), true);
}

inline void CMSBitMap::par_clear(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.par_at_put(heapWordToOffset(addr), false);
}
inline void CMSBitMap::mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::small_range);
}

inline void CMSBitMap::clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::small_range);
}

inline void CMSBitMap::par_mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::small_range);
}

inline void CMSBitMap::par_clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::small_range);
}
inline void CMSBitMap::mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::large_range);
}

inline void CMSBitMap::clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::large_range);
}

inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::large_range);
}

inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::large_range);
}
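// The small_range/large_range arguments above are hints telling BitMap how
// to process the range; presumably small_range favors bit-at-a-time updates
// while large_range lets BitMap fill whole words in bulk, which would be
// why the *_large_range variants require ranges longer than 32 bytes.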
// Starting at "addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
  return getAndClearMarkedRegion(addr, endWord());
}

// Starting at "start_addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region
// lying strictly below end_addr.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
                                                    HeapWord* end_addr) {
  HeapWord *start, *end;
  assert_locked();
  start = getNextMarkedWordAddress  (start_addr, end_addr);
  end   = getNextUnmarkedWordAddress(start,      end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clear_range(mr);
  }
  return mr;
}
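// For example, if exactly the bits for [start_addr, start_addr + 2) are set
// within [start_addr, end_addr), the two searches above yield
// start == start_addr and end == start_addr + 2; that two-word region is
// cleared and returned.  If nothing is marked, both searches return
// end_addr and the empty region [end_addr, end_addr) comes back with no
// bits touched.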
inline bool CMSBitMap::isMarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}

// The same as isMarked() but without a lock check.
inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}
inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return !_bm.at(heapWordToOffset(addr));
}
// Return the HeapWord address corresponding to the next "1" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
  return getNextMarkedWordAddress(addr, endWord());
}
// Return the least HeapWord address corresponding to the next "1" bit
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_one_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_one postcondition");
  assert((nextAddr == end_addr) ||
         isMarked(nextAddr), "get_next_one postcondition");
  return nextAddr;
}
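// Note the sentinel convention established by the asserts above: when no
// "1" bit exists in [start_addr, end_addr), the search returns end_addr
// itself, so callers (e.g. isAllClear() below) only need to compare the
// result against the end of the range.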
// Return the HeapWord address corresponding to the next "0" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
  return getNextUnmarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "0" bit
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_zero_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_zero postcondition");
  assert((nextAddr == end_addr) ||
         isUnmarked(nextAddr), "get_next_zero postcondition");
  return nextAddr;
}
inline bool CMSBitMap::isAllClear() const {
  assert_locked();
  return getNextMarkedWordAddress(startWord()) >= endWord();
}
inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
                               HeapWord* right) {
  assert_locked();
  left = MAX2(_bmStartWord, left);
  right = MIN2(_bmStartWord + _bmWordSize, right);
  if (right > left) {
    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
  }
}
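// The clamping above means callers may pass an unclipped [left, right)
// interval; anything outside the bitmap's coverage is ignored, and an
// interval entirely outside it degenerates to right <= left, so no
// iteration occurs.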
inline void CMSCollector::start_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::start_icms();
  }
}

inline void CMSCollector::stop_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::stop_icms();
  }
}

inline void CMSCollector::disable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::disable_icms();
  }
}

inline void CMSCollector::enable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::enable_icms();
  }
}

inline void CMSCollector::icms_wait() {
  if (CMSIncrementalMode) {
    cmsThread()->icms_wait();
  }
}
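// The five wrappers above make the incremental-mode entry points safe to
// call unconditionally: each is a no-op unless CMSIncrementalMode is set,
// so call sites need not test the flag themselves.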
inline void CMSCollector::save_sweep_limits() {
  _cmsGen->save_sweep_limit();
  _permGen->save_sweep_limit();
}
inline bool CMSCollector::is_dead_obj(oop obj) const {
  HeapWord* addr = (HeapWord*)obj;
  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
          && _cmsGen->cmsSpace()->block_is_obj(addr))
         ||
         (_permGen->cmsSpace()->is_in_reserved(addr)
          && _permGen->cmsSpace()->block_is_obj(addr)),
         "must be object");
  return  should_unload_classes() &&
          _collectorState == Sweeping &&
         !_markBitMap.isMarked(addr);
}
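// An object can be pronounced dead only while sweeping, when the mark
// bitmap reflects a completed marking cycle and an unmarked block is known
// to be unreachable; the should_unload_classes() test presumably restricts
// this further to cycles that actually reclaim perm-gen objects.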
inline bool CMSCollector::should_abort_preclean() const {
  // We are in the midst of an "abortable preclean" and either
  // scavenge is done, foreground GC wants to take over collection,
  // or an incremental collection is expected to fail.
  return _collectorState == AbortablePreclean &&
         (_abort_preclean || _foregroundGCIsActive ||
          GenCollectedHeap::heap()->incremental_collection_will_fail());
}
inline size_t CMSCollector::get_eden_used() const {
  return _young_gen->as_DefNewGeneration()->eden()->used();
}

inline size_t CMSCollector::get_eden_capacity() const {
  return _young_gen->as_DefNewGeneration()->eden()->capacity();
}
inline bool CMSStats::valid() const {
  return _valid_bits == _ALL_VALID;
}
inline void CMSStats::record_gc0_begin() {
  if (_gc0_begin_time.is_updated()) {
    float last_gc0_period = _gc0_begin_time.seconds();
    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
      last_gc0_period, _gc0_alpha);
    _gc0_alpha = _saved_alpha;
    _valid_bits |= _GC0_VALID;
  }
  _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();

  _gc0_begin_time.update();
}
inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
  float last_gc0_duration = _gc0_begin_time.seconds();
  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
    last_gc0_duration, _gc0_alpha);

  // Amount promoted.
  _cms_used_at_gc0_end = cms_gen_bytes_used;

  size_t promoted_bytes = 0;
  if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
    promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
  }

  // If the younger gen collections were skipped, then the
  // number of promoted bytes will be 0 and adding it to the
  // average will incorrectly lessen the average.  It is, however,
  // also possible that no promotion was needed.
  //
  // _gc0_promoted used to be calculated as
  // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
  //  promoted_bytes, _gc0_alpha);
  _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
  _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();

  // Amount directly allocated.
  size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
  _cms_gen->reset_direct_allocated_words();
  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
    allocated_bytes, _gc0_alpha);
}
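// For reference, AdaptiveWeightedAverage::exp_avg() blends the old average
// with the new sample using the alpha weight as a percentage, roughly:
//   new_avg = ((100 - alpha) * old_avg + alpha * sample) / 100
// e.g. with alpha == 25, an old average of 4 and a sample of 8 give
// 0.75 * 4 + 0.25 * 8 == 5.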
inline void CMSStats::record_cms_begin() {
  _cms_timer.stop();

  // This is just an approximate value, but is good enough.
  _cms_used_at_cms_begin = _cms_used_at_gc0_end;

  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
    (float) _cms_timer.seconds(), _cms_alpha);
  _cms_begin_time.update();

  _cms_timer.reset();
  _cms_timer.start();
}
inline void CMSStats::record_cms_end() {
  _cms_timer.stop();

  float cur_duration = _cms_timer.seconds();
  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
    cur_duration, _cms_alpha);

  // Avoid division by 0.
  const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
  _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
                                 cur_duration / cms_used_mb,
                                 _cms_alpha);

  _cms_end_time.update();
  _cms_alpha = _saved_alpha;
  _allow_duty_cycle_reduction = true;
  _valid_bits |= _CMS_VALID;

  _cms_timer.start();
}
inline double CMSStats::cms_time_since_begin() const {
  return _cms_begin_time.seconds();
}

inline double CMSStats::cms_time_since_end() const {
  return _cms_end_time.seconds();
}
inline double CMSStats::promotion_rate() const {
  assert(valid(), "statistics not valid yet");
  return gc0_promoted() / gc0_period();
}

inline double CMSStats::cms_allocation_rate() const {
  assert(valid(), "statistics not valid yet");
  return cms_allocated() / gc0_period();
}

inline double CMSStats::cms_consumption_rate() const {
  assert(valid(), "statistics not valid yet");
  return (gc0_promoted() + cms_allocated()) / gc0_period();
}
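// All three rates are in bytes per second: the exponentially averaged byte
// counts (promoted into, or directly allocated in, the CMS generation per
// young-gen collection) divided by the averaged young-gen collection
// period in seconds.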
inline unsigned int CMSStats::icms_update_duty_cycle() {
  // Update the duty cycle only if pacing is enabled and the stats are valid
  // (after at least one young gen gc and one cms cycle have completed).
  if (CMSIncrementalPacing && valid()) {
    return icms_update_duty_cycle_impl();
  }
  return _icms_duty_cycle;
}
inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
  cmsSpace()->save_sweep_limit();
}

inline size_t ConcurrentMarkSweepGeneration::capacity() const {
  return _cmsSpace->capacity();
}

inline size_t ConcurrentMarkSweepGeneration::used() const {
  return _cmsSpace->used();
}

inline size_t ConcurrentMarkSweepGeneration::free() const {
  return _cmsSpace->free();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
  return _cmsSpace->used_region();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
  return _cmsSpace->used_region_at_save_marks();
}
inline void MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

inline void Par_MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}
// Return value of "true" indicates that the on-going preclean
// should be aborted.
inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
    return _collector->should_abort_preclean();
  }
  return false;
}
inline void SurvivorSpacePrecleanClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
  }
}
inline void SweepClosure::do_yield_check(HeapWord* addr) {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work(addr);
  }
}
inline void MarkRefsIntoAndScanClosure::do_yield_check() {
  // The conditions are ordered so that during the remarking phase,
  // when _yield is false, the cheap _yield test fails first and the
  // more expensive checks are never evaluated.
  if (_yield &&
      !_collector->foregroundGCIsActive() &&
      ConcurrentMarkSweepThread::should_yield()) {
    do_yield_work();
  }
}
inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->mark_range(mr2);
}

inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->par_mark_range(mr2);
}
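// Worked example (assuming the usual 512-byte card size): a region ending
// at 0x1234 has its end rounded up by round_to() to 0x1400, the next card
// boundary, so the mod-union bits recorded always cover whole cards.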
