src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp

changeset 435:a61af66fc99e
child     529:0834225a7916
/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

inline void CMSBitMap::clear_all() {
  assert_locked();
  // CMS bitmaps usually cover large memory regions.
  _bm.clear_large();
}

inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
  return (pointer_delta(addr, _bmStartWord)) >> _shifter;
}

inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
  return _bmStartWord + (offset << _shifter);
}

inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return diff >> _shifter;
}
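
// Editor's note (illustrative, not in the original source): with
// _shifter == 0 each heap word maps to one bit; a non-zero _shifter
// coarsens the map.  For example, assuming _shifter == 0:
//
//   HeapWord* addr   = _bmStartWord + 17;
//   size_t    offset = heapWordToOffset(addr);      // == 17
//   assert(offsetToHeapWord(offset) == addr, "round trip");
//
// heapWordDiffToOffsetDiff() additionally requires its argument to be a
// multiple of (1 << _shifter), which its assert checks.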

inline void CMSBitMap::mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.set_bit(heapWordToOffset(addr));
}

inline bool CMSBitMap::par_mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.par_at_put(heapWordToOffset(addr), true);
}
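
// Editor's note: par_mark() uses an atomic bit set (par_at_put) so that
// multiple GC threads may race to mark the same word; the result reports
// whether *this* thread flipped the bit.  A hypothetical caller in a
// parallel marking loop might claim objects like this (sketch only;
// work_queue is illustrative):
//
//   if (bit_map->par_mark(addr)) {
//     // We won the race; only the winner pushes the object for scanning.
//     work_queue->push(oop(addr));
//   }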

inline void CMSBitMap::par_clear(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.par_at_put(heapWordToOffset(addr), false);
}

inline void CMSBitMap::mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::small_range);
}

inline void CMSBitMap::clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::small_range);
}

inline void CMSBitMap::par_mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::small_range);
}

inline void CMSBitMap::par_clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::small_range);
}

inline void CMSBitMap::mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::large_range);
}

inline void CMSBitMap::clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::large_range);
}

inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::large_range);
}

inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::large_range);
}
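
// Editor's note: BitMap::small_range and BitMap::large_range are hints
// about the expected range length.  small_range favors the common case of
// a handful of bits; large_range lets the bitmap work a word at a time
// and (per the comments above) is only legal for ranges of more than 32
// bytes.  A caller that knows the size of mr picks accordingly (sketch;
// the threshold name is illustrative):
//
//   if (mr.byte_size() > large_range_threshold) {
//     bit_map->mark_large_range(mr);
//   } else {
//     bit_map->mark_range(mr);
//   }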

// Starting at "addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
  return getAndClearMarkedRegion(addr, endWord());
}

// Starting at "start_addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region
// that lies strictly below end_addr.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
                                                    HeapWord* end_addr) {
  HeapWord *start, *end;
  assert_locked();
  start = getNextMarkedWordAddress  (start_addr, end_addr);
  end   = getNextUnmarkedWordAddress(start,      end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clear_range(mr);
  }
  return mr;
}
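
// Editor's sketch of a typical caller: consume every marked region in
// [start, end) by repeatedly taking and clearing the leftmost one.
// Illustrative only; assumes the caller holds the bitmap lock, and
// process() stands in for real per-region work:
//
//   for (MemRegion mr = bm->getAndClearMarkedRegion(start, end);
//        !mr.is_empty();
//        mr = bm->getAndClearMarkedRegion(mr.end(), end)) {
//     process(mr);
//   }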

inline bool CMSBitMap::isMarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}

// The same as isMarked() but without a lock check.
inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}

inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return !_bm.at(heapWordToOffset(addr));
}

// Return the HeapWord address corresponding to the next "1" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
  return getNextMarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "1" bit,
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_one_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_one postcondition");
  assert((nextAddr == end_addr) ||
         isMarked(nextAddr), "get_next_one postcondition");
  return nextAddr;
}

// Return the HeapWord address corresponding to the next "0" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
  return getNextUnmarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "0" bit,
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_zero_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_zero postcondition");
  assert((nextAddr == end_addr) ||
          isUnmarked(nextAddr), "get_next_zero postcondition");
  return nextAddr;
}

inline bool CMSBitMap::isAllClear() const {
  assert_locked();
  return getNextMarkedWordAddress(startWord()) >= endWord();
}

inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
                               HeapWord* right) {
  assert_locked();
  left = MAX2(_bmStartWord, left);
  right = MIN2(_bmStartWord + _bmWordSize, right);
  if (right > left) {
    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
  }
}
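
// Editor's sketch: iterate() clips [left, right) to the bitmap's span and
// visits each marked bit through a BitMapClosure.  A hypothetical closure
// that counts marked words could look like this (the do_bit() signature
// is assumed from this era's BitMapClosure):
//
//   class CountMarkedClosure: public BitMapClosure {
//     size_t _count;
//    public:
//     CountMarkedClosure() : _count(0) {}
//     void do_bit(size_t offset) { _count++; }
//     size_t count() const { return _count; }
//   };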

inline void CMSCollector::start_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::start_icms();
  }
}

inline void CMSCollector::stop_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::stop_icms();
  }
}

inline void CMSCollector::disable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::disable_icms();
  }
}

inline void CMSCollector::enable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::enable_icms();
  }
}

inline void CMSCollector::icms_wait() {
  if (CMSIncrementalMode) {
    cmsThread()->icms_wait();
  }
}

inline void CMSCollector::save_sweep_limits() {
  _cmsGen->save_sweep_limit();
  _permGen->save_sweep_limit();
}

inline bool CMSCollector::is_dead_obj(oop obj) const {
  HeapWord* addr = (HeapWord*)obj;
  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
          && _cmsGen->cmsSpace()->block_is_obj(addr))
         ||
         (_permGen->cmsSpace()->is_in_reserved(addr)
          && _permGen->cmsSpace()->block_is_obj(addr)),
         "must be object");
  return  cms_should_unload_classes() &&
          _collectorState == Sweeping &&
         !_markBitMap.isMarked(addr);
}

inline bool CMSCollector::should_abort_preclean() const {
  // We are in the midst of an "abortable preclean" and either
  // a scavenge is done or a foreground GC wants to take over collection.
  return _collectorState == AbortablePreclean &&
         (_abort_preclean || _foregroundGCIsActive ||
          GenCollectedHeap::heap()->incremental_collection_will_fail());
}
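
// Editor's note: ScanMarkedObjectsAgainCarefullyClosure::do_yield_check()
// (below) returns this predicate, so an abortable preclean is cut short as
// soon as any of the three conditions holds: a scavenge has completed
// (_abort_preclean), a foreground collection is waiting to take over
// (_foregroundGCIsActive), or the heap predicts that the next incremental
// collection will fail.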

inline size_t CMSCollector::get_eden_used() const {
  return _young_gen->as_DefNewGeneration()->eden()->used();
}

inline size_t CMSCollector::get_eden_capacity() const {
  return _young_gen->as_DefNewGeneration()->eden()->capacity();
}

inline bool CMSStats::valid() const {
  return _valid_bits == _ALL_VALID;
}

inline void CMSStats::record_gc0_begin() {
  if (_gc0_begin_time.is_updated()) {
    float last_gc0_period = _gc0_begin_time.seconds();
    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
      last_gc0_period, _gc0_alpha);
    _gc0_alpha = _saved_alpha;
    _valid_bits |= _GC0_VALID;
  }
  _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();

  _gc0_begin_time.update();
}

inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
  float last_gc0_duration = _gc0_begin_time.seconds();
  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
    last_gc0_duration, _gc0_alpha);

  // Amount promoted.
  _cms_used_at_gc0_end = cms_gen_bytes_used;

  size_t promoted_bytes = 0;
  if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
    promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
  }

  // If the young gen collections were skipped, promoted_bytes will be 0
  // and adding it to the average would incorrectly lessen the average.
  // It is, however, also possible that no promotion was needed.
  //
  // _gc0_promoted used to be calculated as
  // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
  //  promoted_bytes, _gc0_alpha);
  _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
  _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();

  // Amount directly allocated.
  size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
  _cms_gen->reset_direct_allocated_words();
  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
    allocated_bytes, _gc0_alpha);
}
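
// Editor's note: AdaptiveWeightedAverage::exp_avg blends the previous
// average with the new sample using the alpha weight (a percentage).
// Assuming the usual definition,
//
//   exp_avg(avg, sample, w) == ((100 - w) * avg + w * sample) / 100
//
// then with _gc0_alpha == 25, avg == 100ms and sample == 200ms the new
// average is (75 * 100 + 25 * 200) / 100 == 125ms: recent samples move
// the estimate, but gradually.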

inline void CMSStats::record_cms_begin() {
  _cms_timer.stop();

  // This is just an approximate value, but is good enough.
  _cms_used_at_cms_begin = _cms_used_at_gc0_end;

  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
    (float) _cms_timer.seconds(), _cms_alpha);
  _cms_begin_time.update();

  _cms_timer.reset();
  _cms_timer.start();
}

inline void CMSStats::record_cms_end() {
  _cms_timer.stop();

  float cur_duration = _cms_timer.seconds();
  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
    cur_duration, _cms_alpha);

  // Avoid division by 0.
  const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
  _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
                                 cur_duration / cms_used_mb,
                                 _cms_alpha);

  _cms_end_time.update();
  _cms_alpha = _saved_alpha;
  _allow_duty_cycle_reduction = true;
  _valid_bits |= _CMS_VALID;

  _cms_timer.start();
}

inline double CMSStats::cms_time_since_begin() const {
  return _cms_begin_time.seconds();
}

inline double CMSStats::cms_time_since_end() const {
  return _cms_end_time.seconds();
}

inline double CMSStats::promotion_rate() const {
  assert(valid(), "statistics not valid yet");
  return gc0_promoted() / gc0_period();
}

inline double CMSStats::cms_allocation_rate() const {
  assert(valid(), "statistics not valid yet");
  return cms_allocated() / gc0_period();
}

inline double CMSStats::cms_consumption_rate() const {
  assert(valid(), "statistics not valid yet");
  return (gc0_promoted() + cms_allocated()) / gc0_period();
}
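
// Editor's note: the three rates above are bytes per second, since
// gc0_period() is the average wall-clock time between young collections.
// Worked example (numbers illustrative): if on average 4 MB are promoted
// and 1 MB is directly allocated into the CMS gen per 2-second gc0
// period, cms_consumption_rate() is (4M + 1M) / 2.0 == 2.5 MB/s.
// Figures like these feed the duty-cycle update below and the decision
// of when to start a CMS cycle.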

inline unsigned int CMSStats::icms_update_duty_cycle() {
  // Update the duty cycle only if pacing is enabled and the stats are valid
  // (after at least one young gen GC and one CMS cycle have completed).
  if (CMSIncrementalPacing && valid()) {
    return icms_update_duty_cycle_impl();
  }
  return _icms_duty_cycle;
}

inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
  cmsSpace()->save_sweep_limit();
}

inline size_t ConcurrentMarkSweepGeneration::capacity() const {
  return _cmsSpace->capacity();
}

inline size_t ConcurrentMarkSweepGeneration::used() const {
  return _cmsSpace->used();
}

inline size_t ConcurrentMarkSweepGeneration::free() const {
  return _cmsSpace->free();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
  return _cmsSpace->used_region();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
  return _cmsSpace->used_region_at_save_marks();
}

inline void MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

inline void Par_MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

// A return value of "true" indicates that the on-going preclean
// should be aborted.
inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield.
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
    return _collector->should_abort_preclean();
  }
  return false;
}

inline void SurvivorSpacePrecleanClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield.
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
  }
}

inline void SweepClosure::do_yield_check(HeapWord* addr) {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work(addr);
  }
}

inline void MarkRefsIntoAndScanClosure::do_yield_check() {
  // The conditions are ordered for the remark phase,
  // during which _yield is false.
  if (_yield &&
      !_collector->foregroundGCIsActive() &&
      ConcurrentMarkSweepThread::should_yield()) {
    do_yield_work();
  }
}
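
// Editor's note on the ordering above: during the (stop-world) remark
// phase _yield is false, so testing it first lets the whole condition
// short-circuit on a single cheap load instead of consulting the CMS
// thread and the collector.  The concurrent variants above presumably
// put should_yield() first because _yield is normally true for them.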

inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->mark_range(mr2);
}
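
// Editor's note: a worked example of the card alignment above, assuming
// the usual card_size of 512 bytes.  If mr.end() is 0x1234, round_to
// rounds it up to the next card boundary, so the marked range always
// covers whole cards:
//
//   round_to(0x1234, 512) == 0x1400
//
// Marking the extra tail is harmless for the mod-union table, which is a
// conservative summary, but as the XXX above notes the rounding is only
// strictly needed at the end of the space.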

inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                 CardTableModRefBS::card_size /* bytes */));
  _t->par_mark_range(mr2);
}
