src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp

     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,523 @@
     1.4 +/*
     1.5 + * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
    1.29 +#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
    1.30 +
    1.31 +#include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
    1.32 +#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
    1.33 +#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
    1.34 +#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
    1.35 +#include "gc_implementation/shared/gcUtil.hpp"
    1.36 +#include "memory/defNewGeneration.hpp"
    1.37 +
    1.38 +inline void CMSBitMap::clear_all() {
    1.39 +  assert_locked();
     1.40 +  // CMS bitmaps usually cover large memory regions.
    1.41 +  _bm.clear_large();
    1.42 +  return;
    1.43 +}
    1.44 +
    1.45 +inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
    1.46 +  return (pointer_delta(addr, _bmStartWord)) >> _shifter;
    1.47 +}
    1.48 +
    1.49 +inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
    1.50 +  return _bmStartWord + (offset << _shifter);
    1.51 +}
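          +
          +// Illustrative note (added commentary, not in the original source):
          +// these two helpers are inverses.  With _shifter == 0 each bit covers
          +// one HeapWord, so for a hypothetical _bmStartWord of 0x1000 and
          +// 8-byte HeapWords, heapWordToOffset((HeapWord*)0x1040) == 8 and
          +// offsetToHeapWord(8) yields 0x1040 again.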
    1.52 +
    1.53 +inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
    1.54 +  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
    1.55 +  return diff >> _shifter;
    1.56 +}
    1.57 +
    1.58 +inline void CMSBitMap::mark(HeapWord* addr) {
    1.59 +  assert_locked();
    1.60 +  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
    1.61 +         "outside underlying space?");
    1.62 +  _bm.set_bit(heapWordToOffset(addr));
    1.63 +}
    1.64 +
    1.65 +inline bool CMSBitMap::par_mark(HeapWord* addr) {
    1.66 +  assert_locked();
    1.67 +  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
    1.68 +         "outside underlying space?");
    1.69 +  return _bm.par_at_put(heapWordToOffset(addr), true);
    1.70 +}
    1.71 +
    1.72 +inline void CMSBitMap::par_clear(HeapWord* addr) {
    1.73 +  assert_locked();
    1.74 +  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
    1.75 +         "outside underlying space?");
    1.76 +  _bm.par_at_put(heapWordToOffset(addr), false);
    1.77 +}
    1.78 +
    1.79 +inline void CMSBitMap::mark_range(MemRegion mr) {
    1.80 +  NOT_PRODUCT(region_invariant(mr));
    1.81 +  // Range size is usually just 1 bit.
    1.82 +  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
    1.83 +                BitMap::small_range);
    1.84 +}
    1.85 +
    1.86 +inline void CMSBitMap::clear_range(MemRegion mr) {
    1.87 +  NOT_PRODUCT(region_invariant(mr));
    1.88 +  // Range size is usually just 1 bit.
    1.89 +  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
    1.90 +                  BitMap::small_range);
    1.91 +}
    1.92 +
    1.93 +inline void CMSBitMap::par_mark_range(MemRegion mr) {
    1.94 +  NOT_PRODUCT(region_invariant(mr));
    1.95 +  // Range size is usually just 1 bit.
    1.96 +  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
    1.97 +                    BitMap::small_range);
    1.98 +}
    1.99 +
   1.100 +inline void CMSBitMap::par_clear_range(MemRegion mr) {
   1.101 +  NOT_PRODUCT(region_invariant(mr));
   1.102 +  // Range size is usually just 1 bit.
   1.103 +  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
   1.104 +                      BitMap::small_range);
   1.105 +}
   1.106 +
   1.107 +inline void CMSBitMap::mark_large_range(MemRegion mr) {
   1.108 +  NOT_PRODUCT(region_invariant(mr));
   1.109 +  // Range size must be greater than 32 bytes.
   1.110 +  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
   1.111 +                BitMap::large_range);
   1.112 +}
   1.113 +
   1.114 +inline void CMSBitMap::clear_large_range(MemRegion mr) {
   1.115 +  NOT_PRODUCT(region_invariant(mr));
   1.116 +  // Range size must be greater than 32 bytes.
   1.117 +  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
   1.118 +                  BitMap::large_range);
   1.119 +}
   1.120 +
   1.121 +inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
   1.122 +  NOT_PRODUCT(region_invariant(mr));
   1.123 +  // Range size must be greater than 32 bytes.
   1.124 +  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
   1.125 +                    BitMap::large_range);
   1.126 +}
   1.127 +
   1.128 +inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
   1.129 +  NOT_PRODUCT(region_invariant(mr));
   1.130 +  // Range size must be greater than 32 bytes.
   1.131 +  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
   1.132 +                      BitMap::large_range);
   1.133 +}
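          +
          +// Note (added commentary): BitMap::small_range and BitMap::large_range
          +// are size hints that let the underlying BitMap pick a strategy -- a
          +// bit-at-a-time path for tiny ranges versus a bulk, memset-style path
          +// for large ones; see bitMap.hpp.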
   1.134 +
   1.135 +// Starting at "addr" (inclusive) return a memory region
   1.136 +// corresponding to the first maximally contiguous marked ("1") region.
   1.137 +inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
   1.138 +  return getAndClearMarkedRegion(addr, endWord());
   1.139 +}
   1.140 +
    1.141 +// Starting at "start_addr" (inclusive) return a memory region
    1.142 +// corresponding to the first maximally contiguous marked ("1") region
    1.143 +// contained in [start_addr, end_addr).
   1.144 +inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
   1.145 +                                                    HeapWord* end_addr) {
   1.146 +  HeapWord *start, *end;
   1.147 +  assert_locked();
   1.148 +  start = getNextMarkedWordAddress  (start_addr, end_addr);
   1.149 +  end   = getNextUnmarkedWordAddress(start,      end_addr);
   1.150 +  assert(start <= end, "Consistency check");
   1.151 +  MemRegion mr(start, end);
   1.152 +  if (!mr.is_empty()) {
   1.153 +    clear_range(mr);
   1.154 +  }
   1.155 +  return mr;
   1.156 +}
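          +
          +// Usage sketch (illustrative only, not part of the original source):
          +// a caller can drain every marked region in [lo, hi) with a loop like
          +//
          +//   for (MemRegion mr = bm->getAndClearMarkedRegion(lo, hi);
          +//        !mr.is_empty();
          +//        mr = bm->getAndClearMarkedRegion(mr.end(), hi)) {
          +//     process(mr);   // 'process' is a hypothetical consumer
          +//   }
          +//
          +// When no marked bits remain, start == end and the returned region is
          +// empty, which terminates the loop.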
   1.157 +
   1.158 +inline bool CMSBitMap::isMarked(HeapWord* addr) const {
   1.159 +  assert_locked();
   1.160 +  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
   1.161 +         "outside underlying space?");
   1.162 +  return _bm.at(heapWordToOffset(addr));
   1.163 +}
   1.164 +
   1.165 +// The same as isMarked() but without a lock check.
   1.166 +inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
   1.167 +  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
   1.168 +         "outside underlying space?");
   1.169 +  return _bm.at(heapWordToOffset(addr));
   1.170 +}
   1.171 +
   1.172 +
   1.173 +inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
   1.174 +  assert_locked();
   1.175 +  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
   1.176 +         "outside underlying space?");
   1.177 +  return !_bm.at(heapWordToOffset(addr));
   1.178 +}
   1.179 +
    1.180 +// Return the HeapWord address corresponding to the next "1" bit
    1.181 +// (inclusive).
   1.182 +inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
   1.183 +  return getNextMarkedWordAddress(addr, endWord());
   1.184 +}
   1.185 +
    1.186 +// Return the least HeapWord address corresponding to the next "1" bit
    1.187 +// starting at start_addr (inclusive) but strictly less than end_addr.
   1.188 +inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
   1.189 +  HeapWord* start_addr, HeapWord* end_addr) const {
   1.190 +  assert_locked();
   1.191 +  size_t nextOffset = _bm.get_next_one_offset(
   1.192 +                        heapWordToOffset(start_addr),
   1.193 +                        heapWordToOffset(end_addr));
   1.194 +  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
   1.195 +  assert(nextAddr >= start_addr &&
   1.196 +         nextAddr <= end_addr, "get_next_one postcondition");
   1.197 +  assert((nextAddr == end_addr) ||
   1.198 +         isMarked(nextAddr), "get_next_one postcondition");
   1.199 +  return nextAddr;
   1.200 +}
   1.201 +
   1.202 +
    1.203 +// Return the HeapWord address corresponding to the next "0" bit
    1.204 +// (inclusive).
   1.205 +inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
   1.206 +  return getNextUnmarkedWordAddress(addr, endWord());
   1.207 +}
   1.208 +
    1.209 +// Return the least HeapWord address corresponding to the next "0" bit
    1.210 +// starting at start_addr (inclusive) but strictly less than end_addr.
   1.211 +inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
   1.212 +  HeapWord* start_addr, HeapWord* end_addr) const {
   1.213 +  assert_locked();
   1.214 +  size_t nextOffset = _bm.get_next_zero_offset(
   1.215 +                        heapWordToOffset(start_addr),
   1.216 +                        heapWordToOffset(end_addr));
   1.217 +  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
   1.218 +  assert(nextAddr >= start_addr &&
   1.219 +         nextAddr <= end_addr, "get_next_zero postcondition");
   1.220 +  assert((nextAddr == end_addr) ||
   1.221 +          isUnmarked(nextAddr), "get_next_zero postcondition");
   1.222 +  return nextAddr;
   1.223 +}
   1.224 +
   1.225 +inline bool CMSBitMap::isAllClear() const {
   1.226 +  assert_locked();
   1.227 +  return getNextMarkedWordAddress(startWord()) >= endWord();
   1.228 +}
   1.229 +
   1.230 +inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
    1.231 +                               HeapWord* right) {
   1.232 +  assert_locked();
   1.233 +  left = MAX2(_bmStartWord, left);
   1.234 +  right = MIN2(_bmStartWord + _bmWordSize, right);
   1.235 +  if (right > left) {
   1.236 +    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
   1.237 +  }
   1.238 +}
   1.239 +
   1.240 +inline void CMSCollector::start_icms() {
   1.241 +  if (CMSIncrementalMode) {
   1.242 +    ConcurrentMarkSweepThread::start_icms();
   1.243 +  }
   1.244 +}
   1.245 +
   1.246 +inline void CMSCollector::stop_icms() {
   1.247 +  if (CMSIncrementalMode) {
   1.248 +    ConcurrentMarkSweepThread::stop_icms();
   1.249 +  }
   1.250 +}
   1.251 +
   1.252 +inline void CMSCollector::disable_icms() {
   1.253 +  if (CMSIncrementalMode) {
   1.254 +    ConcurrentMarkSweepThread::disable_icms();
   1.255 +  }
   1.256 +}
   1.257 +
   1.258 +inline void CMSCollector::enable_icms() {
   1.259 +  if (CMSIncrementalMode) {
   1.260 +    ConcurrentMarkSweepThread::enable_icms();
   1.261 +  }
   1.262 +}
   1.263 +
   1.264 +inline void CMSCollector::icms_wait() {
   1.265 +  if (CMSIncrementalMode) {
   1.266 +    cmsThread()->icms_wait();
   1.267 +  }
   1.268 +}
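          +
          +// Background (added commentary): in incremental mode (iCMS) the
          +// concurrent phases run in short bursts governed by a duty cycle, so
          +// the CMS thread can share a single CPU with the application; the
          +// start/stop/enable/disable calls above gate those bursts.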
   1.269 +
   1.270 +inline void CMSCollector::save_sweep_limits() {
   1.271 +  _cmsGen->save_sweep_limit();
   1.272 +}
   1.273 +
   1.274 +inline bool CMSCollector::is_dead_obj(oop obj) const {
   1.275 +  HeapWord* addr = (HeapWord*)obj;
   1.276 +  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
   1.277 +          && _cmsGen->cmsSpace()->block_is_obj(addr)),
   1.278 +         "must be object");
   1.279 +  return  should_unload_classes() &&
   1.280 +          _collectorState == Sweeping &&
   1.281 +         !_markBitMap.isMarked(addr);
   1.282 +}
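          +
          +// (Added note: this test is only meaningful while _collectorState ==
          +// Sweeping -- marking is complete by then, so an unmarked bit really
          +// does mean the object is unreachable, rather than merely not yet
          +// visited.)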
   1.283 +
   1.284 +inline bool CMSCollector::should_abort_preclean() const {
   1.285 +  // We are in the midst of an "abortable preclean" and either
   1.286 +  // scavenge is done or foreground GC wants to take over collection
   1.287 +  return _collectorState == AbortablePreclean &&
   1.288 +         (_abort_preclean || _foregroundGCIsActive ||
   1.289 +          GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
   1.290 +}
   1.291 +
   1.292 +inline size_t CMSCollector::get_eden_used() const {
   1.293 +  return _young_gen->as_DefNewGeneration()->eden()->used();
   1.294 +}
   1.295 +
   1.296 +inline size_t CMSCollector::get_eden_capacity() const {
   1.297 +  return _young_gen->as_DefNewGeneration()->eden()->capacity();
   1.298 +}
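          +
          +// (Added note: eden occupancy feeds the collector's scheduling
          +// heuristics -- for example, it is sampled around yields, via
          +// sample_eden() below, to help decide when an abortable preclean has
          +// run long enough.)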
   1.299 +
   1.300 +inline bool CMSStats::valid() const {
   1.301 +  return _valid_bits == _ALL_VALID;
   1.302 +}
   1.303 +
   1.304 +inline void CMSStats::record_gc0_begin() {
   1.305 +  if (_gc0_begin_time.is_updated()) {
   1.306 +    float last_gc0_period = _gc0_begin_time.seconds();
   1.307 +    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
   1.308 +      last_gc0_period, _gc0_alpha);
   1.309 +    _gc0_alpha = _saved_alpha;
   1.310 +    _valid_bits |= _GC0_VALID;
   1.311 +  }
   1.312 +  _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();
   1.313 +
   1.314 +  _gc0_begin_time.update();
   1.315 +}
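          +
          +// (Added note: AdaptiveWeightedAverage::exp_avg in gcUtil.hpp blends
          +// the old average with the new sample using a percentage weight,
          +// essentially new_avg = ((100 - alpha) * old_avg + alpha * sample) / 100,
          +// so recent samples count for more as alpha grows.)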
   1.316 +
   1.317 +inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
   1.318 +  float last_gc0_duration = _gc0_begin_time.seconds();
   1.319 +  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
   1.320 +    last_gc0_duration, _gc0_alpha);
   1.321 +
   1.322 +  // Amount promoted.
   1.323 +  _cms_used_at_gc0_end = cms_gen_bytes_used;
   1.324 +
   1.325 +  size_t promoted_bytes = 0;
   1.326 +  if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
   1.327 +    promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
   1.328 +  }
   1.329 +
   1.330 +  // If the younger gen collections were skipped, then the
   1.331 +  // number of promoted bytes will be 0 and adding it to the
   1.332 +  // average will incorrectly lessen the average.  It is, however,
   1.333 +  // also possible that no promotion was needed.
   1.334 +  //
   1.335 +  // _gc0_promoted used to be calculated as
   1.336 +  // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
   1.337 +  //  promoted_bytes, _gc0_alpha);
   1.338 +  _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
   1.339 +  _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();
   1.340 +
   1.341 +  // Amount directly allocated.
   1.342 +  size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
   1.343 +  _cms_gen->reset_direct_allocated_words();
   1.344 +  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
   1.345 +    allocated_bytes, _gc0_alpha);
   1.346 +}
   1.347 +
   1.348 +inline void CMSStats::record_cms_begin() {
   1.349 +  _cms_timer.stop();
   1.350 +
    1.351 +  // This is just an approximate value, but it is good enough.
   1.352 +  _cms_used_at_cms_begin = _cms_used_at_gc0_end;
   1.353 +
   1.354 +  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
   1.355 +    (float) _cms_timer.seconds(), _cms_alpha);
   1.356 +  _cms_begin_time.update();
   1.357 +
   1.358 +  _cms_timer.reset();
   1.359 +  _cms_timer.start();
   1.360 +}
   1.361 +
   1.362 +inline void CMSStats::record_cms_end() {
   1.363 +  _cms_timer.stop();
   1.364 +
   1.365 +  float cur_duration = _cms_timer.seconds();
   1.366 +  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
   1.367 +    cur_duration, _cms_alpha);
   1.368 +
   1.369 +  // Avoid division by 0.
   1.370 +  const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
   1.371 +  _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
   1.372 +                                 cur_duration / cms_used_mb,
   1.373 +                                 _cms_alpha);
   1.374 +
   1.375 +  _cms_end_time.update();
   1.376 +  _cms_alpha = _saved_alpha;
   1.377 +  _allow_duty_cycle_reduction = true;
   1.378 +  _valid_bits |= _CMS_VALID;
   1.379 +
   1.380 +  _cms_timer.start();
   1.381 +}
   1.382 +
   1.383 +inline double CMSStats::cms_time_since_begin() const {
   1.384 +  return _cms_begin_time.seconds();
   1.385 +}
   1.386 +
   1.387 +inline double CMSStats::cms_time_since_end() const {
   1.388 +  return _cms_end_time.seconds();
   1.389 +}
   1.390 +
   1.391 +inline double CMSStats::promotion_rate() const {
   1.392 +  assert(valid(), "statistics not valid yet");
   1.393 +  return gc0_promoted() / gc0_period();
   1.394 +}
   1.395 +
   1.396 +inline double CMSStats::cms_allocation_rate() const {
   1.397 +  assert(valid(), "statistics not valid yet");
   1.398 +  return cms_allocated() / gc0_period();
   1.399 +}
   1.400 +
   1.401 +inline double CMSStats::cms_consumption_rate() const {
   1.402 +  assert(valid(), "statistics not valid yet");
   1.403 +  return (gc0_promoted() + cms_allocated()) / gc0_period();
   1.404 +}
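          +
          +// (Added note: the three rates above are in bytes per second of
          +// young-collection period; the collector uses cms_consumption_rate()
          +// to estimate how soon the CMS generation will fill, and hence when
          +// the next concurrent cycle must start.)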
   1.405 +
   1.406 +inline unsigned int CMSStats::icms_update_duty_cycle() {
   1.407 +  // Update the duty cycle only if pacing is enabled and the stats are valid
   1.408 +  // (after at least one young gen gc and one cms cycle have completed).
   1.409 +  if (CMSIncrementalPacing && valid()) {
   1.410 +    return icms_update_duty_cycle_impl();
   1.411 +  }
   1.412 +  return _icms_duty_cycle;
   1.413 +}
   1.414 +
   1.415 +inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
   1.416 +  cmsSpace()->save_sweep_limit();
   1.417 +}
   1.418 +
   1.419 +inline size_t ConcurrentMarkSweepGeneration::capacity() const {
   1.420 +  return _cmsSpace->capacity();
   1.421 +}
   1.422 +
   1.423 +inline size_t ConcurrentMarkSweepGeneration::used() const {
   1.424 +  return _cmsSpace->used();
   1.425 +}
   1.426 +
   1.427 +inline size_t ConcurrentMarkSweepGeneration::free() const {
   1.428 +  return _cmsSpace->free();
   1.429 +}
   1.430 +
   1.431 +inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
   1.432 +  return _cmsSpace->used_region();
   1.433 +}
   1.434 +
   1.435 +inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
   1.436 +  return _cmsSpace->used_region_at_save_marks();
   1.437 +}
   1.438 +
   1.439 +inline void MarkFromRootsClosure::do_yield_check() {
   1.440 +  if (ConcurrentMarkSweepThread::should_yield() &&
   1.441 +      !_collector->foregroundGCIsActive() &&
   1.442 +      _yield) {
   1.443 +    do_yield_work();
   1.444 +  }
   1.445 +}
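          +
          +// (Added note: the same three-part test recurs in the closures below --
          +// yield only if the CMS thread has been asked to step aside, no
          +// foreground collection has taken over, and this phase permits
          +// yielding at all; do_yield_work then typically gives up the relevant
          +// locks so a safepoint or foreground GC can proceed.)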
   1.446 +
   1.447 +inline void Par_MarkFromRootsClosure::do_yield_check() {
   1.448 +  if (ConcurrentMarkSweepThread::should_yield() &&
   1.449 +      !_collector->foregroundGCIsActive() &&
   1.450 +      _yield) {
   1.451 +    do_yield_work();
   1.452 +  }
   1.453 +}
   1.454 +
   1.455 +inline void PushOrMarkClosure::do_yield_check() {
   1.456 +  _parent->do_yield_check();
   1.457 +}
   1.458 +
   1.459 +inline void Par_PushOrMarkClosure::do_yield_check() {
   1.460 +  _parent->do_yield_check();
   1.461 +}
   1.462 +
   1.463 +// Return value of "true" indicates that the on-going preclean
   1.464 +// should be aborted.
   1.465 +inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
   1.466 +  if (ConcurrentMarkSweepThread::should_yield() &&
   1.467 +      !_collector->foregroundGCIsActive() &&
   1.468 +      _yield) {
   1.469 +    // Sample young gen size before and after yield
   1.470 +    _collector->sample_eden();
   1.471 +    do_yield_work();
   1.472 +    _collector->sample_eden();
   1.473 +    return _collector->should_abort_preclean();
   1.474 +  }
   1.475 +  return false;
   1.476 +}
   1.477 +
   1.478 +inline void SurvivorSpacePrecleanClosure::do_yield_check() {
   1.479 +  if (ConcurrentMarkSweepThread::should_yield() &&
   1.480 +      !_collector->foregroundGCIsActive() &&
   1.481 +      _yield) {
   1.482 +    // Sample young gen size before and after yield
   1.483 +    _collector->sample_eden();
   1.484 +    do_yield_work();
   1.485 +    _collector->sample_eden();
   1.486 +  }
   1.487 +}
   1.488 +
   1.489 +inline void SweepClosure::do_yield_check(HeapWord* addr) {
   1.490 +  if (ConcurrentMarkSweepThread::should_yield() &&
   1.491 +      !_collector->foregroundGCIsActive() &&
   1.492 +      _yield) {
   1.493 +    do_yield_work(addr);
   1.494 +  }
   1.495 +}
   1.496 +
   1.497 +inline void MarkRefsIntoAndScanClosure::do_yield_check() {
    1.498 +  // The conditions are ordered so that, during the remark phase
    1.499 +  // (when _yield is false), the cheapest test fails first.
   1.500 +  if (_yield &&
   1.501 +      !_collector->foregroundGCIsActive() &&
   1.502 +      ConcurrentMarkSweepThread::should_yield()) {
   1.503 +    do_yield_work();
   1.504 +  }
   1.505 +}
   1.506 +
   1.507 +
   1.508 +inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
   1.509 +  // Align the end of mr so it's at a card boundary.
   1.510 +  // This is superfluous except at the end of the space;
   1.511 +  // we should do better than this XXX
   1.512 +  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
   1.513 +                 CardTableModRefBS::card_size /* bytes */));
   1.514 +  _t->mark_range(mr2);
   1.515 +}
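          +
          +// Worked example (illustrative): with the usual 512-byte cards, an
          +// mr.end() of 0x1234 is rounded up to 0x1400, so the marked range
          +// always ends on a card boundary.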
   1.516 +
   1.517 +inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
   1.518 +  // Align the end of mr so it's at a card boundary.
   1.519 +  // This is superfluous except at the end of the space;
   1.520 +  // we should do better than this XXX
   1.521 +  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
   1.522 +                 CardTableModRefBS::card_size /* bytes */));
   1.523 +  _t->par_mark_range(mr2);
   1.524 +}
   1.525 +
   1.526 +#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
