src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp

author       ysr
date         Tue, 07 Dec 2010 21:55:53 -0800
changeset    2336:6cd6d394f280
parent       2314:f95d63e2154a
child        4037:da91efe96a93
permissions  -rw-r--r--

7001033: assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed())
7002546: regression on SpecJbb2005 on 7b118 comparing to 7b117 on small heaps
Summary: Relaxed assertion checking related to incremental_collection_failed flag to allow for ExplicitGCInvokesConcurrent behaviour where we do not want a failing scavenge to bail to a stop-world collection. Parameterized incremental_collection_will_fail() so we can selectively use, or not use, as appropriate, the statistical prediction at specific use sites. This essentially reverts the scavenge bail-out logic to what it was prior to some recent changes that had inadvertently started using the statistical prediction which can be noisy in the presence of bursty loads. Added some associated verbose non-product debugging messages.
Reviewed-by: johnc, tonyp
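
For illustration only: the parameterization described in the summary is visible at the use site in CMSCollector::should_abort_preclean() below, which passes true /* consult_young */. The following sketch is a hypothetical rendering (not the actual GenCollectedHeap code) of how such a boolean parameter can gate the noisy statistical prediction while still always honoring an observed scavenge failure.

// Hypothetical sketch for illustration; the real GenCollectedHeap logic differs.
class IncrementalCollectionPolicySketch {
  bool _incremental_collection_failed;   // set after an actual failed scavenge
  bool _young_gen_predicts_failure;      // statistical estimate; can be noisy
 public:
  bool incremental_collection_will_fail(bool consult_young) const {
    // An observed failure always counts; the statistical prediction is
    // consulted only when the caller explicitly asks for it.
    return _incremental_collection_failed ||
           (consult_young && _young_gen_predicts_failure);
  }
};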

duke@435 1 /*
stefank@2314 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
stefank@2314 27
stefank@2314 28 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
stefank@2314 29 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
stefank@2314 30 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
stefank@2314 31 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
stefank@2314 32 #include "gc_implementation/shared/gcUtil.hpp"
stefank@2314 33 #include "memory/defNewGeneration.hpp"
stefank@2314 34
duke@435 35 inline void CMSBitMap::clear_all() {
duke@435 36 assert_locked();
duke@435 37 // CMS bitmaps usually cover large memory regions
duke@435 38 _bm.clear_large();
duke@435 39 return;
duke@435 40 }
duke@435 41
duke@435 42 inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
duke@435 43 return (pointer_delta(addr, _bmStartWord)) >> _shifter;
duke@435 44 }
duke@435 45
duke@435 46 inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
duke@435 47 return _bmStartWord + (offset << _shifter);
duke@435 48 }
duke@435 49
duke@435 50 inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
duke@435 51 assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
duke@435 52 return diff >> _shifter;
duke@435 53 }
duke@435 54
duke@435 55 inline void CMSBitMap::mark(HeapWord* addr) {
duke@435 56 assert_locked();
duke@435 57 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 58 "outside underlying space?");
duke@435 59 _bm.set_bit(heapWordToOffset(addr));
duke@435 60 }
duke@435 61
duke@435 62 inline bool CMSBitMap::par_mark(HeapWord* addr) {
duke@435 63 assert_locked();
duke@435 64 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 65 "outside underlying space?");
duke@435 66 return _bm.par_at_put(heapWordToOffset(addr), true);
duke@435 67 }
duke@435 68
duke@435 69 inline void CMSBitMap::par_clear(HeapWord* addr) {
duke@435 70 assert_locked();
duke@435 71 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 72 "outside underlying space?");
duke@435 73 _bm.par_at_put(heapWordToOffset(addr), false);
duke@435 74 }
duke@435 75
duke@435 76 inline void CMSBitMap::mark_range(MemRegion mr) {
duke@435 77 NOT_PRODUCT(region_invariant(mr));
duke@435 78 // Range size is usually just 1 bit.
duke@435 79 _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 80 BitMap::small_range);
duke@435 81 }
duke@435 82
duke@435 83 inline void CMSBitMap::clear_range(MemRegion mr) {
duke@435 84 NOT_PRODUCT(region_invariant(mr));
duke@435 85 // Range size is usually just 1 bit.
duke@435 86 _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 87 BitMap::small_range);
duke@435 88 }
duke@435 89
duke@435 90 inline void CMSBitMap::par_mark_range(MemRegion mr) {
duke@435 91 NOT_PRODUCT(region_invariant(mr));
duke@435 92 // Range size is usually just 1 bit.
duke@435 93 _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 94 BitMap::small_range);
duke@435 95 }
duke@435 96
duke@435 97 inline void CMSBitMap::par_clear_range(MemRegion mr) {
duke@435 98 NOT_PRODUCT(region_invariant(mr));
duke@435 99 // Range size is usually just 1 bit.
duke@435 100 _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 101 BitMap::small_range);
duke@435 102 }
duke@435 103
duke@435 104 inline void CMSBitMap::mark_large_range(MemRegion mr) {
duke@435 105 NOT_PRODUCT(region_invariant(mr));
duke@435 106 // Range size must be greater than 32 bytes.
duke@435 107 _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 108 BitMap::large_range);
duke@435 109 }
duke@435 110
duke@435 111 inline void CMSBitMap::clear_large_range(MemRegion mr) {
duke@435 112 NOT_PRODUCT(region_invariant(mr));
duke@435 113 // Range size must be greater than 32 bytes.
duke@435 114 _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 115 BitMap::large_range);
duke@435 116 }
duke@435 117
duke@435 118 inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
duke@435 119 NOT_PRODUCT(region_invariant(mr));
duke@435 120 // Range size must be greater than 32 bytes.
duke@435 121 _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 122 BitMap::large_range);
duke@435 123 }
duke@435 124
duke@435 125 inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
duke@435 126 NOT_PRODUCT(region_invariant(mr));
duke@435 127 // Range size must be greater than 32 bytes.
duke@435 128 _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 129 BitMap::large_range);
duke@435 130 }
duke@435 131
duke@435 132 // Starting at "addr" (inclusive) return a memory region
duke@435 133 // corresponding to the first maximally contiguous marked ("1") region.
duke@435 134 inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
duke@435 135 return getAndClearMarkedRegion(addr, endWord());
duke@435 136 }
duke@435 137
duke@435 138 // Starting at "start_addr" (inclusive) return a memory region
duke@435 139 // corresponding to the first maximally contiguous marked ("1") region
duke@435 140 // strictly less than end_addr.
duke@435 141 inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
duke@435 142 HeapWord* end_addr) {
duke@435 143 HeapWord *start, *end;
duke@435 144 assert_locked();
duke@435 145 start = getNextMarkedWordAddress (start_addr, end_addr);
duke@435 146 end = getNextUnmarkedWordAddress(start, end_addr);
duke@435 147 assert(start <= end, "Consistency check");
duke@435 148 MemRegion mr(start, end);
duke@435 149 if (!mr.is_empty()) {
duke@435 150 clear_range(mr);
duke@435 151 }
duke@435 152 return mr;
duke@435 153 }
duke@435 154
duke@435 155 inline bool CMSBitMap::isMarked(HeapWord* addr) const {
duke@435 156 assert_locked();
duke@435 157 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 158 "outside underlying space?");
duke@435 159 return _bm.at(heapWordToOffset(addr));
duke@435 160 }
duke@435 161
duke@435 162 // The same as isMarked() but without a lock check.
duke@435 163 inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
duke@435 164 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 165 "outside underlying space?");
duke@435 166 return _bm.at(heapWordToOffset(addr));
duke@435 167 }
duke@435 168
duke@435 169
duke@435 170 inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
duke@435 171 assert_locked();
duke@435 172 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 173 "outside underlying space?");
duke@435 174 return !_bm.at(heapWordToOffset(addr));
duke@435 175 }
duke@435 176
duke@435 177 // Return the HeapWord address corresponding to the next "1" bit
duke@435 178 // (inclusive).
duke@435 179 inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
duke@435 180 return getNextMarkedWordAddress(addr, endWord());
duke@435 181 }
duke@435 182
duke@435 183 // Return the least HeapWord address corresponding to the next "1" bit
duke@435 184 // starting at start_addr (inclusive) but strictly less than end_addr.
duke@435 185 inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
duke@435 186 HeapWord* start_addr, HeapWord* end_addr) const {
duke@435 187 assert_locked();
duke@435 188 size_t nextOffset = _bm.get_next_one_offset(
duke@435 189 heapWordToOffset(start_addr),
duke@435 190 heapWordToOffset(end_addr));
duke@435 191 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
duke@435 192 assert(nextAddr >= start_addr &&
duke@435 193 nextAddr <= end_addr, "get_next_one postcondition");
duke@435 194 assert((nextAddr == end_addr) ||
duke@435 195 isMarked(nextAddr), "get_next_one postcondition");
duke@435 196 return nextAddr;
duke@435 197 }
duke@435 198
duke@435 199
duke@435 200 // Return the HeapWord address corresponding to the next "0" bit
duke@435 201 // (inclusive).
duke@435 202 inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
duke@435 203 return getNextUnmarkedWordAddress(addr, endWord());
duke@435 204 }
duke@435 205
duke@435 206 // Return the least HeapWord address corresponding to the next "0" bit
duke@435 207 // starting at start_addr (inclusive) but strictly less than end_addr.
duke@435 208 inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
duke@435 209 HeapWord* start_addr, HeapWord* end_addr) const {
duke@435 210 assert_locked();
duke@435 211 size_t nextOffset = _bm.get_next_zero_offset(
duke@435 212 heapWordToOffset(start_addr),
duke@435 213 heapWordToOffset(end_addr));
duke@435 214 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
duke@435 215 assert(nextAddr >= start_addr &&
duke@435 216 nextAddr <= end_addr, "get_next_zero postcondition");
duke@435 217 assert((nextAddr == end_addr) ||
duke@435 218 isUnmarked(nextAddr), "get_next_zero postcondition");
duke@435 219 return nextAddr;
duke@435 220 }
duke@435 221
duke@435 222 inline bool CMSBitMap::isAllClear() const {
duke@435 223 assert_locked();
duke@435 224 return getNextMarkedWordAddress(startWord()) >= endWord();
duke@435 225 }
duke@435 226
duke@435 227 inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
duke@435 228 HeapWord* right) {
duke@435 229 assert_locked();
duke@435 230 left = MAX2(_bmStartWord, left);
duke@435 231 right = MIN2(_bmStartWord + _bmWordSize, right);
duke@435 232 if (right > left) {
duke@435 233 _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
duke@435 234 }
duke@435 235 }
duke@435 236
duke@435 237 inline void CMSCollector::start_icms() {
duke@435 238 if (CMSIncrementalMode) {
duke@435 239 ConcurrentMarkSweepThread::start_icms();
duke@435 240 }
duke@435 241 }
duke@435 242
duke@435 243 inline void CMSCollector::stop_icms() {
duke@435 244 if (CMSIncrementalMode) {
duke@435 245 ConcurrentMarkSweepThread::stop_icms();
duke@435 246 }
duke@435 247 }
duke@435 248
duke@435 249 inline void CMSCollector::disable_icms() {
duke@435 250 if (CMSIncrementalMode) {
duke@435 251 ConcurrentMarkSweepThread::disable_icms();
duke@435 252 }
duke@435 253 }
duke@435 254
duke@435 255 inline void CMSCollector::enable_icms() {
duke@435 256 if (CMSIncrementalMode) {
duke@435 257 ConcurrentMarkSweepThread::enable_icms();
duke@435 258 }
duke@435 259 }
duke@435 260
duke@435 261 inline void CMSCollector::icms_wait() {
duke@435 262 if (CMSIncrementalMode) {
duke@435 263 cmsThread()->icms_wait();
duke@435 264 }
duke@435 265 }
duke@435 266
duke@435 267 inline void CMSCollector::save_sweep_limits() {
duke@435 268 _cmsGen->save_sweep_limit();
duke@435 269 _permGen->save_sweep_limit();
duke@435 270 }
duke@435 271
duke@435 272 inline bool CMSCollector::is_dead_obj(oop obj) const {
duke@435 273 HeapWord* addr = (HeapWord*)obj;
duke@435 274 assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
duke@435 275 && _cmsGen->cmsSpace()->block_is_obj(addr))
duke@435 276 ||
duke@435 277 (_permGen->cmsSpace()->is_in_reserved(addr)
duke@435 278 && _permGen->cmsSpace()->block_is_obj(addr)),
duke@435 279 "must be object");
ysr@529 280 return should_unload_classes() &&
duke@435 281 _collectorState == Sweeping &&
duke@435 282 !_markBitMap.isMarked(addr);
duke@435 283 }
duke@435 284
duke@435 285 inline bool CMSCollector::should_abort_preclean() const {
duke@435 286 // We are in the midst of an "abortable preclean" and either
duke@435 287 // scavenge is done or foreground GC wants to take over collection
duke@435 288 return _collectorState == AbortablePreclean &&
duke@435 289 (_abort_preclean || _foregroundGCIsActive ||
ysr@2336 290 GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
duke@435 291 }
duke@435 292
duke@435 293 inline size_t CMSCollector::get_eden_used() const {
duke@435 294 return _young_gen->as_DefNewGeneration()->eden()->used();
duke@435 295 }
duke@435 296
duke@435 297 inline size_t CMSCollector::get_eden_capacity() const {
duke@435 298 return _young_gen->as_DefNewGeneration()->eden()->capacity();
duke@435 299 }
duke@435 300
duke@435 301 inline bool CMSStats::valid() const {
duke@435 302 return _valid_bits == _ALL_VALID;
duke@435 303 }
duke@435 304
duke@435 305 inline void CMSStats::record_gc0_begin() {
duke@435 306 if (_gc0_begin_time.is_updated()) {
duke@435 307 float last_gc0_period = _gc0_begin_time.seconds();
duke@435 308 _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
duke@435 309 last_gc0_period, _gc0_alpha);
duke@435 310 _gc0_alpha = _saved_alpha;
duke@435 311 _valid_bits |= _GC0_VALID;
duke@435 312 }
duke@435 313 _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();
duke@435 314
duke@435 315 _gc0_begin_time.update();
duke@435 316 }
duke@435 317
duke@435 318 inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
duke@435 319 float last_gc0_duration = _gc0_begin_time.seconds();
duke@435 320 _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
duke@435 321 last_gc0_duration, _gc0_alpha);
duke@435 322
duke@435 323 // Amount promoted.
duke@435 324 _cms_used_at_gc0_end = cms_gen_bytes_used;
duke@435 325
duke@435 326 size_t promoted_bytes = 0;
duke@435 327 if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
duke@435 328 promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
duke@435 329 }
duke@435 330
duke@435 331 // If the younger gen collections were skipped, then the
duke@435 332 // number of promoted bytes will be 0 and adding it to the
duke@435 333 // average will incorrectly lessen the average. It is, however,
duke@435 334 // also possible that no promotion was needed.
duke@435 335 //
duke@435 336 // _gc0_promoted used to be calculated as
duke@435 337 // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
duke@435 338 // promoted_bytes, _gc0_alpha);
duke@435 339 _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
duke@435 340 _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();
duke@435 341
duke@435 342 // Amount directly allocated.
duke@435 343 size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
duke@435 344 _cms_gen->reset_direct_allocated_words();
duke@435 345 _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
duke@435 346 allocated_bytes, _gc0_alpha);
duke@435 347 }
duke@435 348
duke@435 349 inline void CMSStats::record_cms_begin() {
duke@435 350 _cms_timer.stop();
duke@435 351
duke@435 352 // This is just an approximate value, but is good enough.
duke@435 353 _cms_used_at_cms_begin = _cms_used_at_gc0_end;
duke@435 354
duke@435 355 _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
duke@435 356 (float) _cms_timer.seconds(), _cms_alpha);
duke@435 357 _cms_begin_time.update();
duke@435 358
duke@435 359 _cms_timer.reset();
duke@435 360 _cms_timer.start();
duke@435 361 }
duke@435 362
duke@435 363 inline void CMSStats::record_cms_end() {
duke@435 364 _cms_timer.stop();
duke@435 365
duke@435 366 float cur_duration = _cms_timer.seconds();
duke@435 367 _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
duke@435 368 cur_duration, _cms_alpha);
duke@435 369
duke@435 370 // Avoid division by 0.
duke@435 371 const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
duke@435 372 _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
duke@435 373 cur_duration / cms_used_mb,
duke@435 374 _cms_alpha);
duke@435 375
duke@435 376 _cms_end_time.update();
duke@435 377 _cms_alpha = _saved_alpha;
duke@435 378 _allow_duty_cycle_reduction = true;
duke@435 379 _valid_bits |= _CMS_VALID;
duke@435 380
duke@435 381 _cms_timer.start();
duke@435 382 }
duke@435 383
duke@435 384 inline double CMSStats::cms_time_since_begin() const {
duke@435 385 return _cms_begin_time.seconds();
duke@435 386 }
duke@435 387
duke@435 388 inline double CMSStats::cms_time_since_end() const {
duke@435 389 return _cms_end_time.seconds();
duke@435 390 }
duke@435 391
duke@435 392 inline double CMSStats::promotion_rate() const {
duke@435 393 assert(valid(), "statistics not valid yet");
duke@435 394 return gc0_promoted() / gc0_period();
duke@435 395 }
duke@435 396
duke@435 397 inline double CMSStats::cms_allocation_rate() const {
duke@435 398 assert(valid(), "statistics not valid yet");
duke@435 399 return cms_allocated() / gc0_period();
duke@435 400 }
duke@435 401
duke@435 402 inline double CMSStats::cms_consumption_rate() const {
duke@435 403 assert(valid(), "statistics not valid yet");
duke@435 404 return (gc0_promoted() + cms_allocated()) / gc0_period();
duke@435 405 }
duke@435 406
duke@435 407 inline unsigned int CMSStats::icms_update_duty_cycle() {
duke@435 408 // Update the duty cycle only if pacing is enabled and the stats are valid
duke@435 409 // (after at least one young gen gc and one cms cycle have completed).
duke@435 410 if (CMSIncrementalPacing && valid()) {
duke@435 411 return icms_update_duty_cycle_impl();
duke@435 412 }
duke@435 413 return _icms_duty_cycle;
duke@435 414 }
duke@435 415
duke@435 416 inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
duke@435 417 cmsSpace()->save_sweep_limit();
duke@435 418 }
duke@435 419
duke@435 420 inline size_t ConcurrentMarkSweepGeneration::capacity() const {
duke@435 421 return _cmsSpace->capacity();
duke@435 422 }
duke@435 423
duke@435 424 inline size_t ConcurrentMarkSweepGeneration::used() const {
duke@435 425 return _cmsSpace->used();
duke@435 426 }
duke@435 427
duke@435 428 inline size_t ConcurrentMarkSweepGeneration::free() const {
duke@435 429 return _cmsSpace->free();
duke@435 430 }
duke@435 431
duke@435 432 inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
duke@435 433 return _cmsSpace->used_region();
duke@435 434 }
duke@435 435
duke@435 436 inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
duke@435 437 return _cmsSpace->used_region_at_save_marks();
duke@435 438 }
duke@435 439
duke@435 440 inline void MarkFromRootsClosure::do_yield_check() {
duke@435 441 if (ConcurrentMarkSweepThread::should_yield() &&
duke@435 442 !_collector->foregroundGCIsActive() &&
duke@435 443 _yield) {
duke@435 444 do_yield_work();
duke@435 445 }
duke@435 446 }
duke@435 447
duke@435 448 inline void Par_MarkFromRootsClosure::do_yield_check() {
duke@435 449 if (ConcurrentMarkSweepThread::should_yield() &&
duke@435 450 !_collector->foregroundGCIsActive() &&
duke@435 451 _yield) {
duke@435 452 do_yield_work();
duke@435 453 }
duke@435 454 }
duke@435 455
duke@435 456 // Return value of "true" indicates that the on-going preclean
duke@435 457 // should be aborted.
duke@435 458 inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
duke@435 459 if (ConcurrentMarkSweepThread::should_yield() &&
duke@435 460 !_collector->foregroundGCIsActive() &&
duke@435 461 _yield) {
duke@435 462 // Sample young gen size before and after yield
duke@435 463 _collector->sample_eden();
duke@435 464 do_yield_work();
duke@435 465 _collector->sample_eden();
duke@435 466 return _collector->should_abort_preclean();
duke@435 467 }
duke@435 468 return false;
duke@435 469 }
duke@435 470
duke@435 471 inline void SurvivorSpacePrecleanClosure::do_yield_check() {
duke@435 472 if (ConcurrentMarkSweepThread::should_yield() &&
duke@435 473 !_collector->foregroundGCIsActive() &&
duke@435 474 _yield) {
duke@435 475 // Sample young gen size before and after yield
duke@435 476 _collector->sample_eden();
duke@435 477 do_yield_work();
duke@435 478 _collector->sample_eden();
duke@435 479 }
duke@435 480 }
duke@435 481
duke@435 482 inline void SweepClosure::do_yield_check(HeapWord* addr) {
duke@435 483 if (ConcurrentMarkSweepThread::should_yield() &&
duke@435 484 !_collector->foregroundGCIsActive() &&
duke@435 485 _yield) {
duke@435 486 do_yield_work(addr);
duke@435 487 }
duke@435 488 }
duke@435 489
duke@435 490 inline void MarkRefsIntoAndScanClosure::do_yield_check() {
duke@435 491 // The conditions are ordered for the remarking phase
duke@435 492 // when _yield is false.
duke@435 493 if (_yield &&
duke@435 494 !_collector->foregroundGCIsActive() &&
duke@435 495 ConcurrentMarkSweepThread::should_yield()) {
duke@435 496 do_yield_work();
duke@435 497 }
duke@435 498 }
duke@435 499
duke@435 500
duke@435 501 inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
duke@435 502 // Align the end of mr so it's at a card boundary.
duke@435 503 // This is superfluous except at the end of the space;
duke@435 504 // we should do better than this XXX
duke@435 505 MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
duke@435 506 CardTableModRefBS::card_size /* bytes */));
duke@435 507 _t->mark_range(mr2);
duke@435 508 }
duke@435 509
duke@435 510 inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
duke@435 511 // Align the end of mr so it's at a card boundary.
duke@435 512 // This is superfluous except at the end of the space;
duke@435 513 // we should do better than this XXX
duke@435 514 MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
duke@435 515 CardTableModRefBS::card_size /* bytes */));
duke@435 516 _t->par_mark_range(mr2);
duke@435 517 }
stefank@2314 518
stefank@2314 519 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
