src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp

author       jwilhelm
date         Fri, 01 Nov 2013 17:09:38 +0100
changeset    6085:8f07aa079343
parent       4037:da91efe96a93
child        6876:710a3c8b516e
permissions  -rw-r--r--

8016309: assert(eden_size > 0 && survivor_size > 0) failed: just checking
7057939: jmap shows MaxNewSize=4GB when Java is using parallel collector
Summary: Major cleanup of the collectorpolicy classes
Reviewed-by: tschatzl, jcoomes

duke@435 1 /*
coleenp@4037 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
stefank@2314 27
stefank@2314 28 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
stefank@2314 29 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
stefank@2314 30 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
stefank@2314 31 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
stefank@2314 32 #include "gc_implementation/shared/gcUtil.hpp"
stefank@2314 33 #include "memory/defNewGeneration.hpp"
stefank@2314 34
duke@435 35 inline void CMSBitMap::clear_all() {
duke@435 36 assert_locked();
duke@435 37 // CMS bitmaps usually cover large memory regions.
duke@435 38 _bm.clear_large();
duke@435 39 return;
duke@435 40 }
duke@435 41
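// Illustrative note: the conversions below map heap addresses to bit offsets
// and back; each bit covers (1 << _shifter) heap words. For example, with a
// _shifter of 0 (one bit per heap word), the address _bmStartWord + 100 maps
// to bit offset 100, and offsetToHeapWord(100) maps back to that same address.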
duke@435 42 inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
duke@435 43 return (pointer_delta(addr, _bmStartWord)) >> _shifter;
duke@435 44 }
duke@435 45
duke@435 46 inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
duke@435 47 return _bmStartWord + (offset << _shifter);
duke@435 48 }
duke@435 49
duke@435 50 inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
duke@435 51 assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
duke@435 52 return diff >> _shifter;
duke@435 53 }
duke@435 54
duke@435 55 inline void CMSBitMap::mark(HeapWord* addr) {
duke@435 56 assert_locked();
duke@435 57 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 58 "outside underlying space?");
duke@435 59 _bm.set_bit(heapWordToOffset(addr));
duke@435 60 }
duke@435 61
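// Note: unlike mark(), par_mark() below reports whether this thread actually
// flipped the bit: BitMap::par_at_put() returns true only if the bit changed
// as a result of this call, which lets concurrent marking threads use the
// return value to claim an address exactly once.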
duke@435 62 inline bool CMSBitMap::par_mark(HeapWord* addr) {
duke@435 63 assert_locked();
duke@435 64 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 65 "outside underlying space?");
duke@435 66 return _bm.par_at_put(heapWordToOffset(addr), true);
duke@435 67 }
duke@435 68
duke@435 69 inline void CMSBitMap::par_clear(HeapWord* addr) {
duke@435 70 assert_locked();
duke@435 71 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 72 "outside underlying space?");
duke@435 73 _bm.par_at_put(heapWordToOffset(addr), false);
duke@435 74 }
duke@435 75
duke@435 76 inline void CMSBitMap::mark_range(MemRegion mr) {
duke@435 77 NOT_PRODUCT(region_invariant(mr));
duke@435 78 // Range size is usually just 1 bit.
duke@435 79 _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 80 BitMap::small_range);
duke@435 81 }
duke@435 82
duke@435 83 inline void CMSBitMap::clear_range(MemRegion mr) {
duke@435 84 NOT_PRODUCT(region_invariant(mr));
duke@435 85 // Range size is usually just 1 bit.
duke@435 86 _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 87 BitMap::small_range);
duke@435 88 }
duke@435 89
duke@435 90 inline void CMSBitMap::par_mark_range(MemRegion mr) {
duke@435 91 NOT_PRODUCT(region_invariant(mr));
duke@435 92 // Range size is usually just 1 bit.
duke@435 93 _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 94 BitMap::small_range);
duke@435 95 }
duke@435 96
duke@435 97 inline void CMSBitMap::par_clear_range(MemRegion mr) {
duke@435 98 NOT_PRODUCT(region_invariant(mr));
duke@435 99 // Range size is usually just 1 bit.
duke@435 100 _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 101 BitMap::small_range);
duke@435 102 }
duke@435 103
duke@435 104 inline void CMSBitMap::mark_large_range(MemRegion mr) {
duke@435 105 NOT_PRODUCT(region_invariant(mr));
duke@435 106 // Range size must be greater than 32 bytes.
duke@435 107 _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 108 BitMap::large_range);
duke@435 109 }
duke@435 110
duke@435 111 inline void CMSBitMap::clear_large_range(MemRegion mr) {
duke@435 112 NOT_PRODUCT(region_invariant(mr));
duke@435 113 // Range size must be greater than 32 bytes.
duke@435 114 _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 115 BitMap::large_range);
duke@435 116 }
duke@435 117
duke@435 118 inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
duke@435 119 NOT_PRODUCT(region_invariant(mr));
duke@435 120 // Range size must be greater than 32 bytes.
duke@435 121 _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 122 BitMap::large_range);
duke@435 123 }
duke@435 124
duke@435 125 inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
duke@435 126 NOT_PRODUCT(region_invariant(mr));
duke@435 127 // Range size must be greater than 32 bytes.
duke@435 128 _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 129 BitMap::large_range);
duke@435 130 }
duke@435 131
duke@435 132 // Starting at "addr" (inclusive) return a memory region
duke@435 133 // corresponding to the first maximally contiguous marked ("1") region.
duke@435 134 inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
duke@435 135 return getAndClearMarkedRegion(addr, endWord());
duke@435 136 }
duke@435 137
duke@435 138 // Starting at "start_addr" (inclusive) return a memory region
duke@435 139 // corresponding to the first maximally contiguous marked ("1") region
duke@435 140 // strictly less than end_addr.
duke@435 141 inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
duke@435 142 HeapWord* end_addr) {
duke@435 143 HeapWord *start, *end;
duke@435 144 assert_locked();
duke@435 145 start = getNextMarkedWordAddress (start_addr, end_addr);
duke@435 146 end = getNextUnmarkedWordAddress(start, end_addr);
duke@435 147 assert(start <= end, "Consistency check");
duke@435 148 MemRegion mr(start, end);
duke@435 149 if (!mr.is_empty()) {
duke@435 150 clear_range(mr);
duke@435 151 }
duke@435 152 return mr;
duke@435 153 }
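// A sketch of one possible caller pattern for the method above (process() is
// a hypothetical placeholder, and the bitmap lock must be held throughout):
//
//   MemRegion mr = bm->getAndClearMarkedRegion(start, end);
//   while (!mr.is_empty()) {
//     process(mr);                                    // caller-specific work
//     mr = bm->getAndClearMarkedRegion(mr.end(), end);
//   }
//
// The loop terminates once no marked bits remain below end, at which point an
// empty region is returned.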
duke@435 154
duke@435 155 inline bool CMSBitMap::isMarked(HeapWord* addr) const {
duke@435 156 assert_locked();
duke@435 157 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 158 "outside underlying space?");
duke@435 159 return _bm.at(heapWordToOffset(addr));
duke@435 160 }
duke@435 161
duke@435 162 // The same as isMarked() but without a lock check.
duke@435 163 inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
duke@435 164 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 165 "outside underlying space?");
duke@435 166 return _bm.at(heapWordToOffset(addr));
duke@435 167 }
duke@435 168
duke@435 169
duke@435 170 inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
duke@435 171 assert_locked();
duke@435 172 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 173 "outside underlying space?");
duke@435 174 return !_bm.at(heapWordToOffset(addr));
duke@435 175 }
duke@435 176
duke@435 177 // Return the HeapWord address corresponding to the next "1" bit
duke@435 178 // (inclusive).
duke@435 179 inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
duke@435 180 return getNextMarkedWordAddress(addr, endWord());
duke@435 181 }
duke@435 182
duke@435 183 // Return the least HeapWord address corresponding to next "1" bit
duke@435 184 // starting at start_addr (inclusive) but strictly less than end_addr.
duke@435 185 inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
duke@435 186 HeapWord* start_addr, HeapWord* end_addr) const {
duke@435 187 assert_locked();
duke@435 188 size_t nextOffset = _bm.get_next_one_offset(
duke@435 189 heapWordToOffset(start_addr),
duke@435 190 heapWordToOffset(end_addr));
duke@435 191 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
duke@435 192 assert(nextAddr >= start_addr &&
duke@435 193 nextAddr <= end_addr, "get_next_one postcondition");
duke@435 194 assert((nextAddr == end_addr) ||
duke@435 195 isMarked(nextAddr), "get_next_one postcondition");
duke@435 196 return nextAddr;
duke@435 197 }
duke@435 198
duke@435 199
duke@435 200 // Return the HeapWord address corresponding to the next "0" bit
duke@435 201 // (inclusive).
duke@435 202 inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
duke@435 203 return getNextUnmarkedWordAddress(addr, endWord());
duke@435 204 }
duke@435 205
duke@435 206 // Return the HeapWord address corresponding to the next "0" bit
duke@435 207 // (inclusive).
duke@435 208 inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
duke@435 209 HeapWord* start_addr, HeapWord* end_addr) const {
duke@435 210 assert_locked();
duke@435 211 size_t nextOffset = _bm.get_next_zero_offset(
duke@435 212 heapWordToOffset(start_addr),
duke@435 213 heapWordToOffset(end_addr));
duke@435 214 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
duke@435 215 assert(nextAddr >= start_addr &&
duke@435 216 nextAddr <= end_addr, "get_next_zero postcondition");
duke@435 217 assert((nextAddr == end_addr) ||
duke@435 218 isUnmarked(nextAddr), "get_next_zero postcondition");
duke@435 219 return nextAddr;
duke@435 220 }
duke@435 221
duke@435 222 inline bool CMSBitMap::isAllClear() const {
duke@435 223 assert_locked();
duke@435 224 return getNextMarkedWordAddress(startWord()) >= endWord();
duke@435 225 }
duke@435 226
duke@435 227 inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
duke@435 228 HeapWord* right) {
duke@435 229 assert_locked();
duke@435 230 left = MAX2(_bmStartWord, left);
duke@435 231 right = MIN2(_bmStartWord + _bmWordSize, right);
duke@435 232 if (right > left) {
duke@435 233 _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
duke@435 234 }
duke@435 235 }
duke@435 236
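// The i-CMS (incremental mode) helpers below are no-ops unless
// CMSIncrementalMode is enabled; when it is, they forward to the
// corresponding ConcurrentMarkSweepThread operations.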
duke@435 237 inline void CMSCollector::start_icms() {
duke@435 238 if (CMSIncrementalMode) {
duke@435 239 ConcurrentMarkSweepThread::start_icms();
duke@435 240 }
duke@435 241 }
duke@435 242
duke@435 243 inline void CMSCollector::stop_icms() {
duke@435 244 if (CMSIncrementalMode) {
duke@435 245 ConcurrentMarkSweepThread::stop_icms();
duke@435 246 }
duke@435 247 }
duke@435 248
duke@435 249 inline void CMSCollector::disable_icms() {
duke@435 250 if (CMSIncrementalMode) {
duke@435 251 ConcurrentMarkSweepThread::disable_icms();
duke@435 252 }
duke@435 253 }
duke@435 254
duke@435 255 inline void CMSCollector::enable_icms() {
duke@435 256 if (CMSIncrementalMode) {
duke@435 257 ConcurrentMarkSweepThread::enable_icms();
duke@435 258 }
duke@435 259 }
duke@435 260
duke@435 261 inline void CMSCollector::icms_wait() {
duke@435 262 if (CMSIncrementalMode) {
duke@435 263 cmsThread()->icms_wait();
duke@435 264 }
duke@435 265 }
duke@435 266
duke@435 267 inline void CMSCollector::save_sweep_limits() {
duke@435 268 _cmsGen->save_sweep_limit();
duke@435 269 }
duke@435 270
duke@435 271 inline bool CMSCollector::is_dead_obj(oop obj) const {
duke@435 272 HeapWord* addr = (HeapWord*)obj;
duke@435 273 assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
coleenp@4037 274 && _cmsGen->cmsSpace()->block_is_obj(addr)),
duke@435 275 "must be object");
ysr@529 276 return should_unload_classes() &&
duke@435 277 _collectorState == Sweeping &&
duke@435 278 !_markBitMap.isMarked(addr);
duke@435 279 }
duke@435 280
duke@435 281 inline bool CMSCollector::should_abort_preclean() const {
duke@435 282 // We are in the midst of an "abortable preclean" and either
duke@435 283 // a scavenge has completed or the foreground GC wants to take over collection.
duke@435 284 return _collectorState == AbortablePreclean &&
duke@435 285 (_abort_preclean || _foregroundGCIsActive ||
ysr@2336 286 GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
duke@435 287 }
duke@435 288
duke@435 289 inline size_t CMSCollector::get_eden_used() const {
duke@435 290 return _young_gen->as_DefNewGeneration()->eden()->used();
duke@435 291 }
duke@435 292
duke@435 293 inline size_t CMSCollector::get_eden_capacity() const {
duke@435 294 return _young_gen->as_DefNewGeneration()->eden()->capacity();
duke@435 295 }
duke@435 296
duke@435 297 inline bool CMSStats::valid() const {
duke@435 298 return _valid_bits == _ALL_VALID;
duke@435 299 }
duke@435 300
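// Note on the averages maintained below: AdaptiveWeightedAverage::exp_avg()
// (from gcUtil.hpp) blends the previous average with the newest sample, with
// the alpha parameter controlling how much weight the new sample receives, so
// the recorded periods, durations and allocation figures track recent
// behavior rather than weighting all history equally.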
duke@435 301 inline void CMSStats::record_gc0_begin() {
duke@435 302 if (_gc0_begin_time.is_updated()) {
duke@435 303 float last_gc0_period = _gc0_begin_time.seconds();
duke@435 304 _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
duke@435 305 last_gc0_period, _gc0_alpha);
duke@435 306 _gc0_alpha = _saved_alpha;
duke@435 307 _valid_bits |= _GC0_VALID;
duke@435 308 }
duke@435 309 _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();
duke@435 310
duke@435 311 _gc0_begin_time.update();
duke@435 312 }
duke@435 313
duke@435 314 inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
duke@435 315 float last_gc0_duration = _gc0_begin_time.seconds();
duke@435 316 _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
duke@435 317 last_gc0_duration, _gc0_alpha);
duke@435 318
duke@435 319 // Amount promoted.
duke@435 320 _cms_used_at_gc0_end = cms_gen_bytes_used;
duke@435 321
duke@435 322 size_t promoted_bytes = 0;
duke@435 323 if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
duke@435 324 promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
duke@435 325 }
duke@435 326
duke@435 327 // If the younger gen collections were skipped, then the
duke@435 328 // number of promoted bytes will be 0 and adding it to the
duke@435 329 // average will incorrectly lessen the average. It is, however,
duke@435 330 // also possible that no promotion was needed.
duke@435 331 //
duke@435 332 // _gc0_promoted used to be calculated as
duke@435 333 // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
duke@435 334 // promoted_bytes, _gc0_alpha);
duke@435 335 _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
duke@435 336 _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();
duke@435 337
duke@435 338 // Amount directly allocated.
duke@435 339 size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
duke@435 340 _cms_gen->reset_direct_allocated_words();
duke@435 341 _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
duke@435 342 allocated_bytes, _gc0_alpha);
duke@435 343 }
duke@435 344
duke@435 345 inline void CMSStats::record_cms_begin() {
duke@435 346 _cms_timer.stop();
duke@435 347
duke@435 348 // This is just an approximate value, but is good enough.
duke@435 349 _cms_used_at_cms_begin = _cms_used_at_gc0_end;
duke@435 350
duke@435 351 _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
duke@435 352 (float) _cms_timer.seconds(), _cms_alpha);
duke@435 353 _cms_begin_time.update();
duke@435 354
duke@435 355 _cms_timer.reset();
duke@435 356 _cms_timer.start();
duke@435 357 }
duke@435 358
duke@435 359 inline void CMSStats::record_cms_end() {
duke@435 360 _cms_timer.stop();
duke@435 361
duke@435 362 float cur_duration = _cms_timer.seconds();
duke@435 363 _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
duke@435 364 cur_duration, _cms_alpha);
duke@435 365
duke@435 366 // Avoid division by 0.
duke@435 367 const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
duke@435 368 _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
duke@435 369 cur_duration / cms_used_mb,
duke@435 370 _cms_alpha);
duke@435 371
duke@435 372 _cms_end_time.update();
duke@435 373 _cms_alpha = _saved_alpha;
duke@435 374 _allow_duty_cycle_reduction = true;
duke@435 375 _valid_bits |= _CMS_VALID;
duke@435 376
duke@435 377 _cms_timer.start();
duke@435 378 }
duke@435 379
duke@435 380 inline double CMSStats::cms_time_since_begin() const {
duke@435 381 return _cms_begin_time.seconds();
duke@435 382 }
duke@435 383
duke@435 384 inline double CMSStats::cms_time_since_end() const {
duke@435 385 return _cms_end_time.seconds();
duke@435 386 }
duke@435 387
duke@435 388 inline double CMSStats::promotion_rate() const {
duke@435 389 assert(valid(), "statistics not valid yet");
duke@435 390 return gc0_promoted() / gc0_period();
duke@435 391 }
duke@435 392
duke@435 393 inline double CMSStats::cms_allocation_rate() const {
duke@435 394 assert(valid(), "statistics not valid yet");
duke@435 395 return cms_allocated() / gc0_period();
duke@435 396 }
duke@435 397
duke@435 398 inline double CMSStats::cms_consumption_rate() const {
duke@435 399 assert(valid(), "statistics not valid yet");
duke@435 400 return (gc0_promoted() + cms_allocated()) / gc0_period();
duke@435 401 }
duke@435 402
duke@435 403 inline unsigned int CMSStats::icms_update_duty_cycle() {
duke@435 404 // Update the duty cycle only if pacing is enabled and the stats are valid
duke@435 405 // (after at least one young gen gc and one cms cycle have completed).
duke@435 406 if (CMSIncrementalPacing && valid()) {
duke@435 407 return icms_update_duty_cycle_impl();
duke@435 408 }
duke@435 409 return _icms_duty_cycle;
duke@435 410 }
duke@435 411
duke@435 412 inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
duke@435 413 cmsSpace()->save_sweep_limit();
duke@435 414 }
duke@435 415
duke@435 416 inline size_t ConcurrentMarkSweepGeneration::capacity() const {
duke@435 417 return _cmsSpace->capacity();
duke@435 418 }
duke@435 419
duke@435 420 inline size_t ConcurrentMarkSweepGeneration::used() const {
duke@435 421 return _cmsSpace->used();
duke@435 422 }
duke@435 423
duke@435 424 inline size_t ConcurrentMarkSweepGeneration::free() const {
duke@435 425 return _cmsSpace->free();
duke@435 426 }
duke@435 427
duke@435 428 inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
duke@435 429 return _cmsSpace->used_region();
duke@435 430 }
duke@435 431
duke@435 432 inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
duke@435 433 return _cmsSpace->used_region_at_save_marks();
duke@435 434 }
duke@435 435
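// The do_yield_check() methods below share one pattern: a long-running
// concurrent phase yields (via do_yield_work()) only when the CMS thread has
// been asked to yield, no foreground (stop-the-world) collection is active,
// and yielding is enabled for this closure (_yield is true).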
duke@435 436 inline void MarkFromRootsClosure::do_yield_check() {
duke@435 437 if (ConcurrentMarkSweepThread::should_yield() &&
duke@435 438 !_collector->foregroundGCIsActive() &&
duke@435 439 _yield) {
duke@435 440 do_yield_work();
duke@435 441 }
duke@435 442 }
duke@435 443
duke@435 444 inline void Par_MarkFromRootsClosure::do_yield_check() {
duke@435 445 if (ConcurrentMarkSweepThread::should_yield() &&
duke@435 446 !_collector->foregroundGCIsActive() &&
duke@435 447 _yield) {
duke@435 448 do_yield_work();
duke@435 449 }
duke@435 450 }
duke@435 451
coleenp@4037 452 inline void PushOrMarkClosure::do_yield_check() {
coleenp@4037 453 _parent->do_yield_check();
coleenp@4037 454 }
coleenp@4037 455
coleenp@4037 456 inline void Par_PushOrMarkClosure::do_yield_check() {
coleenp@4037 457 _parent->do_yield_check();
coleenp@4037 458 }
coleenp@4037 459
duke@435 460 // Return value of "true" indicates that the on-going preclean
duke@435 461 // should be aborted.
duke@435 462 inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
duke@435 463 if (ConcurrentMarkSweepThread::should_yield() &&
duke@435 464 !_collector->foregroundGCIsActive() &&
duke@435 465 _yield) {
duke@435 466 // Sample young gen size before and after yield
duke@435 467 _collector->sample_eden();
duke@435 468 do_yield_work();
duke@435 469 _collector->sample_eden();
duke@435 470 return _collector->should_abort_preclean();
duke@435 471 }
duke@435 472 return false;
duke@435 473 }
duke@435 474
duke@435 475 inline void SurvivorSpacePrecleanClosure::do_yield_check() {
duke@435 476 if (ConcurrentMarkSweepThread::should_yield() &&
duke@435 477 !_collector->foregroundGCIsActive() &&
duke@435 478 _yield) {
duke@435 479 // Sample young gen size before and after yield
duke@435 480 _collector->sample_eden();
duke@435 481 do_yield_work();
duke@435 482 _collector->sample_eden();
duke@435 483 }
duke@435 484 }
duke@435 485
duke@435 486 inline void SweepClosure::do_yield_check(HeapWord* addr) {
duke@435 487 if (ConcurrentMarkSweepThread::should_yield() &&
duke@435 488 !_collector->foregroundGCIsActive() &&
duke@435 489 _yield) {
duke@435 490 do_yield_work(addr);
duke@435 491 }
duke@435 492 }
duke@435 493
duke@435 494 inline void MarkRefsIntoAndScanClosure::do_yield_check() {
duke@435 495 // The conditions are ordered for the remarking phase
duke@435 496 // when _yield is false.
duke@435 497 if (_yield &&
duke@435 498 !_collector->foregroundGCIsActive() &&
duke@435 499 ConcurrentMarkSweepThread::should_yield()) {
duke@435 500 do_yield_work();
duke@435 501 }
duke@435 502 }
duke@435 503
duke@435 504
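// Note: the closures below record a dirtied region in the bitmap they wrap
// (the mod union table). round_to() pads mr.end() up to the next card
// boundary (CardTableModRefBS::card_size bytes, 512 by default), so slightly
// more than mr may be marked; over-marking is safe because the table only
// needs to be a conservative approximation.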
duke@435 505 inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
duke@435 506 // Align the end of mr so it's at a card boundary.
duke@435 507 // This is superfluous except at the end of the space;
duke@435 508 // we should do better than this XXX
duke@435 509 MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
duke@435 510 CardTableModRefBS::card_size /* bytes */));
duke@435 511 _t->mark_range(mr2);
duke@435 512 }
duke@435 513
duke@435 514 inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
duke@435 515 // Align the end of mr so it's at a card boundary.
duke@435 516 // This is superfluous except at the end of the space;
duke@435 517 // we should do better than this XXX
duke@435 518 MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
duke@435 519 CardTableModRefBS::card_size /* bytes */));
duke@435 520 _t->par_mark_range(mr2);
duke@435 521 }
stefank@2314 522
stefank@2314 523 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
