src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp

author:      ysr
date:        Sun, 16 Mar 2008 21:57:25 -0700
changeset:   529:0834225a7916
parent:      435:a61af66fc99e
child:       631:d1605aabd0a1
permissions: -rw-r--r--
6634032: CMS: Need CMSInitiatingPermOccupancyFraction for perm, divorcing from CMSInitiatingOccupancyFraction
Summary: The option CMSInitiatingPermOccupancyFraction now controls the perm-gen triggering threshold. Although the actual value of the threshold has not yet been changed (so there is no change in policy), we now have the infrastructure in place for dynamically deciding when to collect the perm gen, an issue that will be addressed in the near future.
Reviewed-by: jmasa
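
For illustration only, the new option is set on the command line alongside the existing one; the percentages below are arbitrary example values, not recommendations from this changeset:

    java -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=65 -XX:CMSInitiatingPermOccupancyFraction=80 ...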

duke@435 1 /*
duke@435 2 * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
duke@435 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@435 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@435 21 * have any questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 inline void CMSBitMap::clear_all() {
duke@435 26 assert_locked();
duke@435 27 // CMS bitmaps usually cover large memory regions
duke@435 28 _bm.clear_large();
duke@435 29 return;
duke@435 30 }
duke@435 31
duke@435 32 inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
duke@435 33 return (pointer_delta(addr, _bmStartWord)) >> _shifter;
duke@435 34 }
duke@435 35
duke@435 36 inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
duke@435 37 return _bmStartWord + (offset << _shifter);
duke@435 38 }
duke@435 39
duke@435 40 inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
duke@435 41 assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
duke@435 42 return diff >> _shifter;
duke@435 43 }
duke@435 44
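// In the conversions above, each bitmap bit covers 2^_shifter consecutive
// HeapWords; in the simplest case (_shifter == 0), heapWordToOffset() and
// offsetToHeapWord() reduce to plain word offsets from _bmStartWord.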
duke@435 45 inline void CMSBitMap::mark(HeapWord* addr) {
duke@435 46 assert_locked();
duke@435 47 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 48 "outside underlying space?");
duke@435 49 _bm.set_bit(heapWordToOffset(addr));
duke@435 50 }
duke@435 51
duke@435 52 inline bool CMSBitMap::par_mark(HeapWord* addr) {
duke@435 53 assert_locked();
duke@435 54 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 55 "outside underlying space?");
duke@435 56 return _bm.par_at_put(heapWordToOffset(addr), true);
duke@435 57 }
duke@435 58
duke@435 59 inline void CMSBitMap::par_clear(HeapWord* addr) {
duke@435 60 assert_locked();
duke@435 61 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 62 "outside underlying space?");
duke@435 63 _bm.par_at_put(heapWordToOffset(addr), false);
duke@435 64 }
duke@435 65
duke@435 66 inline void CMSBitMap::mark_range(MemRegion mr) {
duke@435 67 NOT_PRODUCT(region_invariant(mr));
duke@435 68 // Range size is usually just 1 bit.
duke@435 69 _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 70 BitMap::small_range);
duke@435 71 }
duke@435 72
duke@435 73 inline void CMSBitMap::clear_range(MemRegion mr) {
duke@435 74 NOT_PRODUCT(region_invariant(mr));
duke@435 75 // Range size is usually just 1 bit.
duke@435 76 _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 77 BitMap::small_range);
duke@435 78 }
duke@435 79
duke@435 80 inline void CMSBitMap::par_mark_range(MemRegion mr) {
duke@435 81 NOT_PRODUCT(region_invariant(mr));
duke@435 82 // Range size is usually just 1 bit.
duke@435 83 _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 84 BitMap::small_range);
duke@435 85 }
duke@435 86
duke@435 87 inline void CMSBitMap::par_clear_range(MemRegion mr) {
duke@435 88 NOT_PRODUCT(region_invariant(mr));
duke@435 89 // Range size is usually just 1 bit.
duke@435 90 _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 91 BitMap::small_range);
duke@435 92 }
duke@435 93
duke@435 94 inline void CMSBitMap::mark_large_range(MemRegion mr) {
duke@435 95 NOT_PRODUCT(region_invariant(mr));
duke@435 96 // Range size must be greater than 32 bytes.
duke@435 97 _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 98 BitMap::large_range);
duke@435 99 }
duke@435 100
duke@435 101 inline void CMSBitMap::clear_large_range(MemRegion mr) {
duke@435 102 NOT_PRODUCT(region_invariant(mr));
duke@435 103 // Range size must be greater than 32 bytes.
duke@435 104 _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 105 BitMap::large_range);
duke@435 106 }
duke@435 107
duke@435 108 inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
duke@435 109 NOT_PRODUCT(region_invariant(mr));
duke@435 110 // Range size must be greater than 32 bytes.
duke@435 111 _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 112 BitMap::large_range);
duke@435 113 }
duke@435 114
duke@435 115 inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
duke@435 116 NOT_PRODUCT(region_invariant(mr));
duke@435 117 // Range size must be greater than 32 bytes.
duke@435 118 _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
duke@435 119 BitMap::large_range);
duke@435 120 }
duke@435 121
duke@435 122 // Starting at "addr" (inclusive) return a memory region
duke@435 123 // corresponding to the first maximally contiguous marked ("1") region.
duke@435 124 inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
duke@435 125 return getAndClearMarkedRegion(addr, endWord());
duke@435 126 }
duke@435 127
duke@435 128 // Starting at "start_addr" (inclusive) return a memory region
duke@435 129 // corresponding to the first maximal contiguous marked ("1") region
duke@435 130 // strictly less than end_addr.
duke@435 131 inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
duke@435 132 HeapWord* end_addr) {
duke@435 133 HeapWord *start, *end;
duke@435 134 assert_locked();
duke@435 135 start = getNextMarkedWordAddress (start_addr, end_addr);
duke@435 136 end = getNextUnmarkedWordAddress(start, end_addr);
duke@435 137 assert(start <= end, "Consistency check");
duke@435 138 MemRegion mr(start, end);
duke@435 139 if (!mr.is_empty()) {
duke@435 140 clear_range(mr);
duke@435 141 }
duke@435 142 return mr;
duke@435 143 }
duke@435 144
duke@435 145 inline bool CMSBitMap::isMarked(HeapWord* addr) const {
duke@435 146 assert_locked();
duke@435 147 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 148 "outside underlying space?");
duke@435 149 return _bm.at(heapWordToOffset(addr));
duke@435 150 }
duke@435 151
duke@435 152 // The same as isMarked() but without a lock check.
duke@435 153 inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
duke@435 154 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 155 "outside underlying space?");
duke@435 156 return _bm.at(heapWordToOffset(addr));
duke@435 157 }
duke@435 158
duke@435 159
duke@435 160 inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
duke@435 161 assert_locked();
duke@435 162 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
duke@435 163 "outside underlying space?");
duke@435 164 return !_bm.at(heapWordToOffset(addr));
duke@435 165 }
duke@435 166
duke@435 167 // Return the HeapWord address corresponding to the next "1" bit
duke@435 168 // (inclusive).
duke@435 169 inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
duke@435 170 return getNextMarkedWordAddress(addr, endWord());
duke@435 171 }
duke@435 172
duke@435 173 // Return the least HeapWord address corresponding to the next "1" bit
duke@435 174 // starting at start_addr (inclusive) but strictly less than end_addr.
duke@435 175 inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
duke@435 176 HeapWord* start_addr, HeapWord* end_addr) const {
duke@435 177 assert_locked();
duke@435 178 size_t nextOffset = _bm.get_next_one_offset(
duke@435 179 heapWordToOffset(start_addr),
duke@435 180 heapWordToOffset(end_addr));
duke@435 181 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
duke@435 182 assert(nextAddr >= start_addr &&
duke@435 183 nextAddr <= end_addr, "get_next_one postcondition");
duke@435 184 assert((nextAddr == end_addr) ||
duke@435 185 isMarked(nextAddr), "get_next_one postcondition");
duke@435 186 return nextAddr;
duke@435 187 }
duke@435 188
duke@435 189
duke@435 190 // Return the HeapWord address corresponding to the next "0" bit
duke@435 191 // (inclusive).
duke@435 192 inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
duke@435 193 return getNextUnmarkedWordAddress(addr, endWord());
duke@435 194 }
duke@435 195
duke@435 196 // Return the HeapWord address corresponding to the next "0" bit
duke@435 197 // (inclusive).
duke@435 198 inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
duke@435 199 HeapWord* start_addr, HeapWord* end_addr) const {
duke@435 200 assert_locked();
duke@435 201 size_t nextOffset = _bm.get_next_zero_offset(
duke@435 202 heapWordToOffset(start_addr),
duke@435 203 heapWordToOffset(end_addr));
duke@435 204 HeapWord* nextAddr = offsetToHeapWord(nextOffset);
duke@435 205 assert(nextAddr >= start_addr &&
duke@435 206 nextAddr <= end_addr, "get_next_zero postcondition");
duke@435 207 assert((nextAddr == end_addr) ||
duke@435 208 isUnmarked(nextAddr), "get_next_zero postcondition");
duke@435 209 return nextAddr;
duke@435 210 }
duke@435 211
duke@435 212 inline bool CMSBitMap::isAllClear() const {
duke@435 213 assert_locked();
duke@435 214 return getNextMarkedWordAddress(startWord()) >= endWord();
duke@435 215 }
duke@435 216
duke@435 217 inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
duke@435 218 HeapWord* right) {
duke@435 219 assert_locked();
duke@435 220 left = MAX2(_bmStartWord, left);
duke@435 221 right = MIN2(_bmStartWord + _bmWordSize, right);
duke@435 222 if (right > left) {
duke@435 223 _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
duke@435 224 }
duke@435 225 }
duke@435 226
duke@435 227 inline void CMSCollector::start_icms() {
duke@435 228 if (CMSIncrementalMode) {
duke@435 229 ConcurrentMarkSweepThread::start_icms();
duke@435 230 }
duke@435 231 }
duke@435 232
duke@435 233 inline void CMSCollector::stop_icms() {
duke@435 234 if (CMSIncrementalMode) {
duke@435 235 ConcurrentMarkSweepThread::stop_icms();
duke@435 236 }
duke@435 237 }
duke@435 238
duke@435 239 inline void CMSCollector::disable_icms() {
duke@435 240 if (CMSIncrementalMode) {
duke@435 241 ConcurrentMarkSweepThread::disable_icms();
duke@435 242 }
duke@435 243 }
duke@435 244
duke@435 245 inline void CMSCollector::enable_icms() {
duke@435 246 if (CMSIncrementalMode) {
duke@435 247 ConcurrentMarkSweepThread::enable_icms();
duke@435 248 }
duke@435 249 }
duke@435 250
duke@435 251 inline void CMSCollector::icms_wait() {
duke@435 252 if (CMSIncrementalMode) {
duke@435 253 cmsThread()->icms_wait();
duke@435 254 }
duke@435 255 }
duke@435 256
duke@435 257 inline void CMSCollector::save_sweep_limits() {
duke@435 258 _cmsGen->save_sweep_limit();
duke@435 259 _permGen->save_sweep_limit();
duke@435 260 }
duke@435 261
duke@435 262 inline bool CMSCollector::is_dead_obj(oop obj) const {
duke@435 263 HeapWord* addr = (HeapWord*)obj;
duke@435 264 assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
duke@435 265 && _cmsGen->cmsSpace()->block_is_obj(addr))
duke@435 266 ||
duke@435 267 (_permGen->cmsSpace()->is_in_reserved(addr)
duke@435 268 && _permGen->cmsSpace()->block_is_obj(addr)),
duke@435 269 "must be object");
ysr@529 270 return should_unload_classes() &&
duke@435 271 _collectorState == Sweeping &&
duke@435 272 !_markBitMap.isMarked(addr);
duke@435 273 }
duke@435 274
duke@435 275 inline bool CMSCollector::should_abort_preclean() const {
duke@435 276 // We are in the midst of an "abortable preclean" and either
duke@435 277 // scavenge is done or foreground GC wants to take over collection
duke@435 278 return _collectorState == AbortablePreclean &&
duke@435 279 (_abort_preclean || _foregroundGCIsActive ||
duke@435 280 GenCollectedHeap::heap()->incremental_collection_will_fail());
duke@435 281 }
duke@435 282
duke@435 283 inline size_t CMSCollector::get_eden_used() const {
duke@435 284 return _young_gen->as_DefNewGeneration()->eden()->used();
duke@435 285 }
duke@435 286
duke@435 287 inline size_t CMSCollector::get_eden_capacity() const {
duke@435 288 return _young_gen->as_DefNewGeneration()->eden()->capacity();
duke@435 289 }
duke@435 290
duke@435 291 inline bool CMSStats::valid() const {
duke@435 292 return _valid_bits == _ALL_VALID;
duke@435 293 }
duke@435 294
duke@435 295 inline void CMSStats::record_gc0_begin() {
duke@435 296 if (_gc0_begin_time.is_updated()) {
duke@435 297 float last_gc0_period = _gc0_begin_time.seconds();
duke@435 298 _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
duke@435 299 last_gc0_period, _gc0_alpha);
duke@435 300 _gc0_alpha = _saved_alpha;
duke@435 301 _valid_bits |= _GC0_VALID;
duke@435 302 }
duke@435 303 _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();
duke@435 304
duke@435 305 _gc0_begin_time.update();
duke@435 306 }
duke@435 307
duke@435 308 inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
duke@435 309 float last_gc0_duration = _gc0_begin_time.seconds();
duke@435 310 _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
duke@435 311 last_gc0_duration, _gc0_alpha);
duke@435 312
duke@435 313 // Amount promoted.
duke@435 314 _cms_used_at_gc0_end = cms_gen_bytes_used;
duke@435 315
duke@435 316 size_t promoted_bytes = 0;
duke@435 317 if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
duke@435 318 promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
duke@435 319 }
duke@435 320
duke@435 321 // If the younger gen collections were skipped, then the
duke@435 322 // number of promoted bytes will be 0 and adding it to the
duke@435 323 // average will incorrectly lessen the average. It is, however,
duke@435 324 // also possible that no promotion was needed.
duke@435 325 //
duke@435 326 // _gc0_promoted used to be calculated as
duke@435 327 // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
duke@435 328 // promoted_bytes, _gc0_alpha);
duke@435 329 _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
duke@435 330 _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();
duke@435 331
duke@435 332 // Amount directly allocated.
duke@435 333 size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
duke@435 334 _cms_gen->reset_direct_allocated_words();
duke@435 335 _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
duke@435 336 allocated_bytes, _gc0_alpha);
duke@435 337 }
duke@435 338
duke@435 339 inline void CMSStats::record_cms_begin() {
duke@435 340 _cms_timer.stop();
duke@435 341
duke@435 342 // This is just an approximate value, but is good enough.
duke@435 343 _cms_used_at_cms_begin = _cms_used_at_gc0_end;
duke@435 344
duke@435 345 _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
duke@435 346 (float) _cms_timer.seconds(), _cms_alpha);
duke@435 347 _cms_begin_time.update();
duke@435 348
duke@435 349 _cms_timer.reset();
duke@435 350 _cms_timer.start();
duke@435 351 }
duke@435 352
duke@435 353 inline void CMSStats::record_cms_end() {
duke@435 354 _cms_timer.stop();
duke@435 355
duke@435 356 float cur_duration = _cms_timer.seconds();
duke@435 357 _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
duke@435 358 cur_duration, _cms_alpha);
duke@435 359
duke@435 360 // Avoid division by 0.
duke@435 361 const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
duke@435 362 _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
duke@435 363 cur_duration / cms_used_mb,
duke@435 364 _cms_alpha);
duke@435 365
duke@435 366 _cms_end_time.update();
duke@435 367 _cms_alpha = _saved_alpha;
duke@435 368 _allow_duty_cycle_reduction = true;
duke@435 369 _valid_bits |= _CMS_VALID;
duke@435 370
duke@435 371 _cms_timer.start();
duke@435 372 }
duke@435 373
duke@435 374 inline double CMSStats::cms_time_since_begin() const {
duke@435 375 return _cms_begin_time.seconds();
duke@435 376 }
duke@435 377
duke@435 378 inline double CMSStats::cms_time_since_end() const {
duke@435 379 return _cms_end_time.seconds();
duke@435 380 }
duke@435 381
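// The three rate accessors below return bytes per second: promoted bytes,
// directly allocated bytes, and their sum, each divided by the exponentially
// averaged young-gen (gc0) collection period.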
duke@435 382 inline double CMSStats::promotion_rate() const {
duke@435 383 assert(valid(), "statistics not valid yet");
duke@435 384 return gc0_promoted() / gc0_period();
duke@435 385 }
duke@435 386
duke@435 387 inline double CMSStats::cms_allocation_rate() const {
duke@435 388 assert(valid(), "statistics not valid yet");
duke@435 389 return cms_allocated() / gc0_period();
duke@435 390 }
duke@435 391
duke@435 392 inline double CMSStats::cms_consumption_rate() const {
duke@435 393 assert(valid(), "statistics not valid yet");
duke@435 394 return (gc0_promoted() + cms_allocated()) / gc0_period();
duke@435 395 }
duke@435 396
duke@435 397 inline unsigned int CMSStats::icms_update_duty_cycle() {
duke@435 398 // Update the duty cycle only if pacing is enabled and the stats are valid
duke@435 399 // (after at least one young gen gc and one cms cycle have completed).
duke@435 400 if (CMSIncrementalPacing && valid()) {
duke@435 401 return icms_update_duty_cycle_impl();
duke@435 402 }
duke@435 403 return _icms_duty_cycle;
duke@435 404 }
duke@435 405
duke@435 406 inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
duke@435 407 cmsSpace()->save_sweep_limit();
duke@435 408 }
duke@435 409
duke@435 410 inline size_t ConcurrentMarkSweepGeneration::capacity() const {
duke@435 411 return _cmsSpace->capacity();
duke@435 412 }
duke@435 413
duke@435 414 inline size_t ConcurrentMarkSweepGeneration::used() const {
duke@435 415 return _cmsSpace->used();
duke@435 416 }
duke@435 417
duke@435 418 inline size_t ConcurrentMarkSweepGeneration::free() const {
duke@435 419 return _cmsSpace->free();
duke@435 420 }
duke@435 421
duke@435 422 inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
duke@435 423 return _cmsSpace->used_region();
duke@435 424 }
duke@435 425
duke@435 426 inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
duke@435 427 return _cmsSpace->used_region_at_save_marks();
duke@435 428 }
duke@435 429
duke@435 430 inline void MarkFromRootsClosure::do_yield_check() {
duke@435 431 if (ConcurrentMarkSweepThread::should_yield() &&
duke@435 432 !_collector->foregroundGCIsActive() &&
duke@435 433 _yield) {
duke@435 434 do_yield_work();
duke@435 435 }
duke@435 436 }
duke@435 437
duke@435 438 inline void Par_MarkFromRootsClosure::do_yield_check() {
duke@435 439 if (ConcurrentMarkSweepThread::should_yield() &&
duke@435 440 !_collector->foregroundGCIsActive() &&
duke@435 441 _yield) {
duke@435 442 do_yield_work();
duke@435 443 }
duke@435 444 }
duke@435 445
duke@435 446 // Return value of "true" indicates that the on-going preclean
duke@435 447 // should be aborted.
duke@435 448 inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
duke@435 449 if (ConcurrentMarkSweepThread::should_yield() &&
duke@435 450 !_collector->foregroundGCIsActive() &&
duke@435 451 _yield) {
duke@435 452 // Sample young gen size before and after yield
duke@435 453 _collector->sample_eden();
duke@435 454 do_yield_work();
duke@435 455 _collector->sample_eden();
duke@435 456 return _collector->should_abort_preclean();
duke@435 457 }
duke@435 458 return false;
duke@435 459 }
duke@435 460
duke@435 461 inline void SurvivorSpacePrecleanClosure::do_yield_check() {
duke@435 462 if (ConcurrentMarkSweepThread::should_yield() &&
duke@435 463 !_collector->foregroundGCIsActive() &&
duke@435 464 _yield) {
duke@435 465 // Sample young gen size before and after yield
duke@435 466 _collector->sample_eden();
duke@435 467 do_yield_work();
duke@435 468 _collector->sample_eden();
duke@435 469 }
duke@435 470 }
duke@435 471
duke@435 472 inline void SweepClosure::do_yield_check(HeapWord* addr) {
duke@435 473 if (ConcurrentMarkSweepThread::should_yield() &&
duke@435 474 !_collector->foregroundGCIsActive() &&
duke@435 475 _yield) {
duke@435 476 do_yield_work(addr);
duke@435 477 }
duke@435 478 }
duke@435 479
duke@435 480 inline void MarkRefsIntoAndScanClosure::do_yield_check() {
duke@435 481 // The conditions are ordered for the remarking phase
duke@435 482 // when _yield is false.
duke@435 483 if (_yield &&
duke@435 484 !_collector->foregroundGCIsActive() &&
duke@435 485 ConcurrentMarkSweepThread::should_yield()) {
duke@435 486 do_yield_work();
duke@435 487 }
duke@435 488 }
duke@435 489
duke@435 490
duke@435 491 inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
duke@435 492 // Align the end of mr so it's at a card boundary.
duke@435 493 // This is superfluous except at the end of the space;
duke@435 494 // we should do better than this XXX
duke@435 495 MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
duke@435 496 CardTableModRefBS::card_size /* bytes */));
duke@435 497 _t->mark_range(mr2);
duke@435 498 }
duke@435 499
duke@435 500 inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
duke@435 501 // Align the end of mr so it's at a card boundary.
duke@435 502 // This is superfluous except at the end of the space;
duke@435 503 // we should do better than this XXX
duke@435 504 MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
duke@435 505 CardTableModRefBS::card_size /* bytes */));
duke@435 506 _t->par_mark_range(mr2);
duke@435 507 }
