src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp


/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP

#include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/shared/gcUtil.hpp"
#include "memory/defNewGeneration.hpp"

inline void CMSBitMap::clear_all() {
  assert_locked();
  // CMS bitmaps usually cover large memory regions.
  _bm.clear_large();
}

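// Address <-> bit-offset conversions. Each bit in the map covers
// 2^_shifter heap words, so with _shifter == 0 every heap word has its
// own bit: e.g. the word at _bmStartWord + 17 maps to bit offset 17,
// and offsetToHeapWord(heapWordToOffset(addr)) rounds addr down to the
// first word covered by its bit.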
inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
  return (pointer_delta(addr, _bmStartWord)) >> _shifter;
}

inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
  return _bmStartWord + (offset << _shifter);
}

inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return diff >> _shifter;
}

inline void CMSBitMap::mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.set_bit(heapWordToOffset(addr));
}

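// Unlike mark(), par_mark() sets the bit with an atomic (CAS-based)
// BitMap::par_at_put() and returns true only if this thread actually
// flipped the bit, so concurrent markers can tell which of them
// claimed the object.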
inline bool CMSBitMap::par_mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.par_at_put(heapWordToOffset(addr), true);
}

inline void CMSBitMap::par_clear(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.par_at_put(heapWordToOffset(addr), false);
}

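// The range operations below pass a BitMap::RangeSizeHint: small_range
// favors a simple bit-at-a-time loop, large_range a word-at-a-time
// path, so each caller passes the hint matching its expected range size.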
inline void CMSBitMap::mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::small_range);
}

inline void CMSBitMap::clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::small_range);
}

inline void CMSBitMap::par_mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::small_range);
}

inline void CMSBitMap::par_clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::small_range);
}

inline void CMSBitMap::mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::large_range);
}

inline void CMSBitMap::clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::large_range);
}

inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::large_range);
}

inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::large_range);
}

// Starting at "addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
  return getAndClearMarkedRegion(addr, endWord());
}

// Starting at "start_addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region
// strictly less than end_addr.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
                                                    HeapWord* end_addr) {
  HeapWord *start, *end;
  assert_locked();
  start = getNextMarkedWordAddress  (start_addr, end_addr);
  end   = getNextUnmarkedWordAddress(start,      end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clear_range(mr);
  }
  return mr;
}

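// A sketch of typical use: drain the marked runs in
// [startWord(), endWord()) left to right, one maximal region at a time.
// The loop terminates because an empty region is returned once no
// marked bit remains at or after the cursor:
//   for (MemRegion mr = bm->getAndClearMarkedRegion(bm->startWord());
//        !mr.is_empty();
//        mr = bm->getAndClearMarkedRegion(mr.end())) {
//     // ... process [mr.start(), mr.end()) ...
//   }
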
inline bool CMSBitMap::isMarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}

// The same as isMarked() but without a lock check.
inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}

inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return !_bm.at(heapWordToOffset(addr));
}

// Return the HeapWord address corresponding to the next "1" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
  return getNextMarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "1" bit
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_one_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_one postcondition");
  assert((nextAddr == end_addr) ||
         isMarked(nextAddr), "get_next_one postcondition");
  return nextAddr;
}

// Return the HeapWord address corresponding to the next "0" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
  return getNextUnmarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "0" bit
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_zero_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_zero postcondition");
  assert((nextAddr == end_addr) ||
         isUnmarked(nextAddr), "get_next_zero postcondition");
  return nextAddr;
}

inline bool CMSBitMap::isAllClear() const {
  assert_locked();
  return getNextMarkedWordAddress(startWord()) >= endWord();
}

inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
                               HeapWord* right) {
  assert_locked();
  left = MAX2(_bmStartWord, left);
  right = MIN2(_bmStartWord + _bmWordSize, right);
  if (right > left) {
    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
  }
}

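// The i-cms entry points below are no-ops unless incremental mode
// (-XX:+CMSIncrementalMode) is enabled; when it is, they delegate
// duty-cycle control to the concurrent CMS thread.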
inline void CMSCollector::start_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::start_icms();
  }
}

inline void CMSCollector::stop_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::stop_icms();
  }
}

inline void CMSCollector::disable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::disable_icms();
  }
}

inline void CMSCollector::enable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::enable_icms();
  }
}

inline void CMSCollector::icms_wait() {
  if (CMSIncrementalMode) {
    cmsThread()->icms_wait();
  }
}

inline void CMSCollector::save_sweep_limits() {
  _cmsGen->save_sweep_limit();
}

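// A CMS-generation object is considered dead here only while sweeping:
// its mark bit is clear and the cycle is one that unloads classes
// (the should_unload_classes() guard).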
inline bool CMSCollector::is_dead_obj(oop obj) const {
  HeapWord* addr = (HeapWord*)obj;
  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
          && _cmsGen->cmsSpace()->block_is_obj(addr)),
         "must be object");
  return should_unload_classes() &&
         _collectorState == Sweeping &&
         !_markBitMap.isMarked(addr);
}

inline bool CMSCollector::should_abort_preclean() const {
  // We are in the midst of an "abortable preclean" and either
  // a scavenge is done or the foreground GC wants to take over collection.
  return _collectorState == AbortablePreclean &&
         (_abort_preclean || _foregroundGCIsActive ||
          GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
}

inline size_t CMSCollector::get_eden_used() const {
  return _young_gen->as_DefNewGeneration()->eden()->used();
}

inline size_t CMSCollector::get_eden_capacity() const {
  return _young_gen->as_DefNewGeneration()->eden()->capacity();
}

inline bool CMSStats::valid() const {
  return _valid_bits == _ALL_VALID;
}

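// The recording methods below maintain exponentially decaying averages
// of young-gen (gc0) and CMS cycle periods, durations, promotion and
// allocation. AdaptiveWeightedAverage::exp_avg(avg, sample, alpha)
// blends roughly as ((100 - alpha) * avg + alpha * sample) / 100, so
// alpha is the percent weight given to the newest sample.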
inline void CMSStats::record_gc0_begin() {
  if (_gc0_begin_time.is_updated()) {
    float last_gc0_period = _gc0_begin_time.seconds();
    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
                                                   last_gc0_period, _gc0_alpha);
    _gc0_alpha = _saved_alpha;
    _valid_bits |= _GC0_VALID;
  }
  _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();

  _gc0_begin_time.update();
}

inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
  float last_gc0_duration = _gc0_begin_time.seconds();
  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
                                                   last_gc0_duration, _gc0_alpha);

  // Amount promoted.
  _cms_used_at_gc0_end = cms_gen_bytes_used;

  size_t promoted_bytes = 0;
  if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
    promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
  }

  // If the younger gen collections were skipped, then the
  // number of promoted bytes will be 0 and adding it to the
  // average will incorrectly lessen the average. It is, however,
  // also possible that no promotion was needed.
  //
  // _gc0_promoted used to be calculated as
  // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
  //                                                  promoted_bytes, _gc0_alpha);
  _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
  _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();

  // Amount directly allocated.
  size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
  _cms_gen->reset_direct_allocated_words();
  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
                                                    allocated_bytes, _gc0_alpha);
}

inline void CMSStats::record_cms_begin() {
  _cms_timer.stop();

  // This is just an approximate value, but is good enough.
  _cms_used_at_cms_begin = _cms_used_at_gc0_end;

  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
                                                 (float)_cms_timer.seconds(), _cms_alpha);
  _cms_begin_time.update();

  _cms_timer.reset();
  _cms_timer.start();
}

inline void CMSStats::record_cms_end() {
  _cms_timer.stop();

  float cur_duration = _cms_timer.seconds();
  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
                                                   cur_duration, _cms_alpha);

  // Avoid division by 0.
  const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
  _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
                                                          cur_duration / cms_used_mb,
                                                          _cms_alpha);

  _cms_end_time.update();
  _cms_alpha = _saved_alpha;
  _allow_duty_cycle_reduction = true;
  _valid_bits |= _CMS_VALID;

  _cms_timer.start();
}

inline double CMSStats::cms_time_since_begin() const {
  return _cms_begin_time.seconds();
}

inline double CMSStats::cms_time_since_end() const {
  return _cms_end_time.seconds();
}

inline double CMSStats::promotion_rate() const {
  assert(valid(), "statistics not valid yet");
  return gc0_promoted() / gc0_period();
}

inline double CMSStats::cms_allocation_rate() const {
  assert(valid(), "statistics not valid yet");
  return cms_allocated() / gc0_period();
}

inline double CMSStats::cms_consumption_rate() const {
  assert(valid(), "statistics not valid yet");
  return (gc0_promoted() + cms_allocated()) / gc0_period();
}

inline unsigned int CMSStats::icms_update_duty_cycle() {
  // Update the duty cycle only if pacing is enabled and the stats are valid
  // (after at least one young gen gc and one cms cycle have completed).
  if (CMSIncrementalPacing && valid()) {
    return icms_update_duty_cycle_impl();
  }
  return _icms_duty_cycle;
}

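// The accessors below simply delegate to the generation's underlying
// CompactibleFreeListSpace.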
inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
  cmsSpace()->save_sweep_limit();
}

inline size_t ConcurrentMarkSweepGeneration::capacity() const {
  return _cmsSpace->capacity();
}

inline size_t ConcurrentMarkSweepGeneration::used() const {
  return _cmsSpace->used();
}

inline size_t ConcurrentMarkSweepGeneration::free() const {
  return _cmsSpace->free();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
  return _cmsSpace->used_region();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
  return _cmsSpace->used_region_at_save_marks();
}

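// The do_yield_check() methods below let the long-running concurrent
// phases yield periodically: each one checks that the CMS thread has
// been asked to yield, that no foreground (stop-the-world) collection
// is active, and that yielding is enabled for this closure before
// doing the actual yield work.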
inline void MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

inline void Par_MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

inline void PushOrMarkClosure::do_yield_check() {
  _parent->do_yield_check();
}

inline void Par_PushOrMarkClosure::do_yield_check() {
  _parent->do_yield_check();
}

// Return value of "true" indicates that the on-going preclean
// should be aborted.
inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield.
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
    return _collector->should_abort_preclean();
  }
  return false;
}

inline void SurvivorSpacePrecleanClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield.
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
  }
}

inline void SweepClosure::do_yield_check(HeapWord* addr) {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work(addr);
  }
}

inline void MarkRefsIntoAndScanClosure::do_yield_check() {
  // The conditions are ordered for the remarking phase
  // when _yield is false.
  if (_yield &&
      !_collector->foregroundGCIsActive() &&
      ConcurrentMarkSweepThread::should_yield()) {
    do_yield_work();
  }
}

inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                CardTableModRefBS::card_size /* bytes */));
  _t->mark_range(mr2);
}

inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                CardTableModRefBS::card_size /* bytes */));
  _t->par_mark_range(mr2);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
