src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp

author:      asaha
date:        Thu, 23 Oct 2014 12:02:08 -0700
changeset:   7476:c2844108a708
parents:     6992:2c6ef90f030a, 7470:060cdf93040c
children:    7535:7ae4e26cb1e0, 9793:7386b3a385ac
permissions: -rw-r--r--
description: Merge

duke@435 1 /*
jwilhelm@4576 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
stefank@2314 27
sla@5237 28 #include "gc_implementation/shared/gcHeapSummary.hpp"
stefank@2314 29 #include "gc_implementation/shared/gSpaceCounters.hpp"
stefank@2314 30 #include "gc_implementation/shared/gcStats.hpp"
sla@5237 31 #include "gc_implementation/shared/gcWhen.hpp"
stefank@2314 32 #include "gc_implementation/shared/generationCounters.hpp"
jmasa@3730 33 #include "memory/freeBlockDictionary.hpp"
stefank@2314 34 #include "memory/generation.hpp"
stefank@6992 35 #include "memory/iterator.hpp"
stefank@2314 36 #include "runtime/mutexLocker.hpp"
stefank@2314 37 #include "runtime/virtualspace.hpp"
stefank@2314 38 #include "services/memoryService.hpp"
stefank@2314 39 #include "utilities/bitMap.inline.hpp"
stefank@2314 40 #include "utilities/stack.inline.hpp"
stefank@2314 41 #include "utilities/taskqueue.hpp"
stefank@2314 42 #include "utilities/yieldingWorkgroup.hpp"
stefank@2314 43
duke@435 44 // ConcurrentMarkSweepGeneration is in support of a concurrent
duke@435 45 // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
duke@435 46 // style. We assume, for now, that this generation is always the
coleenp@4037 47 // seniormost generation and, for simplicity
duke@435 48 // in the first implementation, that this generation is a single compactible
duke@435 49 // space. Neither of these restrictions appears essential, and will be
duke@435 50 // relaxed in the future when more time is available to implement the
duke@435 51 // greater generality (and there's a need for it).
duke@435 52 //
duke@435 53 // Concurrent mode failures are currently handled by
duke@435 54 // means of a sliding mark-compact.
duke@435 55
duke@435 56 class CMSAdaptiveSizePolicy;
duke@435 57 class CMSConcMarkingTask;
duke@435 58 class CMSGCAdaptivePolicyCounters;
sla@5237 59 class CMSTracer;
sla@5237 60 class ConcurrentGCTimer;
duke@435 61 class ConcurrentMarkSweepGeneration;
duke@435 62 class ConcurrentMarkSweepPolicy;
duke@435 63 class ConcurrentMarkSweepThread;
duke@435 64 class CompactibleFreeListSpace;
duke@435 65 class FreeChunk;
duke@435 66 class PromotionInfo;
duke@435 67 class ScanMarkedObjectsAgainCarefullyClosure;
jmasa@4900 68 class TenuredGeneration;
sla@5237 69 class SerialOldTracer;
duke@435 70
duke@435 71 // A generic CMS bit map. It's the basis for both the CMS marking bit map
duke@435 72 // as well as for the mod union table (in each case only a subset of the
duke@435 73 // methods are used). This is essentially a wrapper around the BitMap class,
duke@435 74 // with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
duke@435 75 // we have _shifter == 0, and for the mod union table we have
duke@435 76 // _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
duke@435 77 // XXX 64-bit issues in BitMap?
duke@435 78 class CMSBitMap VALUE_OBJ_CLASS_SPEC {
duke@435 79 friend class VMStructs;
duke@435 80
duke@435 81 HeapWord* _bmStartWord; // base address of range covered by map
duke@435 82 size_t _bmWordSize; // map size (in #HeapWords covered)
duke@435 83 const int _shifter; // shifts to convert HeapWord to bit position
duke@435 84 VirtualSpace _virtual_space; // underlying the bit map
duke@435 85 BitMap _bm; // the bit map itself
duke@435 86 public:
duke@435 87 Mutex* const _lock; // mutex protecting _bm;
duke@435 88
duke@435 89 public:
duke@435 90 // constructor
duke@435 91 CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);
duke@435 92
duke@435 93 // allocates the actual storage for the map
duke@435 94 bool allocate(MemRegion mr);
duke@435 95 // field getter
duke@435 96 Mutex* lock() const { return _lock; }
duke@435 97 // locking verifier convenience function
duke@435 98 void assert_locked() const PRODUCT_RETURN;
duke@435 99
duke@435 100 // inquiries
duke@435 101 HeapWord* startWord() const { return _bmStartWord; }
duke@435 102 size_t sizeInWords() const { return _bmWordSize; }
duke@435 103 size_t sizeInBits() const { return _bm.size(); }
duke@435 104 // the following is one past the last word in space
duke@435 105 HeapWord* endWord() const { return _bmStartWord + _bmWordSize; }
duke@435 106
duke@435 107 // reading marks
duke@435 108 bool isMarked(HeapWord* addr) const;
duke@435 109 bool par_isMarked(HeapWord* addr) const; // does not do lock checks
duke@435 110 bool isUnmarked(HeapWord* addr) const;
duke@435 111 bool isAllClear() const;
duke@435 112
duke@435 113 // writing marks
duke@435 114 void mark(HeapWord* addr);
duke@435 115 // For marking by parallel GC threads;
duke@435 116 // returns true if we did, false if another thread did
duke@435 117 bool par_mark(HeapWord* addr);
duke@435 118
duke@435 119 void mark_range(MemRegion mr);
duke@435 120 void par_mark_range(MemRegion mr);
duke@435 121 void mark_large_range(MemRegion mr);
duke@435 122 void par_mark_large_range(MemRegion mr);
duke@435 123 void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
duke@435 124 void clear_range(MemRegion mr);
duke@435 125 void par_clear_range(MemRegion mr);
duke@435 126 void clear_large_range(MemRegion mr);
duke@435 127 void par_clear_large_range(MemRegion mr);
duke@435 128 void clear_all();
duke@435 129 void clear_all_incrementally(); // Not yet implemented!!
duke@435 130
duke@435 131 NOT_PRODUCT(
duke@435 132 // checks the memory region for validity
duke@435 133 void region_invariant(MemRegion mr);
duke@435 134 )
duke@435 135
duke@435 136 // iteration
duke@435 137 void iterate(BitMapClosure* cl) {
duke@435 138 _bm.iterate(cl);
duke@435 139 }
duke@435 140 void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
duke@435 141 void dirty_range_iterate_clear(MemRegionClosure* cl);
duke@435 142 void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);
duke@435 143
duke@435 144 // auxiliary support for iteration
duke@435 145 HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
duke@435 146 HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
duke@435 147 HeapWord* end_addr) const;
duke@435 148 HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
duke@435 149 HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
duke@435 150 HeapWord* end_addr) const;
duke@435 151 MemRegion getAndClearMarkedRegion(HeapWord* addr);
duke@435 152 MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
duke@435 153 HeapWord* end_addr);
duke@435 154
duke@435 155 // conversion utilities
duke@435 156 HeapWord* offsetToHeapWord(size_t offset) const;
duke@435 157 size_t heapWordToOffset(HeapWord* addr) const;
duke@435 158 size_t heapWordDiffToOffsetDiff(size_t diff) const;
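  // A minimal sketch of what these conversions amount to, given the
  // description of _shifter in the class comment above (the actual inline
  // definitions live elsewhere); with _shifter == 0 each bit covers exactly
  // one HeapWord:
  //
  //   size_t    heapWordToOffset(HeapWord* addr) const {
  //     return pointer_delta(addr, _bmStartWord) >> _shifter;
  //   }
  //   HeapWord* offsetToHeapWord(size_t offset) const {
  //     return _bmStartWord + (offset << _shifter);
  //   }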
duke@435 159
stefank@4904 160 void print_on_error(outputStream* st, const char* prefix) const;
stefank@4904 161
duke@435 162 // debugging
duke@435 163 // is this address range covered by the bit-map?
duke@435 164 NOT_PRODUCT(
duke@435 165 bool covers(MemRegion mr) const;
duke@435 166 bool covers(HeapWord* start, size_t size = 0) const;
duke@435 167 )
duke@435 168 void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
duke@435 169 };
duke@435 170
duke@435 171 // Represents a marking stack used by the CMS collector.
duke@435 172 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
zgu@3900 173 class CMSMarkStack: public CHeapObj<mtGC> {
duke@435 174 //
duke@435 175 friend class CMSCollector; // to get at expansion stats further below
duke@435 176 //
duke@435 177
duke@435 178 VirtualSpace _virtual_space; // space for the stack
duke@435 179 oop* _base; // bottom of stack
duke@435 180 size_t _index; // one more than last occupied index
duke@435 181 size_t _capacity; // max #elements
duke@435 182 Mutex _par_lock; // an advisory lock used in case of parallel access
duke@435 183 NOT_PRODUCT(size_t _max_depth;) // max depth plumbed during run
duke@435 184
duke@435 185 protected:
duke@435 186 size_t _hit_limit; // we hit max stack size limit
duke@435 187 size_t _failed_double; // we failed expansion before hitting limit
duke@435 188
duke@435 189 public:
duke@435 190 CMSMarkStack():
duke@435 191 _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
duke@435 192 _hit_limit(0),
duke@435 193 _failed_double(0) {}
duke@435 194
duke@435 195 bool allocate(size_t size);
duke@435 196
duke@435 197 size_t capacity() const { return _capacity; }
duke@435 198
duke@435 199 oop pop() {
duke@435 200 if (!isEmpty()) {
duke@435 201 return _base[--_index] ;
duke@435 202 }
duke@435 203 return NULL;
duke@435 204 }
duke@435 205
duke@435 206 bool push(oop ptr) {
duke@435 207 if (isFull()) {
duke@435 208 return false;
duke@435 209 } else {
duke@435 210 _base[_index++] = ptr;
duke@435 211 NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
duke@435 212 return true;
duke@435 213 }
duke@435 214 }
duke@435 215
duke@435 216 bool isEmpty() const { return _index == 0; }
duke@435 217 bool isFull() const {
duke@435 218 assert(_index <= _capacity, "buffer overflow");
duke@435 219 return _index == _capacity;
duke@435 220 }
duke@435 221
duke@435 222 size_t length() { return _index; }
duke@435 223
duke@435 224 // "Parallel versions" of some of the above
duke@435 225 oop par_pop() {
duke@435 226 // lock and pop
duke@435 227 MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
duke@435 228 return pop();
duke@435 229 }
duke@435 230
duke@435 231 bool par_push(oop ptr) {
duke@435 232 // lock and push
duke@435 233 MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
duke@435 234 return push(ptr);
duke@435 235 }
duke@435 236
duke@435 237 // Forcibly reset the stack, losing all of its contents.
duke@435 238 void reset() {
duke@435 239 _index = 0;
duke@435 240 }
duke@435 241
duke@435 242 // Expand the stack, typically in response to an overflow condition
duke@435 243 void expand();
duke@435 244
duke@435 245 // Compute the least valued stack element.
duke@435 246 oop least_value(HeapWord* low) {
duke@435 247 oop least = (oop)low;
duke@435 248 for (size_t i = 0; i < _index; i++) {
duke@435 249 least = MIN2(least, _base[i]);
duke@435 250 }
duke@435 251 return least;
duke@435 252 }
duke@435 253
duke@435 254 // Exposed here to allow stack expansion in || case
duke@435 255 Mutex* par_lock() { return &_par_lock; }
duke@435 256 };
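// A minimal usage sketch (assuming a marking closure holding references to
// the collector and this stack): a failed push means the stack is full, and
// CMS falls back to the overflow list kept by CMSCollector (see below); the
// expansion statistics above (_hit_limit, _failed_double) track why the
// stack could not grow.
//
//   if (!mark_stack->push(obj)) {
//     collector->push_on_overflow_list(obj);
//   }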
duke@435 257
duke@435 258 class CardTableRS;
duke@435 259 class CMSParGCThreadState;
duke@435 260
duke@435 261 class ModUnionClosure: public MemRegionClosure {
duke@435 262 protected:
duke@435 263 CMSBitMap* _t;
duke@435 264 public:
duke@435 265 ModUnionClosure(CMSBitMap* t): _t(t) { }
duke@435 266 void do_MemRegion(MemRegion mr);
duke@435 267 };
duke@435 268
duke@435 269 class ModUnionClosurePar: public ModUnionClosure {
duke@435 270 public:
duke@435 271 ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
duke@435 272 void do_MemRegion(MemRegion mr);
duke@435 273 };
duke@435 274
duke@435 275 // Survivor Chunk Array in support of parallelization of
duke@435 276 // Survivor Space rescan.
zgu@3900 277 class ChunkArray: public CHeapObj<mtGC> {
duke@435 278 size_t _index;
duke@435 279 size_t _capacity;
ysr@2108 280 size_t _overflows;
duke@435 281 HeapWord** _array; // storage for array
duke@435 282
duke@435 283 public:
ysr@2108 284 ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
duke@435 285 ChunkArray(HeapWord** a, size_t c):
ysr@2108 286 _index(0), _capacity(c), _overflows(0), _array(a) {}
duke@435 287
duke@435 288 HeapWord** array() { return _array; }
duke@435 289 void set_array(HeapWord** a) { _array = a; }
duke@435 290
duke@435 291 size_t capacity() { return _capacity; }
duke@435 292 void set_capacity(size_t c) { _capacity = c; }
duke@435 293
duke@435 294 size_t end() {
ysr@2108 295 assert(_index <= capacity(),
ysr@2108 296 err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
ysr@2108 297 _index, _capacity));
duke@435 298 return _index;
duke@435 299 } // exclusive
duke@435 300
duke@435 301 HeapWord* nth(size_t n) {
duke@435 302 assert(n < end(), "Out of bounds access");
duke@435 303 return _array[n];
duke@435 304 }
duke@435 305
duke@435 306 void reset() {
duke@435 307 _index = 0;
ysr@2108 308 if (_overflows > 0 && PrintCMSStatistics > 1) {
ysr@2108 309 warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times",
ysr@2108 310 _capacity, _overflows);
ysr@2108 311 }
ysr@2108 312 _overflows = 0;
duke@435 313 }
duke@435 314
duke@435 315 void record_sample(HeapWord* p, size_t sz) {
duke@435 316 // For now we do not do anything with the size
duke@435 317 if (_index < _capacity) {
duke@435 318 _array[_index++] = p;
ysr@2108 319 } else {
ysr@2108 320 ++_overflows;
ysr@2108 321 assert(_index == _capacity,
ysr@2108 322 err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
ysr@2108 323 "): out of bounds at overflow#" SIZE_FORMAT,
ysr@2108 324 _index, _capacity, _overflows));
duke@435 325 }
duke@435 326 }
duke@435 327 };
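// A hedged usage sketch: during a young collection each parallel GC thread is
// handed one of these recorders (via CMSCollector::get_data_recorder() below)
// and records the start of every survivor PLAB it allocates; overflowed
// samples are merely counted and reported the next time reset() is called.
//
//   ChunkArray* ca = ...;   // this thread's recorder, may be NULL
//   if (ca != NULL) {
//     ca->record_sample(plab_bottom, plab_word_size);  // size currently unused
//   }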
duke@435 328
duke@435 329 //
duke@435 330 // Timing, allocation and promotion statistics for gc scheduling and incremental
duke@435 331 // mode pacing. Most statistics are exponential averages.
duke@435 332 //
duke@435 333 class CMSStats VALUE_OBJ_CLASS_SPEC {
duke@435 334 private:
duke@435 335 ConcurrentMarkSweepGeneration* const _cms_gen; // The cms (old) gen.
duke@435 336
duke@435 337 // The following are exponential averages with factor alpha:
duke@435 338 // avg = ((100 - alpha) * avg + alpha * cur_sample) / 100
duke@435 339 //
duke@435 340 // The durations measure: end_time[n] - start_time[n]
duke@435 341 // The periods measure: start_time[n] - start_time[n-1]
duke@435 342 //
duke@435 343 // The cms period and duration include only concurrent collections; time spent
duke@435 344 // in foreground cms collections due to System.gc() or because of a failure to
duke@435 345 // keep up is not included.
duke@435 346 //
duke@435 347 // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the
duke@435 348 // real value, but is used only after the first period. A value of 100 is
duke@435 349 // used for the first sample so it gets the entire weight.
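  // A worked sketch of the exponential-average update above, assuming a
  // helper of the form below (alpha is a percentage, so the weighted sum is
  // rescaled by 100):
  //
  //   static double exp_avg(double avg, double sample, unsigned int alpha) {
  //     return ((100.0 - alpha) * avg + alpha * sample) / 100.0;
  //   }
  //
  //   e.g. with alpha == 25, a running gc0 period of 4.0s and a new sample
  //   of 8.0s: new avg = (75 * 4.0 + 25 * 8.0) / 100 = 5.0s.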
duke@435 350 unsigned int _saved_alpha; // 0-100
duke@435 351 unsigned int _gc0_alpha;
duke@435 352 unsigned int _cms_alpha;
duke@435 353
duke@435 354 double _gc0_duration;
duke@435 355 double _gc0_period;
duke@435 356 size_t _gc0_promoted; // bytes promoted per gc0
duke@435 357 double _cms_duration;
duke@435 358 double _cms_duration_pre_sweep; // time from initiation to start of sweep
duke@435 359 double _cms_duration_per_mb;
duke@435 360 double _cms_period;
duke@435 361 size_t _cms_allocated; // bytes of direct allocation per gc0 period
duke@435 362
duke@435 363 // Timers.
duke@435 364 elapsedTimer _cms_timer;
duke@435 365 TimeStamp _gc0_begin_time;
duke@435 366 TimeStamp _cms_begin_time;
duke@435 367 TimeStamp _cms_end_time;
duke@435 368
duke@435 369 // Snapshots of the amount used in the CMS generation.
duke@435 370 size_t _cms_used_at_gc0_begin;
duke@435 371 size_t _cms_used_at_gc0_end;
duke@435 372 size_t _cms_used_at_cms_begin;
duke@435 373
duke@435 374 // Used to prevent the duty cycle from being reduced in the middle of a cms
duke@435 375 // cycle.
duke@435 376 bool _allow_duty_cycle_reduction;
duke@435 377
duke@435 378 enum {
duke@435 379 _GC0_VALID = 0x1,
duke@435 380 _CMS_VALID = 0x2,
duke@435 381 _ALL_VALID = _GC0_VALID | _CMS_VALID
duke@435 382 };
duke@435 383
duke@435 384 unsigned int _valid_bits;
duke@435 385
duke@435 386 unsigned int _icms_duty_cycle; // icms duty cycle (0-100).
duke@435 387
duke@435 388 protected:
duke@435 389
duke@435 390 // Return a duty cycle that avoids wild oscillations, by limiting the amount
duke@435 391 // of change between old_duty_cycle and new_duty_cycle (the latter is treated
duke@435 392 // as a recommended value).
duke@435 393 static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
duke@435 394 unsigned int new_duty_cycle);
duke@435 395 unsigned int icms_update_duty_cycle_impl();
duke@435 396
ysr@1580 397 // In support of adjusting cms trigger ratios based on the history
ysr@1580 398 // of concurrent mode failure.
ysr@1580 399 double cms_free_adjustment_factor(size_t free) const;
ysr@1580 400 void adjust_cms_free_adjustment_factor(bool fail, size_t free);
ysr@1580 401
duke@435 402 public:
duke@435 403 CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
duke@435 404 unsigned int alpha = CMSExpAvgFactor);
duke@435 405
duke@435 406 // Whether or not the statistics contain valid data; higher level statistics
duke@435 407 // cannot be called until this returns true (they require at least one young
duke@435 408 // gen and one cms cycle to have completed).
duke@435 409 bool valid() const;
duke@435 410
duke@435 411 // Record statistics.
duke@435 412 void record_gc0_begin();
duke@435 413 void record_gc0_end(size_t cms_gen_bytes_used);
duke@435 414 void record_cms_begin();
duke@435 415 void record_cms_end();
duke@435 416
duke@435 417 // Allow management of the cms timer, which must be stopped/started around
duke@435 418 // yield points.
duke@435 419 elapsedTimer& cms_timer() { return _cms_timer; }
duke@435 420 void start_cms_timer() { _cms_timer.start(); }
duke@435 421 void stop_cms_timer() { _cms_timer.stop(); }
duke@435 422
duke@435 423 // Basic statistics; units are seconds or bytes.
duke@435 424 double gc0_period() const { return _gc0_period; }
duke@435 425 double gc0_duration() const { return _gc0_duration; }
duke@435 426 size_t gc0_promoted() const { return _gc0_promoted; }
duke@435 427 double cms_period() const { return _cms_period; }
duke@435 428 double cms_duration() const { return _cms_duration; }
duke@435 429 double cms_duration_per_mb() const { return _cms_duration_per_mb; }
duke@435 430 size_t cms_allocated() const { return _cms_allocated; }
duke@435 431
duke@435 432 size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}
duke@435 433
duke@435 434 // Seconds since the last background cms cycle began or ended.
duke@435 435 double cms_time_since_begin() const;
duke@435 436 double cms_time_since_end() const;
duke@435 437
duke@435 438 // Higher level statistics--caller must check that valid() returns true before
duke@435 439 // calling.
duke@435 440
duke@435 441 // Returns bytes promoted per second of wall clock time.
duke@435 442 double promotion_rate() const;
duke@435 443
duke@435 444 // Returns bytes directly allocated per second of wall clock time.
duke@435 445 double cms_allocation_rate() const;
duke@435 446
duke@435 447 // Rate at which space in the cms generation is being consumed (sum of the
duke@435 448 // above two).
duke@435 449 double cms_consumption_rate() const;
duke@435 450
duke@435 451 // Returns an estimate of the number of seconds until the cms generation will
duke@435 452 // fill up, assuming no collection work is done.
duke@435 453 double time_until_cms_gen_full() const;
duke@435 454
duke@435 455 // Returns an estimate of the number of seconds remaining until
duke@435 456 // the cms generation collection should start.
duke@435 457 double time_until_cms_start() const;
duke@435 458
duke@435 459 // End of higher level statistics.
duke@435 460
duke@435 461 // Returns the cms incremental mode duty cycle, as a percentage (0-100).
duke@435 462 unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }
duke@435 463
duke@435 464 // Update the duty cycle and return the new value.
duke@435 465 unsigned int icms_update_duty_cycle();
duke@435 466
duke@435 467 // Debugging.
duke@435 468 void print_on(outputStream* st) const PRODUCT_RETURN;
duke@435 469 void print() const { print_on(gclog_or_tty); }
duke@435 470 };
duke@435 471
duke@435 472 // A closure related to weak references processing which
duke@435 473 // we embed in the CMSCollector, since we need to pass
duke@435 474 // it to the reference processor for secondary filtering
duke@435 475 // of references based on reachability of referent;
duke@435 476 // see role of _is_alive_non_header closure in the
duke@435 477 // ReferenceProcessor class.
duke@435 478 // For objects in the CMS generation, this closure checks
duke@435 479 // if the object is "live" (reachable). Used in weak
duke@435 480 // reference processing.
duke@435 481 class CMSIsAliveClosure: public BoolObjectClosure {
ysr@578 482 const MemRegion _span;
duke@435 483 const CMSBitMap* _bit_map;
duke@435 484
duke@435 485 friend class CMSCollector;
duke@435 486 public:
duke@435 487 CMSIsAliveClosure(MemRegion span,
duke@435 488 CMSBitMap* bit_map):
duke@435 489 _span(span),
ysr@578 490 _bit_map(bit_map) {
ysr@578 491 assert(!span.is_empty(), "Empty span could spell trouble");
ysr@578 492 }
ysr@578 493
duke@435 494 bool do_object_b(oop obj);
duke@435 495 };
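// A sketch of the liveness test this closure is meant to implement, assuming
// the usual CMS convention that anything outside _span is treated as live and
// anything inside it is live iff its address is marked in the bit map:
//
//   bool CMSIsAliveClosure::do_object_b(oop obj) {
//     HeapWord* addr = (HeapWord*)obj;
//     return addr != NULL &&
//            (!_span.contains(addr) || _bit_map->isMarked(addr));
//   }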
duke@435 496
duke@435 497
duke@435 498 // Implements AbstractRefProcTaskExecutor for CMS.
duke@435 499 class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
duke@435 500 public:
duke@435 501
duke@435 502 CMSRefProcTaskExecutor(CMSCollector& collector)
duke@435 503 : _collector(collector)
duke@435 504 { }
duke@435 505
duke@435 506 // Executes a task using worker threads.
duke@435 507 virtual void execute(ProcessTask& task);
duke@435 508 virtual void execute(EnqueueTask& task);
duke@435 509 private:
duke@435 510 CMSCollector& _collector;
duke@435 511 };
duke@435 512
duke@435 513
zgu@3900 514 class CMSCollector: public CHeapObj<mtGC> {
duke@435 515 friend class VMStructs;
duke@435 516 friend class ConcurrentMarkSweepThread;
duke@435 517 friend class ConcurrentMarkSweepGeneration;
duke@435 518 friend class CompactibleFreeListSpace;
jmasa@5461 519 friend class CMSParMarkTask;
jmasa@5461 520 friend class CMSParInitialMarkTask;
duke@435 521 friend class CMSParRemarkTask;
duke@435 522 friend class CMSConcMarkingTask;
duke@435 523 friend class CMSRefProcTaskProxy;
duke@435 524 friend class CMSRefProcTaskExecutor;
duke@435 525 friend class ScanMarkedObjectsAgainCarefullyClosure; // for sampling eden
duke@435 526 friend class SurvivorSpacePrecleanClosure; // --- ditto -------
duke@435 527 friend class PushOrMarkClosure; // to access _restart_addr
duke@435 528 friend class Par_PushOrMarkClosure; // to access _restart_addr
duke@435 529 friend class MarkFromRootsClosure; // -- ditto --
duke@435 530 // ... and for clearing cards
duke@435 531 friend class Par_MarkFromRootsClosure; // to access _restart_addr
duke@435 532 // ... and for clearing cards
duke@435 533 friend class Par_ConcMarkingClosure; // to access _restart_addr etc.
duke@435 534 friend class MarkFromRootsVerifyClosure; // to access _restart_addr
duke@435 535 friend class PushAndMarkVerifyClosure; // -- ditto --
duke@435 536 friend class MarkRefsIntoAndScanClosure; // to access _overflow_list
duke@435 537 friend class PushAndMarkClosure; // -- ditto --
duke@435 538 friend class Par_PushAndMarkClosure; // -- ditto --
duke@435 539 friend class CMSKeepAliveClosure; // -- ditto --
duke@435 540 friend class CMSDrainMarkingStackClosure; // -- ditto --
duke@435 541 friend class CMSInnerParMarkAndPushClosure; // -- ditto --
duke@435 542 NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
duke@435 543 friend class ReleaseForegroundGC; // to access _foregroundGCShouldWait
duke@435 544 friend class VM_CMS_Operation;
duke@435 545 friend class VM_CMS_Initial_Mark;
duke@435 546 friend class VM_CMS_Final_Remark;
kevinw@2058 547 friend class TraceCMSMemoryManagerStats;
duke@435 548
duke@435 549 private:
duke@435 550 jlong _time_of_last_gc;
duke@435 551 void update_time_of_last_gc(jlong now) {
duke@435 552 _time_of_last_gc = now;
duke@435 553 }
duke@435 554
duke@435 555 OopTaskQueueSet* _task_queues;
duke@435 556
duke@435 557 // Overflow list of grey objects, threaded through mark-word
duke@435 558 // Manipulated with CAS in the parallel/multi-threaded case.
duke@435 559 oop _overflow_list;
duke@435 560 // The following array-pair keeps track of mark words
duke@435 561 // displaced to accommodate the overflow list above.
duke@435 562 // This code will likely be revisited under RFE#4922830.
zgu@3900 563 Stack<oop, mtGC> _preserved_oop_stack;
zgu@3900 564 Stack<markOop, mtGC> _preserved_mark_stack;
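  // A sketch of the single-threaded push onto the list above (the parallel
  // variant splices the new head in with a CAS on _overflow_list instead of
  // a plain store):
  //
  //   preserve_mark_if_necessary(p);         // save a displaced mark word
  //   p->set_mark((markOop)_overflow_list);  // thread list through the mark
  //   _overflow_list = p;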
duke@435 565
duke@435 566 int* _hash_seed;
duke@435 567
duke@435 568 // In support of multi-threaded concurrent phases
duke@435 569 YieldingFlexibleWorkGang* _conc_workers;
duke@435 570
duke@435 571 // Performance Counters
duke@435 572 CollectorCounters* _gc_counters;
duke@435 573
duke@435 574 // Initialization Errors
duke@435 575 bool _completed_initialization;
duke@435 576
duke@435 577 // In support of ExplicitGCInvokesConcurrent
sla@5237 578 static bool _full_gc_requested;
sla@5237 579 static GCCause::Cause _full_gc_cause;
sla@5237 580 unsigned int _collection_count_start;
ysr@529 581
duke@435 582 // Should we unload classes this concurrent cycle?
ysr@529 583 bool _should_unload_classes;
ysr@529 584 unsigned int _concurrent_cycles_since_last_unload;
ysr@529 585 unsigned int concurrent_cycles_since_last_unload() const {
ysr@529 586 return _concurrent_cycles_since_last_unload;
ysr@529 587 }
duke@435 588 // Did we (allow) unload classes in the previous concurrent cycle?
ysr@529 589 bool unloaded_classes_last_cycle() const {
ysr@529 590 return concurrent_cycles_since_last_unload() == 0;
duke@435 591 }
ysr@1233 592 // Root scanning options for perm gen
ysr@1233 593 int _roots_scanning_options;
ysr@1233 594 int roots_scanning_options() const { return _roots_scanning_options; }
ysr@1233 595 void add_root_scanning_option(int o) { _roots_scanning_options |= o; }
ysr@1233 596 void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; }
duke@435 597
duke@435 598 // Verification support
duke@435 599 CMSBitMap _verification_mark_bm;
duke@435 600 void verify_after_remark_work_1();
duke@435 601 void verify_after_remark_work_2();
duke@435 602
duke@435 603 // true if any verification flag is on.
duke@435 604 bool _verifying;
duke@435 605 bool verifying() const { return _verifying; }
duke@435 606 void set_verifying(bool v) { _verifying = v; }
duke@435 607
duke@435 608 // Collector policy
duke@435 609 ConcurrentMarkSweepPolicy* _collector_policy;
duke@435 610 ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
duke@435 611
jmasa@5076 612 void set_did_compact(bool v);
jmasa@5076 613
duke@435 614 // XXX Move these to CMSStats ??? FIX ME !!!
ysr@1580 615 elapsedTimer _inter_sweep_timer; // time between sweeps
ysr@1580 616 elapsedTimer _intra_sweep_timer; // time _in_ sweeps
ysr@1580 617 // padded decaying average estimates of the above
ysr@1580 618 AdaptivePaddedAverage _inter_sweep_estimate;
ysr@1580 619 AdaptivePaddedAverage _intra_sweep_estimate;
duke@435 620
sla@5237 621 CMSTracer* _gc_tracer_cm;
sla@5237 622 ConcurrentGCTimer* _gc_timer_cm;
sla@5237 623
sla@5237 624 bool _cms_start_registered;
sla@5237 625
sla@5237 626 GCHeapSummary _last_heap_summary;
sla@5237 627 MetaspaceSummary _last_metaspace_summary;
sla@5237 628
sla@5237 629 void register_foreground_gc_start(GCCause::Cause cause);
sla@5237 630 void register_gc_start(GCCause::Cause cause);
sla@5237 631 void register_gc_end();
sla@5237 632 void save_heap_summary();
sla@5237 633 void report_heap_summary(GCWhen::Type when);
sla@5237 634
duke@435 635 protected:
duke@435 636 ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS)
duke@435 637 MemRegion _span; // span covering above two
duke@435 638 CardTableRS* _ct; // card table
duke@435 639
duke@435 640 // CMS marking support structures
duke@435 641 CMSBitMap _markBitMap;
duke@435 642 CMSBitMap _modUnionTable;
duke@435 643 CMSMarkStack _markStack;
duke@435 644
duke@435 645 HeapWord* _restart_addr; // in support of marking stack overflow
duke@435 646 void lower_restart_addr(HeapWord* low);
duke@435 647
duke@435 648 // Counters in support of marking stack / work queue overflow handling:
duke@435 649 // a non-zero value indicates certain types of overflow events during
duke@435 650 // the current CMS cycle and could lead to stack resizing efforts at
duke@435 651 // an opportune future time.
duke@435 652 size_t _ser_pmc_preclean_ovflw;
duke@435 653 size_t _ser_pmc_remark_ovflw;
duke@435 654 size_t _par_pmc_remark_ovflw;
ysr@887 655 size_t _ser_kac_preclean_ovflw;
duke@435 656 size_t _ser_kac_ovflw;
duke@435 657 size_t _par_kac_ovflw;
ysr@969 658 NOT_PRODUCT(ssize_t _num_par_pushes;)
duke@435 659
duke@435 660 // ("Weak") Reference processing support
duke@435 661 ReferenceProcessor* _ref_processor;
duke@435 662 CMSIsAliveClosure _is_alive_closure;
ysr@578 663 // keep this textually after _markBitMap and _span; c'tor dependency
duke@435 664
duke@435 665 ConcurrentMarkSweepThread* _cmsThread; // the thread doing the work
duke@435 666 ModUnionClosure _modUnionClosure;
duke@435 667 ModUnionClosurePar _modUnionClosurePar;
duke@435 668
duke@435 669 // CMS abstract state machine
duke@435 670 // initial_state: Idling
duke@435 671 // next_state(Idling) = {Marking}
duke@435 672 // next_state(Marking) = {Precleaning, Sweeping}
duke@435 673 // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
duke@435 674 // next_state(AbortablePreclean) = {FinalMarking}
duke@435 675 // next_state(FinalMarking) = {Sweeping}
duke@435 676 // next_state(Sweeping) = {Resizing}
duke@435 677 // next_state(Resizing) = {Resetting}
duke@435 678 // next_state(Resetting) = {Idling}
duke@435 679 // The numeric values below are chosen so that:
duke@435 680 // . _collectorState <= Idling == post-sweep && pre-mark
duke@435 681 // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
duke@435 682 // precleaning || abortablePreclean
ysr@1580 683 public:
duke@435 684 enum CollectorState {
duke@435 685 Resizing = 0,
duke@435 686 Resetting = 1,
duke@435 687 Idling = 2,
duke@435 688 InitialMarking = 3,
duke@435 689 Marking = 4,
duke@435 690 Precleaning = 5,
duke@435 691 AbortablePreclean = 6,
duke@435 692 FinalMarking = 7,
duke@435 693 Sweeping = 8
duke@435 694 };
ysr@1580 695 protected:
duke@435 696 static CollectorState _collectorState;
duke@435 697
duke@435 698 // State related to prologue/epilogue invocation for my generations
duke@435 699 bool _between_prologue_and_epilogue;
duke@435 700
duke@435 701 // Signalling/State related to coordination between fore- and background GC
duke@435 702 // Note: When the baton has been passed from background GC to foreground GC,
duke@435 703 // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
duke@435 704 static bool _foregroundGCIsActive; // true iff foreground collector is active or
duke@435 705 // wants to go active
duke@435 706 static bool _foregroundGCShouldWait; // true iff background GC is active and has not
duke@435 707 // yet passed the baton to the foreground GC
duke@435 708
duke@435 709 // Support for CMSScheduleRemark (abortable preclean)
duke@435 710 bool _abort_preclean;
duke@435 711 bool _start_sampling;
duke@435 712
duke@435 713 int _numYields;
duke@435 714 size_t _numDirtyCards;
ysr@1580 715 size_t _sweep_count;
duke@435 716 // number of full gc's since the last concurrent gc.
duke@435 717 uint _full_gcs_since_conc_gc;
duke@435 718
duke@435 719 // occupancy used for bootstrapping stats
duke@435 720 double _bootstrap_occupancy;
duke@435 721
duke@435 722 // timer
duke@435 723 elapsedTimer _timer;
duke@435 724
duke@435 725 // Timing, allocation and promotion statistics, used for scheduling.
duke@435 726 CMSStats _stats;
duke@435 727
duke@435 728 // Allocation limits installed in the young gen, used only in
duke@435 729 // CMSIncrementalMode. When an allocation in the young gen would cross one of
duke@435 730 // these limits, the cms generation is notified and the cms thread is started
duke@435 731 // or stopped, respectively.
duke@435 732 HeapWord* _icms_start_limit;
duke@435 733 HeapWord* _icms_stop_limit;
duke@435 734
duke@435 735 enum CMS_op_type {
duke@435 736 CMS_op_checkpointRootsInitial,
duke@435 737 CMS_op_checkpointRootsFinal
duke@435 738 };
duke@435 739
brutisso@3767 740 void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
duke@435 741 bool stop_world_and_do(CMS_op_type op);
duke@435 742
duke@435 743 OopTaskQueueSet* task_queues() { return _task_queues; }
duke@435 744 int* hash_seed(int i) { return &_hash_seed[i]; }
duke@435 745 YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }
duke@435 746
duke@435 747 // Support for parallelizing Eden rescan in CMS remark phase
duke@435 748 void sample_eden(); // ... sample Eden space top
duke@435 749
duke@435 750 private:
duke@435 751 // Support for parallelizing young gen rescan in CMS remark phase
duke@435 752 Generation* _young_gen; // the younger gen
duke@435 753 HeapWord** _top_addr; // ... Top of Eden
duke@435 754 HeapWord** _end_addr; // ... End of Eden
jmasa@5459 755 Mutex* _eden_chunk_lock;
duke@435 756 HeapWord** _eden_chunk_array; // ... Eden partitioning array
duke@435 757 size_t _eden_chunk_index; // ... top (exclusive) of array
duke@435 758 size_t _eden_chunk_capacity; // ... max entries in array
duke@435 759
duke@435 760 // Support for parallelizing survivor space rescan
duke@435 761 HeapWord** _survivor_chunk_array;
duke@435 762 size_t _survivor_chunk_index;
duke@435 763 size_t _survivor_chunk_capacity;
duke@435 764 size_t* _cursor;
duke@435 765 ChunkArray* _survivor_plab_array;
duke@435 766
mgerdin@7470 767 // A bounded minimum size of PLABs, should not return too small values since
mgerdin@7470 768 // this will affect the size of the data structures used for parallel young gen rescan
mgerdin@7470 769 size_t plab_sample_minimum_size();
mgerdin@7470 770
duke@435 771 // Support for marking stack overflow handling
duke@435 772 bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
jmasa@2188 773 bool par_take_from_overflow_list(size_t num,
jmasa@2188 774 OopTaskQueue* to_work_q,
jmasa@2188 775 int no_of_gc_threads);
duke@435 776 void push_on_overflow_list(oop p);
duke@435 777 void par_push_on_overflow_list(oop p);
duke@435 778 // the following is, obviously, not, in general, "MT-stable"
duke@435 779 bool overflow_list_is_empty() const;
duke@435 780
duke@435 781 void preserve_mark_if_necessary(oop p);
duke@435 782 void par_preserve_mark_if_necessary(oop p);
duke@435 783 void preserve_mark_work(oop p, markOop m);
duke@435 784 void restore_preserved_marks_if_any();
duke@435 785 NOT_PRODUCT(bool no_preserved_marks() const;)
duke@435 786 // in support of testing overflow code
duke@435 787 NOT_PRODUCT(int _overflow_counter;)
duke@435 788 NOT_PRODUCT(bool simulate_overflow();) // sequential
duke@435 789 NOT_PRODUCT(bool par_simulate_overflow();) // MT version
duke@435 790
duke@435 791 // CMS work methods
duke@435 792 void checkpointRootsInitialWork(bool asynch); // initial checkpoint work
duke@435 793
duke@435 794 // a return value of false indicates failure due to stack overflow
duke@435 795 bool markFromRootsWork(bool asynch); // concurrent marking work
duke@435 796
duke@435 797 public: // FIX ME!!! only for testing
duke@435 798 bool do_marking_st(bool asynch); // single-threaded marking
duke@435 799 bool do_marking_mt(bool asynch); // multi-threaded marking
duke@435 800
duke@435 801 private:
duke@435 802
duke@435 803 // concurrent precleaning work
duke@435 804 size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
duke@435 805 ScanMarkedObjectsAgainCarefullyClosure* cl);
duke@435 806 size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
duke@435 807 ScanMarkedObjectsAgainCarefullyClosure* cl);
duke@435 808 // Does precleaning work, returning a quantity indicative of
duke@435 809 // the amount of "useful work" done.
duke@435 810 size_t preclean_work(bool clean_refs, bool clean_survivors);
coleenp@4037 811 void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
duke@435 812 void abortable_preclean(); // Preclean while looking for possible abort
duke@435 813 void initialize_sequential_subtasks_for_young_gen_rescan(int i);
duke@435 814 // Helper function for above; merge-sorts the per-thread plab samples
jmasa@2188 815 void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
duke@435 816 // Resets (i.e. clears) the per-thread plab sample vectors
duke@435 817 void reset_survivor_plab_arrays();
duke@435 818
duke@435 819 // final (second) checkpoint work
duke@435 820 void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
duke@435 821 bool init_mark_was_synchronous);
duke@435 822 // work routine for parallel version of remark
duke@435 823 void do_remark_parallel();
duke@435 824 // work routine for non-parallel version of remark
duke@435 825 void do_remark_non_parallel();
duke@435 826 // reference processing work routine (during second checkpoint)
duke@435 827 void refProcessingWork(bool asynch, bool clear_all_soft_refs);
duke@435 828
duke@435 829 // concurrent sweeping work
duke@435 830 void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
duke@435 831
duke@435 832 // (concurrent) resetting of support data structures
duke@435 833 void reset(bool asynch);
duke@435 834
duke@435 835 // Clear _expansion_cause fields of constituent generations
duke@435 836 void clear_expansion_cause();
duke@435 837
duke@435 838 // An auxiliary method used to record the ends of
duke@435 839 // used regions of each generation to limit the extent of sweep
duke@435 840 void save_sweep_limits();
duke@435 841
duke@435 842 // A work method used by the foreground collector to determine
duke@435 843 // what type of collection (compacting or not, continuing or fresh)
duke@435 844 // it should do.
duke@435 845 void decide_foreground_collection_type(bool clear_all_soft_refs,
duke@435 846 bool* should_compact, bool* should_start_over);
duke@435 847
duke@435 848 // A work method used by the foreground collector to do
duke@435 849 // a mark-sweep-compact.
duke@435 850 void do_compaction_work(bool clear_all_soft_refs);
duke@435 851
duke@435 852 // A work method used by the foreground collector to do
duke@435 853 // a mark-sweep, after taking over from a possibly on-going
duke@435 854 // concurrent mark-sweep collection.
duke@435 855 void do_mark_sweep_work(bool clear_all_soft_refs,
duke@435 856 CollectorState first_state, bool should_start_over);
duke@435 857
sla@5237 858 // Work methods for reporting concurrent mode interruption or failure
sla@5237 859 bool is_external_interruption();
sla@5237 860 void report_concurrent_mode_interruption();
sla@5237 861
duke@435 862 // If the background GC is active, acquire control from the background
duke@435 863 // GC and do the collection.
duke@435 864 void acquire_control_and_collect(bool full, bool clear_all_soft_refs);
duke@435 865
duke@435 866 // For synchronizing passing of control from background to foreground
duke@435 867 // GC. waitForForegroundGC() is called by the background
duke@435 868 // collector. If it had to wait for a foreground collection,
duke@435 869 // it returns true and the background collection should assume
duke@435 870 // that the collection was finished by the foreground
duke@435 871 // collector.
duke@435 872 bool waitForForegroundGC();
duke@435 873
duke@435 874 // Incremental mode triggering: recompute the icms duty cycle and set the
duke@435 875 // allocation limits in the young gen.
duke@435 876 void icms_update_allocation_limits();
duke@435 877
duke@435 878 size_t block_size_using_printezis_bits(HeapWord* addr) const;
duke@435 879 size_t block_size_if_printezis_bits(HeapWord* addr) const;
duke@435 880 HeapWord* next_card_start_after_block(HeapWord* addr) const;
duke@435 881
duke@435 882 void setup_cms_unloading_and_verification_state();
duke@435 883 public:
duke@435 884 CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
duke@435 885 CardTableRS* ct,
duke@435 886 ConcurrentMarkSweepPolicy* cp);
duke@435 887 ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
duke@435 888
duke@435 889 ReferenceProcessor* ref_processor() { return _ref_processor; }
duke@435 890 void ref_processor_init();
duke@435 891
duke@435 892 Mutex* bitMapLock() const { return _markBitMap.lock(); }
duke@435 893 static CollectorState abstract_state() { return _collectorState; }
duke@435 894
duke@435 895 bool should_abort_preclean() const; // Whether preclean should be aborted.
duke@435 896 size_t get_eden_used() const;
duke@435 897 size_t get_eden_capacity() const;
duke@435 898
duke@435 899 ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
duke@435 900
duke@435 901 // locking checks
duke@435 902 NOT_PRODUCT(static bool have_cms_token();)
duke@435 903
duke@435 904 // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
duke@435 905 bool shouldConcurrentCollect();
duke@435 906
duke@435 907 void collect(bool full,
duke@435 908 bool clear_all_soft_refs,
duke@435 909 size_t size,
duke@435 910 bool tlab);
sla@5237 911 void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
sla@5237 912 void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);
duke@435 913
duke@435 914 // In support of ExplicitGCInvokesConcurrent
sla@5237 915 static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
duke@435 916 // Should we unload classes in a particular concurrent cycle?
ysr@529 917 bool should_unload_classes() const {
ysr@529 918 return _should_unload_classes;
duke@435 919 }
coleenp@4037 920 void update_should_unload_classes();
duke@435 921
duke@435 922 void direct_allocated(HeapWord* start, size_t size);
duke@435 923
duke@435 924 // Object is dead if not marked and current phase is sweeping.
duke@435 925 bool is_dead_obj(oop obj) const;
duke@435 926
duke@435 927 // After a promotion (of "start"), do any necessary marking.
duke@435 928 // If "par", then it's being done by a parallel GC thread.
duke@435 929 // The last two args indicate if we need precise marking
duke@435 930 // and if so the size of the object so it can be dirtied
duke@435 931 // in its entirety.
duke@435 932 void promoted(bool par, HeapWord* start,
duke@435 933 bool is_obj_array, size_t obj_size);
duke@435 934
duke@435 935 HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
duke@435 936 size_t word_size);
duke@435 937
duke@435 938 void getFreelistLocks() const;
duke@435 939 void releaseFreelistLocks() const;
duke@435 940 bool haveFreelistLocks() const;
duke@435 941
jmasa@4900 942 // Adjust size of underlying generation
jmasa@4900 943 void compute_new_size();
jmasa@4900 944
duke@435 945 // GC prologue and epilogue
duke@435 946 void gc_prologue(bool full);
duke@435 947 void gc_epilogue(bool full);
duke@435 948
duke@435 949 jlong time_of_last_gc(jlong now) {
duke@435 950 if (_collectorState <= Idling) {
duke@435 951 // gc not in progress
duke@435 952 return _time_of_last_gc;
duke@435 953 } else {
duke@435 954 // collection in progress
duke@435 955 return now;
duke@435 956 }
duke@435 957 }
duke@435 958
duke@435 959 // Support for parallel remark of survivor space
duke@435 960 void* get_data_recorder(int thr_num);
jmasa@5459 961 void sample_eden_chunk();
duke@435 962
duke@435 963 CMSBitMap* markBitMap() { return &_markBitMap; }
duke@435 964 void directAllocated(HeapWord* start, size_t size);
duke@435 965
duke@435 966 // main CMS steps and related support
duke@435 967 void checkpointRootsInitial(bool asynch);
duke@435 968 bool markFromRoots(bool asynch); // a return value of false indicates failure
duke@435 969 // due to stack overflow
duke@435 970 void preclean();
duke@435 971 void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
duke@435 972 bool init_mark_was_synchronous);
duke@435 973 void sweep(bool asynch);
duke@435 974
duke@435 975 // Check that the currently executing thread is the expected
duke@435 976 // one (foreground collector or background collector).
ysr@1580 977 static void check_correct_thread_executing() PRODUCT_RETURN;
duke@435 978 // XXXPERM void print_statistics() PRODUCT_RETURN;
duke@435 979
duke@435 980 bool is_cms_reachable(HeapWord* addr);
duke@435 981
duke@435 982 // Performance Counter Support
duke@435 983 CollectorCounters* counters() { return _gc_counters; }
duke@435 984
duke@435 985 // timer stuff
duke@435 986 void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
duke@435 987 void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); }
duke@435 988 void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
duke@435 989 double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }
duke@435 990
duke@435 991 int yields() { return _numYields; }
duke@435 992 void resetYields() { _numYields = 0; }
duke@435 993 void incrementYields() { _numYields++; }
duke@435 994 void resetNumDirtyCards() { _numDirtyCards = 0; }
duke@435 995 void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
duke@435 996 size_t numDirtyCards() { return _numDirtyCards; }
duke@435 997
duke@435 998 static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
duke@435 999 static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
duke@435 1000 static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
duke@435 1001 static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
ysr@1580 1002 size_t sweep_count() const { return _sweep_count; }
ysr@1580 1003 void increment_sweep_count() { _sweep_count++; }
duke@435 1004
duke@435 1005 // Timers/stats for gc scheduling and incremental mode pacing.
duke@435 1006 CMSStats& stats() { return _stats; }
duke@435 1007
duke@435 1008 // Convenience methods that check whether CMSIncrementalMode is enabled and
duke@435 1009 // forward to the corresponding methods in ConcurrentMarkSweepThread.
duke@435 1010 static void start_icms();
duke@435 1011 static void stop_icms(); // Called at the end of the cms cycle.
duke@435 1012 static void disable_icms(); // Called before a foreground collection.
duke@435 1013 static void enable_icms(); // Called after a foreground collection.
duke@435 1014 void icms_wait(); // Called at yield points.
duke@435 1015
duke@435 1016 // Adaptive size policy
duke@435 1017 CMSAdaptiveSizePolicy* size_policy();
duke@435 1018 CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
duke@435 1019
stefank@4904 1020 static void print_on_error(outputStream* st);
stefank@4904 1021
duke@435 1022 // debugging
brutisso@3711 1023 void verify();
stefank@5018 1024 bool verify_after_remark(bool silent = VerifySilently);
duke@435 1025 void verify_ok_to_terminate() const PRODUCT_RETURN;
duke@435 1026 void verify_work_stacks_empty() const PRODUCT_RETURN;
duke@435 1027 void verify_overflow_empty() const PRODUCT_RETURN;
duke@435 1028
duke@435 1029 // convenience methods in support of debugging
duke@435 1030 static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
duke@435 1031 HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
duke@435 1032
duke@435 1033 // accessors
duke@435 1034 CMSMarkStack* verification_mark_stack() { return &_markStack; }
duke@435 1035 CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }
duke@435 1036
duke@435 1037 // Initialization errors
duke@435 1038 bool completed_initialization() { return _completed_initialization; }
jmasa@5459 1039
jmasa@5459 1040 void print_eden_and_survivor_chunk_arrays();
duke@435 1041 };
duke@435 1042
duke@435 1043 class CMSExpansionCause : public AllStatic {
duke@435 1044 public:
duke@435 1045 enum Cause {
duke@435 1046 _no_expansion,
duke@435 1047 _satisfy_free_ratio,
duke@435 1048 _satisfy_promotion,
duke@435 1049 _satisfy_allocation,
duke@435 1050 _allocate_par_lab,
duke@435 1051 _allocate_par_spooling_space,
duke@435 1052 _adaptive_size_policy
duke@435 1053 };
duke@435 1054 // Return a string describing the cause of the expansion.
duke@435 1055 static const char* to_string(CMSExpansionCause::Cause cause);
duke@435 1056 };
duke@435 1057
duke@435 1058 class ConcurrentMarkSweepGeneration: public CardGeneration {
duke@435 1059 friend class VMStructs;
duke@435 1060 friend class ConcurrentMarkSweepThread;
duke@435 1061 friend class ConcurrentMarkSweep;
duke@435 1062 friend class CMSCollector;
duke@435 1063 protected:
duke@435 1064 static CMSCollector* _collector; // the collector that collects us
duke@435 1065 CompactibleFreeListSpace* _cmsSpace; // underlying space (only one for now)
duke@435 1066
duke@435 1067 // Performance Counters
duke@435 1068 GenerationCounters* _gen_counters;
duke@435 1069 GSpaceCounters* _space_counters;
duke@435 1070
duke@435 1071 // Words directly allocated, used by CMSStats.
duke@435 1072 size_t _direct_allocated_words;
duke@435 1073
duke@435 1074 // Non-product stat counters
duke@435 1075 NOT_PRODUCT(
ysr@2071 1076 size_t _numObjectsPromoted;
ysr@2071 1077 size_t _numWordsPromoted;
ysr@2071 1078 size_t _numObjectsAllocated;
ysr@2071 1079 size_t _numWordsAllocated;
duke@435 1080 )
duke@435 1081
duke@435 1082 // Used for sizing decisions
duke@435 1083 bool _incremental_collection_failed;
duke@435 1084 bool incremental_collection_failed() {
duke@435 1085 return _incremental_collection_failed;
duke@435 1086 }
duke@435 1087 void set_incremental_collection_failed() {
duke@435 1088 _incremental_collection_failed = true;
duke@435 1089 }
duke@435 1090 void clear_incremental_collection_failed() {
duke@435 1091 _incremental_collection_failed = false;
duke@435 1092 }
duke@435 1093
ysr@529 1094 // accessors
ysr@529 1095 void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
ysr@529 1096 CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
ysr@529 1097
duke@435 1098 private:
duke@435 1099 // For parallel young-gen GC support.
duke@435 1100 CMSParGCThreadState** _par_gc_thread_states;
duke@435 1101
duke@435 1102 // Reason generation was expanded
duke@435 1103 CMSExpansionCause::Cause _expansion_cause;
duke@435 1104
duke@435 1105 // In support of MinChunkSize being larger than min object size
duke@435 1106 const double _dilatation_factor;
duke@435 1107
duke@435 1108 enum CollectionTypes {
duke@435 1109 Concurrent_collection_type = 0,
duke@435 1110 MS_foreground_collection_type = 1,
duke@435 1111 MSC_foreground_collection_type = 2,
duke@435 1112 Unknown_collection_type = 3
duke@435 1113 };
duke@435 1114
duke@435 1115 CollectionTypes _debug_collection_type;
duke@435 1116
jmasa@5076 1117 // True if a compacting collection was done.
jmasa@5076 1118 bool _did_compact;
jmasa@5076 1119 bool did_compact() { return _did_compact; }
jmasa@5076 1120
ysr@529 1121 // Fraction of current occupancy at which to start a CMS collection which
ysr@529 1122 // will collect this generation (at least).
ysr@529 1123 double _initiating_occupancy;
ysr@529 1124
duke@435 1125 protected:
duke@435 1126 // Shrink the generation's free list by the specified number of bytes
jmasa@4900 1127 void shrink_free_list_by(size_t bytes);
duke@435 1128
duke@435 1129 // Update statistics for GC
duke@435 1130 virtual void update_gc_stats(int level, bool full);
duke@435 1131
duke@435 1132 // Maximum available space in the generation (including uncommitted
duke@435 1133 // space).
duke@435 1134 size_t max_available() const;
duke@435 1135
ysr@529 1136 // getter and initializer for _initiating_occupancy field.
ysr@529 1137 double initiating_occupancy() const { return _initiating_occupancy; }
jwilhelm@4576 1138 void init_initiating_occupancy(intx io, uintx tr);
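  // A sketch of the initialization, assuming io is CMSInitiatingOccupancyFraction
  // and tr is CMSTriggerRatio: a non-negative io is used directly, otherwise a
  // default is derived from MinHeapFreeRatio:
  //
  //   if (io >= 0) {
  //     _initiating_occupancy = (double)io / 100.0;
  //   } else {
  //     _initiating_occupancy = ((100 - MinHeapFreeRatio) +
  //                              (double)(tr * MinHeapFreeRatio) / 100.0) / 100.0;
  //   }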
ysr@529 1139
duke@435 1140 public:
duke@435 1141 ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
duke@435 1142 int level, CardTableRS* ct,
duke@435 1143 bool use_adaptive_freelists,
jmasa@3730 1144 FreeBlockDictionary<FreeChunk>::DictionaryChoice);
duke@435 1145
duke@435 1146 // Accessors
duke@435 1147 CMSCollector* collector() const { return _collector; }
duke@435 1148 static void set_collector(CMSCollector* collector) {
duke@435 1149 assert(_collector == NULL, "already set");
duke@435 1150 _collector = collector;
duke@435 1151 }
duke@435 1152 CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }
duke@435 1153
duke@435 1154 Mutex* freelistLock() const;
duke@435 1155
duke@435 1156 virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
duke@435 1157
duke@435 1158 // Adaptive size policy
duke@435 1159 CMSAdaptiveSizePolicy* size_policy();
duke@435 1160
jmasa@5076 1161 void set_did_compact(bool v) { _did_compact = v; }
jmasa@5076 1162
duke@435 1163 bool refs_discovery_is_atomic() const { return false; }
duke@435 1164 bool refs_discovery_is_mt() const {
duke@435 1165 // Note: CMS does MT-discovery during the parallel-remark
duke@435 1166 // phases. Use ReferenceProcessorMTMutator to make refs
duke@435 1167 // discovery MT-safe during such phases or other parallel
duke@435 1168 // discovery phases in the future. This may all go away
duke@435 1169 // if/when we decide that refs discovery is sufficiently
duke@435 1170 // rare that the cost of the CAS's involved is in the
duke@435 1171 // noise. That's a measurement that should be done, and
duke@435 1172 // the code simplified if that turns out to be the case.
ysr@2651 1173 return ConcGCThreads > 1;
duke@435 1174 }
duke@435 1175
duke@435 1176 // Override
duke@435 1177 virtual void ref_processor_init();
duke@435 1178
jmasa@706 1179 // Grow generation by specified size (returns false if unable to grow)
jmasa@706 1180 bool grow_by(size_t bytes);
jmasa@706 1181 // Grow generation to reserved size.
jmasa@706 1182 bool grow_to_reserved();
jmasa@706 1183
duke@435 1184 void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
duke@435 1185
duke@435 1186 // Space enquiries
duke@435 1187 size_t capacity() const;
duke@435 1188 size_t used() const;
duke@435 1189 size_t free() const;
ysr@529 1190 double occupancy() const { return ((double)used())/((double)capacity()); }
duke@435 1191 size_t contiguous_available() const;
duke@435 1192 size_t unsafe_max_alloc_nogc() const;
duke@435 1193
duke@435 1194 // over-rides
duke@435 1195 MemRegion used_region() const;
duke@435 1196 MemRegion used_region_at_save_marks() const;
duke@435 1197
duke@435 1198 // Does a "full" (forced) collection invoked on this generation collect
duke@435 1199 // all younger generations as well? Note that the second conjunct is a
duke@435 1200 // hack to allow the collection of the younger gen first if the flag is
duke@435 1201 // set. This is better than using the policy's should_collect_gen0_first()
duke@435 1202 // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
duke@435 1203 virtual bool full_collects_younger_generations() const {
duke@435 1204 return UseCMSCompactAtFullCollection && !CollectGen0First;
duke@435 1205 }
duke@435 1206
duke@435 1207 void space_iterate(SpaceClosure* blk, bool usedOnly = false);
duke@435 1208
duke@435 1209 // Support for compaction
duke@435 1210 CompactibleSpace* first_compaction_space() const;
duke@435 1211 // Adjust quantities in the generation affected by
duke@435 1212 // the compaction.
duke@435 1213 void reset_after_compaction();
duke@435 1214
duke@435 1215 // Allocation support
duke@435 1216 HeapWord* allocate(size_t size, bool tlab);
duke@435 1217 HeapWord* have_lock_and_allocate(size_t size, bool tlab);
coleenp@548 1218 oop promote(oop obj, size_t obj_size);
duke@435 1219 HeapWord* par_allocate(size_t size, bool tlab) {
duke@435 1220 return allocate(size, tlab);
duke@435 1221 }
duke@435 1222
duke@435 1223 // Incremental mode triggering.
duke@435 1224 HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
duke@435 1225 size_t word_size);
duke@435 1226
duke@435 1227 // Used by CMSStats to track direct allocation. The value is sampled and
duke@435 1228 // reset after each young gen collection.
duke@435 1229 size_t direct_allocated_words() const { return _direct_allocated_words; }
duke@435 1230 void reset_direct_allocated_words() { _direct_allocated_words = 0; }
duke@435 1231
duke@435 1232 // Overrides for parallel promotion.
duke@435 1233 virtual oop par_promote(int thread_num,
duke@435 1234 oop obj, markOop m, size_t word_sz);
duke@435 1235 // This one should not be called for CMS.
duke@435 1236 virtual void par_promote_alloc_undo(int thread_num,
duke@435 1237 HeapWord* obj, size_t word_sz);
duke@435 1238 virtual void par_promote_alloc_done(int thread_num);
duke@435 1239 virtual void par_oop_since_save_marks_iterate_done(int thread_num);
duke@435 1240
ysr@2243 1241 virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
duke@435 1242
ysr@1580 1243 // Inform this (non-young) generation that a promotion failure was
ysr@1580 1244 // encountered during a collection of a younger generation that
ysr@1580 1245 // promotes into this generation.
ysr@1580 1246 virtual void promotion_failure_occurred();
ysr@1580 1247
duke@435 1248 bool should_collect(bool full, size_t size, bool tlab);
ysr@529 1249 virtual bool should_concurrent_collect() const;
ysr@529 1250 virtual bool is_too_full() const;
duke@435 1251 void collect(bool full,
duke@435 1252 bool clear_all_soft_refs,
duke@435 1253 size_t size,
duke@435 1254 bool tlab);
duke@435 1255
duke@435 1256 HeapWord* expand_and_allocate(size_t word_size,
duke@435 1257 bool tlab,
duke@435 1258 bool parallel = false);
duke@435 1259
duke@435 1260 // GC prologue and epilogue
duke@435 1261 void gc_prologue(bool full);
duke@435 1262 void gc_prologue_work(bool full, bool registerClosure,
duke@435 1263 ModUnionClosure* modUnionClosure);
duke@435 1264 void gc_epilogue(bool full);
duke@435 1265 void gc_epilogue_work(bool full);
duke@435 1266
duke@435 1267 // Time since last GC of this generation
duke@435 1268 jlong time_of_last_gc(jlong now) {
duke@435 1269 return collector()->time_of_last_gc(now);
duke@435 1270 }
duke@435 1271 void update_time_of_last_gc(jlong now) {
duke@435 1272     collector()->update_time_of_last_gc(now);
duke@435 1273 }
duke@435 1274
duke@435 1275 // Allocation failure
duke@435 1276 void expand(size_t bytes, size_t expand_bytes,
duke@435 1277 CMSExpansionCause::Cause cause);
jmasa@706 1278 virtual bool expand(size_t bytes, size_t expand_bytes);
duke@435 1279 void shrink(size_t bytes);
jmasa@4900 1280 void shrink_by(size_t bytes);
duke@435 1281 HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
duke@435 1282 bool expand_and_ensure_spooling_space(PromotionInfo* promo);
duke@435 1283
duke@435 1284 // Iteration support and related enquiries
duke@435 1285 void save_marks();
duke@435 1286 bool no_allocs_since_save_marks();
duke@435 1287 void younger_refs_iterate(OopsInGenClosure* cl);
duke@435 1288
duke@435 1289 // Iteration support specific to CMS generations
duke@435 1290 void save_sweep_limit();
duke@435 1291
duke@435 1292 // More iteration support
coleenp@4037 1293 virtual void oop_iterate(ExtendedOopClosure* cl);
jmasa@952 1294 virtual void safe_object_iterate(ObjectClosure* cl);
duke@435 1295 virtual void object_iterate(ObjectClosure* cl);
duke@435 1296
duke@435 1297 // Need to declare the full complement of closures, whether we'll
duke@435 1298 // override them or not, or else get a message from the compiler:
duke@435 1299 // oop_since_save_marks_iterate_nv hides virtual function...
duke@435 1300 #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
duke@435 1301 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
duke@435 1302 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)
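// An illustrative sketch (not the literal expansion): for each
// (OopClosureType, nv_suffix) pair supplied by ALL_SINCE_SAVE_MARKS_CLOSURES
// in genOopClosures.hpp, the macro above declares one method, e.g.
//
//   void oop_since_save_marks_iterate_nv(ScanClosure* cl);
//   void oop_since_save_marks_iterate_v(ExtendedOopClosure* cl);
//
// The concrete list of closure types depends on that macro's definition in
// this build; the two lines above are assumptions shown only for shape.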
duke@435 1303
duke@435 1304 // Smart allocation XXX -- move to CFLSpace?
duke@435 1305 void setNearLargestChunk();
duke@435 1306 bool isNearLargestChunk(HeapWord* addr);
duke@435 1307
duke@435 1308 // Get the chunk at the end of the space. Delegates to
duke@435 1309 // the space.
duke@435 1310 FreeChunk* find_chunk_at_end();
duke@435 1311
duke@435 1312 void post_compact();
duke@435 1313
duke@435 1314 // Debugging
duke@435 1315 void prepare_for_verify();
brutisso@3711 1316 void verify();
duke@435 1317 void print_statistics() PRODUCT_RETURN;
duke@435 1318
duke@435 1319 // Performance Counters support
duke@435 1320 virtual void update_counters();
duke@435 1321 virtual void update_counters(size_t used);
duke@435 1322 void initialize_performance_counters();
duke@435 1323 CollectorCounters* counters() { return collector()->counters(); }
duke@435 1324
duke@435 1325 // Support for parallel remark of survivor space
duke@435 1326 void* get_data_recorder(int thr_num) {
duke@435 1327     // Delegate to collector
duke@435 1328 return collector()->get_data_recorder(thr_num);
duke@435 1329 }
jmasa@5459 1330 void sample_eden_chunk() {
jmasa@5459 1331     // Delegate to collector
jmasa@5459 1332 return collector()->sample_eden_chunk();
jmasa@5459 1333 }
duke@435 1334
duke@435 1335 // Printing
duke@435 1336 const char* name() const;
duke@435 1337 virtual const char* short_name() const { return "CMS"; }
duke@435 1338 void print() const;
duke@435 1339 void printOccupancy(const char* s);
duke@435 1340 bool must_be_youngest() const { return false; }
duke@435 1341 bool must_be_oldest() const { return true; }
duke@435 1342
jmasa@4900 1343 // Resize the generation after a compacting GC. The
jmasa@4900 1344 // generation can be treated as a contiguous space
jmasa@4900 1345 // after the compaction.
jmasa@4900 1346 virtual void compute_new_size();
jmasa@4900 1347 // Resize the generation after a non-compacting
jmasa@4900 1348 // collection.
jmasa@4900 1349 void compute_new_size_free_list();
duke@435 1350
duke@435 1351 CollectionTypes debug_collection_type() { return _debug_collection_type; }
duke@435 1352 void rotate_debug_collection_type();
duke@435 1353 };
duke@435 1354
duke@435 1355 class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
duke@435 1356
duke@435 1357 // Return the size policy from the heap's collector
duke@435 1358 // policy casted to CMSAdaptiveSizePolicy*.
duke@435 1359 CMSAdaptiveSizePolicy* cms_size_policy() const;
duke@435 1360
duke@435 1361 // Resize the generation based on the adaptive size
duke@435 1362 // policy.
duke@435 1363 void resize(size_t cur_promo, size_t desired_promo);
duke@435 1364
duke@435 1365 // Return the GC counters from the collector policy
duke@435 1366 CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
duke@435 1367
duke@435 1368 virtual void shrink_by(size_t bytes);
duke@435 1369
duke@435 1370 public:
duke@435 1371 ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
duke@435 1372 int level, CardTableRS* ct,
duke@435 1373 bool use_adaptive_freelists,
jmasa@3730 1374 FreeBlockDictionary<FreeChunk>::DictionaryChoice
duke@435 1375 dictionaryChoice) :
duke@435 1376 ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
duke@435 1377 use_adaptive_freelists, dictionaryChoice) {}
duke@435 1378
duke@435 1379 virtual const char* short_name() const { return "ASCMS"; }
duke@435 1380 virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }
duke@435 1381
duke@435 1382 virtual void update_counters();
duke@435 1383 virtual void update_counters(size_t used);
duke@435 1384 };
duke@435 1385
duke@435 1386 //
duke@435 1387 // Closures of various sorts used by CMS to accomplish its work
duke@435 1388 //
duke@435 1389
duke@435 1390 // This closure is used to do concurrent marking from the roots
duke@435 1391 // following the first checkpoint.
duke@435 1392 class MarkFromRootsClosure: public BitMapClosure {
duke@435 1393 CMSCollector* _collector;
duke@435 1394 MemRegion _span;
duke@435 1395 CMSBitMap* _bitMap;
duke@435 1396 CMSBitMap* _mut;
duke@435 1397 CMSMarkStack* _markStack;
duke@435 1398 bool _yield;
duke@435 1399 int _skipBits;
duke@435 1400 HeapWord* _finger;
duke@435 1401 HeapWord* _threshold;
duke@435 1402 DEBUG_ONLY(bool _verifying;)
duke@435 1403
duke@435 1404 public:
duke@435 1405 MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
duke@435 1406 CMSBitMap* bitMap,
duke@435 1407 CMSMarkStack* markStack,
duke@435 1408 bool should_yield, bool verifying = false);
ysr@777 1409 bool do_bit(size_t offset);
duke@435 1410 void reset(HeapWord* addr);
duke@435 1411 inline void do_yield_check();
duke@435 1412
duke@435 1413 private:
duke@435 1414 void scanOopsInOop(HeapWord* ptr);
duke@435 1415 void do_yield_work();
duke@435 1416 };
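// A minimal usage sketch, assuming CMSBitMap exposes an iterate() entry point
// over a BitMapClosure (as the marking phase in CMSCollector uses it); the
// bit map drives do_bit() for every set bit, and reset() repositions the
// internal finger if marking has to restart (e.g. after a stack overflow):
//
//   MarkFromRootsClosure mfr_cl(collector, span, bit_map, mark_stack,
//                               true /* should_yield */);
//   bit_map->iterate(&mfr_cl, span.start(), span.end());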
duke@435 1417
duke@435 1418 // This closure is used to do concurrent multi-threaded
duke@435 1419 // marking from the roots following the first checkpoint.
duke@435 1420 // XXX This should really be a subclass of the serial version
duke@435 1421 // above, but I have not had the time to refactor things cleanly.
duke@435 1422 // That will be done for Dolphin.
duke@435 1423 class Par_MarkFromRootsClosure: public BitMapClosure {
duke@435 1424 CMSCollector* _collector;
duke@435 1425 MemRegion _whole_span;
duke@435 1426 MemRegion _span;
duke@435 1427 CMSBitMap* _bit_map;
duke@435 1428 CMSBitMap* _mut;
duke@435 1429 OopTaskQueue* _work_queue;
duke@435 1430 CMSMarkStack* _overflow_stack;
duke@435 1431 bool _yield;
duke@435 1432 int _skip_bits;
duke@435 1433 HeapWord* _finger;
duke@435 1434 HeapWord* _threshold;
duke@435 1435 CMSConcMarkingTask* _task;
duke@435 1436 public:
duke@435 1437 Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
duke@435 1438 MemRegion span,
duke@435 1439 CMSBitMap* bit_map,
duke@435 1440 OopTaskQueue* work_queue,
duke@435 1441 CMSMarkStack* overflow_stack,
duke@435 1442 bool should_yield);
ysr@777 1443 bool do_bit(size_t offset);
duke@435 1444 inline void do_yield_check();
duke@435 1445
duke@435 1446 private:
duke@435 1447 void scan_oops_in_oop(HeapWord* ptr);
duke@435 1448 void do_yield_work();
duke@435 1449 bool get_work_from_overflow_stack();
duke@435 1450 };
duke@435 1451
duke@435 1452 // The following closures are used to do certain kinds of verification of
duke@435 1453 // CMS marking.
stefank@6982 1454 class PushAndMarkVerifyClosure: public MetadataAwareOopClosure {
duke@435 1455 CMSCollector* _collector;
duke@435 1456 MemRegion _span;
duke@435 1457 CMSBitMap* _verification_bm;
duke@435 1458 CMSBitMap* _cms_bm;
duke@435 1459 CMSMarkStack* _mark_stack;
coleenp@548 1460 protected:
coleenp@548 1461 void do_oop(oop p);
coleenp@548 1462 template <class T> inline void do_oop_work(T *p) {
coleenp@4037 1463 oop obj = oopDesc::load_decode_heap_oop(p);
coleenp@548 1464 do_oop(obj);
coleenp@548 1465 }
duke@435 1466 public:
duke@435 1467 PushAndMarkVerifyClosure(CMSCollector* cms_collector,
duke@435 1468 MemRegion span,
duke@435 1469 CMSBitMap* verification_bm,
duke@435 1470 CMSBitMap* cms_bm,
duke@435 1471 CMSMarkStack* mark_stack);
duke@435 1472 void do_oop(oop* p);
coleenp@548 1473 void do_oop(narrowOop* p);
coleenp@4037 1474
duke@435 1475 // Deal with a stack overflow condition
duke@435 1476 void handle_stack_overflow(HeapWord* lost);
duke@435 1477 };
duke@435 1478
duke@435 1479 class MarkFromRootsVerifyClosure: public BitMapClosure {
duke@435 1480 CMSCollector* _collector;
duke@435 1481 MemRegion _span;
duke@435 1482 CMSBitMap* _verification_bm;
duke@435 1483 CMSBitMap* _cms_bm;
duke@435 1484 CMSMarkStack* _mark_stack;
duke@435 1485 HeapWord* _finger;
duke@435 1486 PushAndMarkVerifyClosure _pam_verify_closure;
duke@435 1487 public:
duke@435 1488 MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
duke@435 1489 CMSBitMap* verification_bm,
duke@435 1490 CMSBitMap* cms_bm,
duke@435 1491 CMSMarkStack* mark_stack);
ysr@777 1492 bool do_bit(size_t offset);
duke@435 1493 void reset(HeapWord* addr);
duke@435 1494 };
duke@435 1495
duke@435 1496
duke@435 1497 // This closure is used to check that a certain set of bits is
duke@435 1498 // "empty" (i.e. the bit vector doesn't have any 1-bits).
duke@435 1499 class FalseBitMapClosure: public BitMapClosure {
duke@435 1500 public:
ysr@777 1501 bool do_bit(size_t offset) {
duke@435 1502 guarantee(false, "Should not have a 1 bit");
ysr@777 1503 return true;
duke@435 1504 }
duke@435 1505 };
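// Usage sketch, with the same iterate() assumption as above: apply the closure
// to a bit map range that is expected to be clear (e.g. during verification);
// any remaining 1-bit trips the guarantee:
//
//   FalseBitMapClosure no_bits;
//   bit_map->iterate(&no_bits, start, end);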
duke@435 1506
mgerdin@6979 1507 // A version of ObjectClosure with "memory" (see _previous_address below)
mgerdin@6979 1508 class UpwardsObjectClosure: public BoolObjectClosure {
mgerdin@6979 1509 HeapWord* _previous_address;
mgerdin@6979 1510 public:
mgerdin@6979 1511 UpwardsObjectClosure() : _previous_address(NULL) { }
mgerdin@6979 1512 void set_previous(HeapWord* addr) { _previous_address = addr; }
mgerdin@6979 1513 HeapWord* previous() { return _previous_address; }
mgerdin@6979 1514 // A return value of "true" can be used by the caller to decide
mgerdin@6979 1515 // if this object's end should *NOT* be recorded in
mgerdin@6979 1516 // _previous_address above.
mgerdin@6979 1517 virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
mgerdin@6979 1518 };
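// Usage sketch (names are illustrative, not a concrete API): the space walks
// marked objects in increasing address order and consults previous() to avoid
// revisiting an object whose tail was already scanned:
//
//   cl->set_previous(scan_start);
//   // for each marked object obj covering MemRegion mr, in address order:
//   //   bool dont_record = cl->do_object_bm(obj, mr);
//   //   if (!dont_record) cl->set_previous(mr.end());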
mgerdin@6979 1519
duke@435 1520 // This closure is used during the second checkpointing phase
duke@435 1521 // to rescan the marked objects on the dirty cards in the mod
duke@435 1522 // union table and the card table proper. It's invoked via
duke@435 1523 // MarkFromDirtyCardsClosure below. It uses either
duke@435 1524 // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
duke@435 1525 // declared in genOopClosures.hpp to accomplish some of its work.
duke@435 1526 // In the parallel case the bitMap is shared, so access to
duke@435 1527 // it needs to be suitably synchronized for updates by embedded
duke@435 1528 // closures that update it; however, this closure itself only
duke@435 1529 // reads the bit_map and because it is idempotent, is immune to
duke@435 1530 // reading stale values.
duke@435 1531 class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
duke@435 1532 #ifdef ASSERT
duke@435 1533 CMSCollector* _collector;
duke@435 1534 MemRegion _span;
duke@435 1535 union {
duke@435 1536 CMSMarkStack* _mark_stack;
duke@435 1537 OopTaskQueue* _work_queue;
duke@435 1538 };
duke@435 1539 #endif // ASSERT
duke@435 1540 bool _parallel;
duke@435 1541 CMSBitMap* _bit_map;
duke@435 1542 union {
duke@435 1543 MarkRefsIntoAndScanClosure* _scan_closure;
duke@435 1544 Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
duke@435 1545 };
duke@435 1546
duke@435 1547 public:
duke@435 1548 ScanMarkedObjectsAgainClosure(CMSCollector* collector,
duke@435 1549 MemRegion span,
duke@435 1550 ReferenceProcessor* rp,
duke@435 1551 CMSBitMap* bit_map,
duke@435 1552 CMSMarkStack* mark_stack,
duke@435 1553 MarkRefsIntoAndScanClosure* cl):
duke@435 1554 #ifdef ASSERT
duke@435 1555 _collector(collector),
duke@435 1556 _span(span),
duke@435 1557 _mark_stack(mark_stack),
duke@435 1558 #endif // ASSERT
duke@435 1559 _parallel(false),
duke@435 1560 _bit_map(bit_map),
duke@435 1561 _scan_closure(cl) { }
duke@435 1562
duke@435 1563 ScanMarkedObjectsAgainClosure(CMSCollector* collector,
duke@435 1564 MemRegion span,
duke@435 1565 ReferenceProcessor* rp,
duke@435 1566 CMSBitMap* bit_map,
duke@435 1567 OopTaskQueue* work_queue,
duke@435 1568 Par_MarkRefsIntoAndScanClosure* cl):
duke@435 1569 #ifdef ASSERT
duke@435 1570 _collector(collector),
duke@435 1571 _span(span),
duke@435 1572 _work_queue(work_queue),
duke@435 1573 #endif // ASSERT
duke@435 1574 _parallel(true),
duke@435 1575 _bit_map(bit_map),
duke@435 1576 _par_scan_closure(cl) { }
duke@435 1577
duke@435 1578 bool do_object_b(oop obj) {
duke@435 1579     guarantee(false, "Call the do_object_bm(oop, MemRegion) form instead");
duke@435 1580 return false;
duke@435 1581 }
duke@435 1582 bool do_object_bm(oop p, MemRegion mr);
duke@435 1583 };
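// Construction sketch: the first constructor sets up the serial rescan and the
// second the parallel one; exactly one member of each anonymous union is live
// per instance, selected by _parallel. The closure variable names below are
// illustrative assumptions:
//
//   ScanMarkedObjectsAgainClosure ser_cl(collector, span, rp, bit_map,
//                                        mark_stack, &mrias_cl);
//   ScanMarkedObjectsAgainClosure par_cl(collector, span, rp, bit_map,
//                                        work_queue, &par_mrias_cl);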
duke@435 1584
duke@435 1585 // This closure is used during the second checkpointing phase
duke@435 1586 // to rescan the marked objects on the dirty cards in the mod
duke@435 1587 // union table and the card table proper. It invokes
duke@435 1588 // ScanMarkedObjectsAgainClosure above to accomplish much of its work.
duke@435 1589 // In the parallel case, the bit map is shared and requires
duke@435 1590 // synchronized access.
duke@435 1591 class MarkFromDirtyCardsClosure: public MemRegionClosure {
duke@435 1592 CompactibleFreeListSpace* _space;
duke@435 1593 ScanMarkedObjectsAgainClosure _scan_cl;
duke@435 1594 size_t _num_dirty_cards;
duke@435 1595
duke@435 1596 public:
duke@435 1597 MarkFromDirtyCardsClosure(CMSCollector* collector,
duke@435 1598 MemRegion span,
duke@435 1599 CompactibleFreeListSpace* space,
duke@435 1600 CMSBitMap* bit_map,
duke@435 1601 CMSMarkStack* mark_stack,
duke@435 1602 MarkRefsIntoAndScanClosure* cl):
duke@435 1603 _space(space),
duke@435 1604 _num_dirty_cards(0),
duke@435 1605 _scan_cl(collector, span, collector->ref_processor(), bit_map,
coleenp@4037 1606 mark_stack, cl) { }
duke@435 1607
duke@435 1608 MarkFromDirtyCardsClosure(CMSCollector* collector,
duke@435 1609 MemRegion span,
duke@435 1610 CompactibleFreeListSpace* space,
duke@435 1611 CMSBitMap* bit_map,
duke@435 1612 OopTaskQueue* work_queue,
duke@435 1613 Par_MarkRefsIntoAndScanClosure* cl):
duke@435 1614 _space(space),
duke@435 1615 _num_dirty_cards(0),
duke@435 1616 _scan_cl(collector, span, collector->ref_processor(), bit_map,
coleenp@4037 1617 work_queue, cl) { }
duke@435 1618
duke@435 1619 void do_MemRegion(MemRegion mr);
duke@435 1620 void set_space(CompactibleFreeListSpace* space) { _space = space; }
duke@435 1621 size_t num_dirty_cards() { return _num_dirty_cards; }
duke@435 1622 };
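// Usage sketch (hedged): during remark, dirty ranges harvested from the mod
// union table and the card table are handed to this closure one MemRegion at
// a time; num_dirty_cards() afterwards reports how many cards were rescanned:
//
//   MarkFromDirtyCardsClosure dirty_cl(collector, span, cms_space, bit_map,
//                                      mark_stack, &mrias_cl);
//   // for each dirty MemRegion mr:  dirty_cl.do_MemRegion(mr);
//   // size_t n = dirty_cl.num_dirty_cards();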
duke@435 1623
duke@435 1624 // This closure is used in the non-product build to check
duke@435 1625 // that there are no MemRegions with a certain property.
duke@435 1626 class FalseMemRegionClosure: public MemRegionClosure {
duke@435 1627 void do_MemRegion(MemRegion mr) {
duke@435 1628 guarantee(!mr.is_empty(), "Shouldn't be empty");
duke@435 1629 guarantee(false, "Should never be here");
duke@435 1630 }
duke@435 1631 };
duke@435 1632
duke@435 1633 // This closure is used during the precleaning phase
duke@435 1634 // to "carefully" rescan marked objects on dirty cards.
duke@435 1635 // It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
duke@435 1636 // to accomplish some of its work.
duke@435 1637 class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
duke@435 1638 CMSCollector* _collector;
duke@435 1639 MemRegion _span;
duke@435 1640 bool _yield;
duke@435 1641 Mutex* _freelistLock;
duke@435 1642 CMSBitMap* _bitMap;
duke@435 1643 CMSMarkStack* _markStack;
duke@435 1644 MarkRefsIntoAndScanClosure* _scanningClosure;
duke@435 1645
duke@435 1646 public:
duke@435 1647 ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
duke@435 1648 MemRegion span,
duke@435 1649 CMSBitMap* bitMap,
duke@435 1650 CMSMarkStack* markStack,
duke@435 1651 MarkRefsIntoAndScanClosure* cl,
duke@435 1652 bool should_yield):
duke@435 1653 _collector(collector),
duke@435 1654 _span(span),
duke@435 1655 _yield(should_yield),
duke@435 1656 _bitMap(bitMap),
duke@435 1657 _markStack(markStack),
duke@435 1658 _scanningClosure(cl) {
duke@435 1659 }
duke@435 1660
duke@435 1661 void do_object(oop p) {
duke@435 1662 guarantee(false, "call do_object_careful instead");
duke@435 1663 }
duke@435 1664
duke@435 1665 size_t do_object_careful(oop p) {
duke@435 1666 guarantee(false, "Unexpected caller");
duke@435 1667 return 0;
duke@435 1668 }
duke@435 1669
duke@435 1670 size_t do_object_careful_m(oop p, MemRegion mr);
duke@435 1671
duke@435 1672 void setFreelistLock(Mutex* m) {
duke@435 1673 _freelistLock = m;
duke@435 1674 _scanningClosure->set_freelistLock(m);
duke@435 1675 }
duke@435 1676
duke@435 1677 private:
duke@435 1678 inline bool do_yield_check();
duke@435 1679
duke@435 1680 void do_yield_work();
duke@435 1681 };
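// Usage sketch (hedged; driver details are descriptive, not a concrete API):
// precleaning constructs the closure with should_yield normally true, hands it
// the generation's freelist lock, and lets the space drive do_object_careful_m()
// over objects on dirty cards; a return of 0 from that method indicates the
// object could not be scanned yet:
//
//   ScanMarkedObjectsAgainCarefullyClosure careful_cl(collector, span, bitMap,
//                                                     markStack, &mrias_cl,
//                                                     true /* should_yield */);
//   careful_cl.setFreelistLock(gen->freelistLock());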
duke@435 1682
duke@435 1683 class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
duke@435 1684 CMSCollector* _collector;
duke@435 1685 MemRegion _span;
duke@435 1686 bool _yield;
duke@435 1687 CMSBitMap* _bit_map;
duke@435 1688 CMSMarkStack* _mark_stack;
duke@435 1689 PushAndMarkClosure* _scanning_closure;
duke@435 1690 unsigned int _before_count;
duke@435 1691
duke@435 1692 public:
duke@435 1693 SurvivorSpacePrecleanClosure(CMSCollector* collector,
duke@435 1694 MemRegion span,
duke@435 1695 CMSBitMap* bit_map,
duke@435 1696 CMSMarkStack* mark_stack,
duke@435 1697 PushAndMarkClosure* cl,
duke@435 1698 unsigned int before_count,
duke@435 1699 bool should_yield):
duke@435 1700 _collector(collector),
duke@435 1701 _span(span),
duke@435 1702 _yield(should_yield),
duke@435 1703 _bit_map(bit_map),
duke@435 1704 _mark_stack(mark_stack),
duke@435 1705 _scanning_closure(cl),
duke@435 1706 _before_count(before_count)
duke@435 1707 { }
duke@435 1708
duke@435 1709 void do_object(oop p) {
duke@435 1710 guarantee(false, "call do_object_careful instead");
duke@435 1711 }
duke@435 1712
duke@435 1713 size_t do_object_careful(oop p);
duke@435 1714
duke@435 1715 size_t do_object_careful_m(oop p, MemRegion mr) {
duke@435 1716 guarantee(false, "Unexpected caller");
duke@435 1717 return 0;
duke@435 1718 }
duke@435 1719
duke@435 1720 private:
duke@435 1721 inline void do_yield_check();
duke@435 1722 void do_yield_work();
duke@435 1723 };
duke@435 1724
duke@435 1725 // This closure is used to accomplish the sweeping work
duke@435 1726 // after the second checkpoint but before the concurrent reset
duke@435 1727 // phase.
duke@435 1728 //
duke@435 1729 // Terminology
duke@435 1730 // left hand chunk (LHC) - block of one or more chunks currently being
duke@435 1731 // coalesced. The LHC is available for coalescing with a new chunk.
duke@435 1732 // right hand chunk (RHC) - the block currently being swept, which is
duke@435 1733 // free or garbage and can be coalesced with the LHC.
duke@435 1734 // _inFreeRange is true if there is currently a LHC
duke@435 1735 // _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
duke@435 1736 // _freeRangeInFreeLists is true if the LHC is in the free lists.
duke@435 1737 // _freeFinger is the address of the current LHC
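// Worked example (illustrative): suppose the sweep encounters, in address
// order, a free chunk F, then a garbage block G, then a live object L.
//   - at F: _inFreeRange becomes true, _freeFinger points at F (the LHC),
//     and _freeRangeInFreeLists records whether F is still on the free lists;
//   - at G: G is coalesced into the LHC and _lastFreeRangeCoalesced is set;
//   - at L: the accumulated LHC [F, L) is returned to the free lists and
//     _inFreeRange is cleared.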
duke@435 1738 class SweepClosure: public BlkClosureCareful {
duke@435 1739 CMSCollector* _collector; // collector doing the work
duke@435 1740 ConcurrentMarkSweepGeneration* _g; // Generation being swept
duke@435 1741 CompactibleFreeListSpace* _sp; // Space being swept
ysr@2943 1742   HeapWord*                     _limit; // the address at or above which the sweep should stop,
ysr@2943 1743                                         // because we do not expect blocks that are newly garbage
ysr@2943 1744                                         // (and hence eligible for sweeping) past that address.
duke@435 1745 Mutex* _freelistLock; // Free list lock (in space)
duke@435 1746 CMSBitMap* _bitMap; // Marking bit map (in
duke@435 1747 // generation)
duke@435 1748 bool _inFreeRange; // Indicates if we are in the
duke@435 1749 // midst of a free run
duke@435 1750 bool _freeRangeInFreeLists;
duke@435 1751 // Often, we have just found
duke@435 1752 // a free chunk and started
duke@435 1753 // a new free range; we do not
duke@435 1754 // eagerly remove this chunk from
duke@435 1755 // the free lists unless there is
duke@435 1756 // a possibility of coalescing.
duke@435 1757 // When true, this flag indicates
duke@435 1758 // that the _freeFinger below
duke@435 1759 // points to a potentially free chunk
duke@435 1760 // that may still be in the free lists
duke@435 1761 bool _lastFreeRangeCoalesced;
duke@435 1762 // free range contains chunks
duke@435 1763 // coalesced
duke@435 1764 bool _yield;
duke@435 1765 // Whether sweeping should be
duke@435 1766 // done with yields. For instance
duke@435 1767 // when done by the foreground
duke@435 1768 // collector we shouldn't yield.
duke@435 1769 HeapWord* _freeFinger; // When _inFreeRange is set, the
duke@435 1770 // pointer to the "left hand
duke@435 1771 // chunk"
duke@435 1772 size_t _freeRangeSize;
duke@435 1773 // When _inFreeRange is set, this
duke@435 1774 // indicates the accumulated size
duke@435 1775 // of the "left hand chunk"
duke@435 1776 NOT_PRODUCT(
duke@435 1777 size_t _numObjectsFreed;
duke@435 1778 size_t _numWordsFreed;
duke@435 1779 size_t _numObjectsLive;
duke@435 1780 size_t _numWordsLive;
duke@435 1781 size_t _numObjectsAlreadyFree;
duke@435 1782 size_t _numWordsAlreadyFree;
duke@435 1783 FreeChunk* _last_fc;
duke@435 1784 )
duke@435 1785 private:
duke@435 1786 // Code that is common to a free chunk or garbage when
duke@435 1787 // encountered during sweeping.
ysr@2452 1788 void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
duke@435 1789 // Process a free chunk during sweeping.
ysr@2452 1790 void do_already_free_chunk(FreeChunk *fc);
ysr@2943 1791 // Work method called when processing an already free or a
ysr@2943 1792   // freshly-garbage chunk, to do a lookahead and possibly a
ysr@2943 1793   // preemptive flush if crossing over _limit.
ysr@2943 1794 void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
duke@435 1795 // Process a garbage chunk during sweeping.
ysr@2452 1796 size_t do_garbage_chunk(FreeChunk *fc);
duke@435 1797 // Process a live chunk during sweeping.
ysr@2452 1798 size_t do_live_chunk(FreeChunk* fc);
duke@435 1799
duke@435 1800 // Accessors.
duke@435 1801 HeapWord* freeFinger() const { return _freeFinger; }
duke@435 1802 void set_freeFinger(HeapWord* v) { _freeFinger = v; }
duke@435 1803 bool inFreeRange() const { return _inFreeRange; }
duke@435 1804 void set_inFreeRange(bool v) { _inFreeRange = v; }
duke@435 1805 bool lastFreeRangeCoalesced() const { return _lastFreeRangeCoalesced; }
duke@435 1806 void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
duke@435 1807 bool freeRangeInFreeLists() const { return _freeRangeInFreeLists; }
duke@435 1808 void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }
duke@435 1809
duke@435 1810 // Initialize a free range.
duke@435 1811 void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
duke@435 1812 // Return this chunk to the free lists.
ysr@2452 1813 void flush_cur_free_chunk(HeapWord* chunk, size_t size);
duke@435 1814
duke@435 1815 // Check if we should yield and do so when necessary.
duke@435 1816 inline void do_yield_check(HeapWord* addr);
duke@435 1817
duke@435 1818 // Yield
duke@435 1819 void do_yield_work(HeapWord* addr);
duke@435 1820
duke@435 1821 // Debugging/Printing
ysr@2943 1822 void print_free_block_coalesced(FreeChunk* fc) const;
duke@435 1823
duke@435 1824 public:
duke@435 1825 SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
duke@435 1826 CMSBitMap* bitMap, bool should_yield);
ysr@2943 1827 ~SweepClosure() PRODUCT_RETURN;
duke@435 1828
duke@435 1829 size_t do_blk_careful(HeapWord* addr);
ysr@2943 1830 void print() const { print_on(tty); }
ysr@2943 1831 void print_on(outputStream *st) const;
duke@435 1832 };
duke@435 1833
duke@435 1834 // Closures related to weak references processing
duke@435 1835
duke@435 1836 // During CMS' weak reference processing, this is a
duke@435 1837 // work-routine/closure used to complete transitive
duke@435 1838 // marking of objects as live after a certain point
duke@435 1839 // in which an initial set has been completely accumulated.
ysr@887 1840 // This closure is currently used both during the final
ysr@887 1841 // remark stop-world phase, as well as during the concurrent
ysr@887 1842 // precleaning of the discovered reference lists.
duke@435 1843 class CMSDrainMarkingStackClosure: public VoidClosure {
duke@435 1844 CMSCollector* _collector;
duke@435 1845 MemRegion _span;
duke@435 1846 CMSMarkStack* _mark_stack;
duke@435 1847 CMSBitMap* _bit_map;
duke@435 1848 CMSKeepAliveClosure* _keep_alive;
ysr@887 1849 bool _concurrent_precleaning;
duke@435 1850 public:
duke@435 1851 CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
duke@435 1852 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
ysr@887 1853 CMSKeepAliveClosure* keep_alive,
ysr@887 1854 bool cpc):
duke@435 1855 _collector(collector),
duke@435 1856 _span(span),
duke@435 1857 _bit_map(bit_map),
duke@435 1858 _mark_stack(mark_stack),
ysr@887 1859 _keep_alive(keep_alive),
ysr@887 1860 _concurrent_precleaning(cpc) {
ysr@887 1861 assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
ysr@887 1862 "Mismatch");
ysr@887 1863 }
duke@435 1864
duke@435 1865 void do_void();
duke@435 1866 };
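// Usage sketch, assuming the standard ReferenceProcessor protocol and the
// CMSKeepAliveClosure constructor shape used elsewhere in CMS (both are
// assumptions here): reference processing alternates between the keep-alive
// closure, which marks a referent and pushes it on the mark stack, and this
// drain closure, which completes the transitive closure:
//
//   CMSKeepAliveClosure keep_alive(collector, span, bit_map, mark_stack, cpc);
//   CMSDrainMarkingStackClosure drain(collector, span, bit_map, mark_stack,
//                                     &keep_alive, cpc);
//   rp->process_discovered_references(&is_alive, &keep_alive, &drain,
//                                     ... /* executor, timing, etc. */);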
duke@435 1867
duke@435 1868 // A parallel version of CMSDrainMarkingStackClosure above.
duke@435 1869 class CMSParDrainMarkingStackClosure: public VoidClosure {
duke@435 1870 CMSCollector* _collector;
duke@435 1871 MemRegion _span;
duke@435 1872 OopTaskQueue* _work_queue;
duke@435 1873 CMSBitMap* _bit_map;
duke@435 1874 CMSInnerParMarkAndPushClosure _mark_and_push;
duke@435 1875
duke@435 1876 public:
duke@435 1877 CMSParDrainMarkingStackClosure(CMSCollector* collector,
duke@435 1878 MemRegion span, CMSBitMap* bit_map,
duke@435 1879 OopTaskQueue* work_queue):
duke@435 1880 _collector(collector),
duke@435 1881 _span(span),
duke@435 1882 _bit_map(bit_map),
duke@435 1883 _work_queue(work_queue),
coleenp@4037 1884 _mark_and_push(collector, span, bit_map, work_queue) { }
duke@435 1885
duke@435 1886 public:
duke@435 1887 void trim_queue(uint max);
duke@435 1888 void do_void();
duke@435 1889 };
duke@435 1890
duke@435 1891 // Allow yielding or short-circuiting of reference list
duke@435 1892 // precleaning work.
duke@435 1893 class CMSPrecleanRefsYieldClosure: public YieldClosure {
duke@435 1894 CMSCollector* _collector;
duke@435 1895 void do_yield_work();
duke@435 1896 public:
duke@435 1897 CMSPrecleanRefsYieldClosure(CMSCollector* collector):
duke@435 1898 _collector(collector) {}
duke@435 1899 virtual bool should_return();
duke@435 1900 };
duke@435 1901
duke@435 1902
duke@435 1903 // Convenience class that locks free list locks for given CMS collector
duke@435 1904 class FreelistLocker: public StackObj {
duke@435 1905 private:
duke@435 1906 CMSCollector* _collector;
duke@435 1907 public:
duke@435 1908 FreelistLocker(CMSCollector* collector):
duke@435 1909 _collector(collector) {
duke@435 1910 _collector->getFreelistLocks();
duke@435 1911 }
duke@435 1912
duke@435 1913 ~FreelistLocker() {
duke@435 1914 _collector->releaseFreelistLocks();
duke@435 1915 }
duke@435 1916 };
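// Usage sketch: typical RAII scoping; all free list locks are taken at
// construction and released when the locker goes out of scope:
//
//   {
//     FreelistLocker fll(collector);   // getFreelistLocks()
//     // ... manipulate the free lists ...
//   }                                  // releaseFreelistLocks()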
duke@435 1917
duke@435 1918 // Mark all dead objects in a given space.
duke@435 1919 class MarkDeadObjectsClosure: public BlkClosure {
duke@435 1920 const CMSCollector* _collector;
duke@435 1921 const CompactibleFreeListSpace* _sp;
duke@435 1922 CMSBitMap* _live_bit_map;
duke@435 1923 CMSBitMap* _dead_bit_map;
duke@435 1924 public:
duke@435 1925 MarkDeadObjectsClosure(const CMSCollector* collector,
duke@435 1926 const CompactibleFreeListSpace* sp,
duke@435 1927 CMSBitMap *live_bit_map,
duke@435 1928 CMSBitMap *dead_bit_map) :
duke@435 1929 _collector(collector),
duke@435 1930 _sp(sp),
duke@435 1931 _live_bit_map(live_bit_map),
duke@435 1932 _dead_bit_map(dead_bit_map) {}
duke@435 1933 size_t do_blk(HeapWord* addr);
duke@435 1934 };
kevinw@2058 1935
kevinw@2058 1936 class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {
kevinw@2058 1937
kevinw@2058 1938 public:
fparain@2888 1939 TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause);
kevinw@2058 1940 };
kevinw@2058 1941
stefank@2314 1942
stefank@2314 1943 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
