src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp

author:    ysr
date:      Thu, 20 Nov 2008 16:56:09 -0800
changeset: 888:c96030fff130
parent:    887:00b023ae2d78
child:     952:e9be0e04635a

6684579: SoftReference processing can be made more efficient
Summary: For the current soft-ref clearing policies, we can decide at marking time whether a soft reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This is especially beneficial for concurrent collectors, where marking is usually concurrent but reference processing usually is not.
Reviewed-by: jmasa

/*
 * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// ConcurrentMarkSweepGeneration is in support of a concurrent
// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker
// style. We assume, for now, that this generation is always the
// seniormost generation (modulo the PermGeneration), and for simplicity
// in the first implementation, that this generation is a single compactible
// space. Neither of these restrictions appears essential; they will be
// relaxed in the future when more time is available to implement the
// greater generality (and there's a need for it).
//
// Concurrent mode failures are currently handled by
// means of a sliding mark-compact.

class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;

// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1<<_shifter) HeapWords. (i.e., for the marking bit map,
// we have _shifter == 0, and for the mod union table we have
// _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord*    _bmStartWord;   // base address of range covered by map
  size_t       _bmWordSize;    // map size (in #HeapWords covered)
  const int    _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap       _bm;            // the bit map itself
 public:
  Mutex* const _lock;          // mutex protecting _bm

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize; }
  size_t    sizeInBits()  const { return _bm.size(); }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // does not perform locking checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};
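
// Illustrative sketch (not part of the original header): how a CMSBitMap-style
// _shifter maps heap addresses to bit offsets and back. With _shifter == 0
// each HeapWord gets its own bit (marking bit map); a larger shifter covers
// (1 << _shifter) HeapWords per bit (mod union table). The names below
// (bm_start, shifter) are hypothetical stand-ins, and a HeapWord is modeled
// as a machine word.
#include <cassert>
#include <cstddef>
#include <cstdint>

typedef uintptr_t HeapWordStandIn;  // stand-in for HotSpot's HeapWord

inline size_t heap_word_to_offset(const HeapWordStandIn* addr,
                                  const HeapWordStandIn* bm_start,
                                  int shifter) {
  assert(addr >= bm_start && "address below covered range");
  // Pointer difference is already in HeapWords; the shift converts it to a
  // bit index in the underlying BitMap.
  return (size_t)(addr - bm_start) >> shifter;
}

inline const HeapWordStandIn* offset_to_heap_word(size_t offset,
                                                  const HeapWordStandIn* bm_start,
                                                  int shifter) {
  return bm_start + (offset << shifter);  // inverse of the mapping above
}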

// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj {
  friend class CMSCollector;   // to get at expansion stats further below

  VirtualSpace _virtual_space;  // space for the stack
  oop*   _base;      // bottom of stack
  size_t _index;     // one more than last occupied index
  size_t _capacity;  // max #elements
  Mutex  _par_lock;  // an advisory lock used in case of parallel access
  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run

 protected:
  size_t _hit_limit;      // we hit max stack size limit
  size_t _failed_double;  // we failed expansion before hitting limit

 public:
  CMSMarkStack():
    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
    _hit_limit(0),
    _failed_double(0) {}

  bool allocate(size_t size);

  size_t capacity() const { return _capacity; }

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  bool push(oop ptr) {
    if (isFull()) {
      return false;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
      return true;
    }
  }

  bool isEmpty() const { return _index == 0; }
  bool isFull()  const {
    assert(_index <= _capacity, "buffer overflow");
    return _index == _capacity;
  }

  size_t length() { return _index; }

  // "Parallel versions" of some of the above
  oop par_pop() {
    // lock and pop
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return pop();
  }

  bool par_push(oop ptr) {
    // lock and push
    MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
    return push(ptr);
  }

  // Forcibly reset the stack, losing all of its contents.
  void reset() {
    _index = 0;
  }

  // Expand the stack, typically in response to an overflow condition.
  void expand();

  // Compute the least valued stack element.
  oop least_value(HeapWord* low) {
    oop least = (oop)low;
    for (size_t i = 0; i < _index; i++) {
      least = MIN2(least, _base[i]);
    }
    return least;
  }

  // Exposed here to allow stack expansion in the parallel case
  Mutex* par_lock() { return &_par_lock; }
};
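
// Illustrative sketch (not part of the original header): the par_push/par_pop
// pattern above restated as a standalone class, with std::mutex standing in
// for HotSpot's Mutex/MutexLockerEx and a fixed-capacity vector standing in
// for the VirtualSpace-backed buffer. ParStack and its members are
// hypothetical names.
#include <cstddef>
#include <mutex>
#include <vector>

template <typename T>
class ParStack {
  std::vector<T> _base;      // bottom of stack; capacity fixed at construction
  size_t         _index;     // one more than last occupied index
  std::mutex     _par_lock;  // advisory lock used in case of parallel access
 public:
  explicit ParStack(size_t capacity) : _base(capacity), _index(0) {}

  bool push(T v) {                  // sequential version
    if (_index == _base.size()) return false;  // full: caller handles overflow
    _base[_index++] = v;
    return true;
  }
  bool pop(T* out) {                // sequential version
    if (_index == 0) return false;             // empty
    *out = _base[--_index];
    return true;
  }
  bool par_push(T v) {              // lock and push
    std::lock_guard<std::mutex> x(_par_lock);
    return push(v);
  }
  bool par_pop(T* out) {            // lock and pop
    std::lock_guard<std::mutex> x(_par_lock);
    return pop(out);
  }
};
// Usage: ParStack<void*> s(1024); s.par_push(p); void* q; s.par_pop(&q);
// As in CMSMarkStack, a false return from push signals overflow, which the
// collector records and handles separately (e.g. via an overflow list).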

class CardTableRS;
class CMSParGCThreadState;

class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};

class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};

// Survivor Chunk Array in support of parallelization of
// Survivor Space rescan.
class ChunkArray: public CHeapObj {
  size_t _index;
  size_t _capacity;
  HeapWord** _array;  // storage for array

 public:
  ChunkArray() : _index(0), _capacity(0), _array(NULL) {}
  ChunkArray(HeapWord** a, size_t c):
    _index(0), _capacity(c), _array(a) {}

  HeapWord** array() { return _array; }
  void set_array(HeapWord** a) { _array = a; }

  size_t capacity() { return _capacity; }
  void set_capacity(size_t c) { _capacity = c; }

  size_t end() {
    assert(_index < capacity(), "_index out of bounds");
    return _index;
  }  // exclusive

  HeapWord* nth(size_t n) {
    assert(n < end(), "Out of bounds access");
    return _array[n];
  }

  void reset() {
    _index = 0;
  }

  void record_sample(HeapWord* p, size_t sz) {
    // For now we do not do anything with the size
    if (_index < _capacity) {
      _array[_index++] = p;
    }
  }
};
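
// Illustrative sketch (not part of the original header): the behavior of a
// bounded sampling array like ChunkArray. Samples past capacity are silently
// dropped, which is acceptable here because the samples only guide how rescan
// work is partitioned. The demo function and sample values are hypothetical.
#include <cstddef>

static size_t chunk_array_demo() {
  const size_t cap = 4;
  const void* array[cap];
  size_t index = 0;
  int samples[6] = {0, 1, 2, 3, 4, 5};
  for (int i = 0; i < 6; i++) {
    if (index < cap) {           // record_sample: keep only what fits
      array[index++] = &samples[i];
    }
  }
  return index;                  // 4: two of the six samples were dropped
}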

//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing. Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha:
  //   avg = ((100 - alpha) * avg + alpha * cur_sample) / 100, alpha in [0, 100]
  //
  // The durations measure:  end_time[n] - start_time[n]
  // The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up is not included.
  //
  // There are 3 alphas to "bootstrap" the statistics.  The _saved_alpha is the
  // real value, but is used only after the first period.  A value of 100 is
  // used for the first sample so it gets the entire weight.
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;
  unsigned int _cms_alpha;

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;           // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_duration_per_mb;
  double _cms_period;
  size_t _cms_allocated;          // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

  unsigned int _icms_duty_cycle;  // icms duty cycle (0-100).

 protected:

  // Return a duty cycle that avoids wild oscillations, by limiting the amount
  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
  // as a recommended value).
  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                             unsigned int new_duty_cycle);
  unsigned int icms_update_duty_cycle_impl();

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const     { return _gc0_period; }
  double gc0_duration() const   { return _gc0_duration; }
  size_t gc0_promoted() const   { return _gc0_promoted; }
  double cms_period() const     { return _cms_period; }
  double cms_duration() const   { return _cms_duration; }
  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
  size_t cms_allocated() const  { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end; }

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.

  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }

  // Update the duty cycle and return the new value.
  unsigned int icms_update_duty_cycle();

  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};
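
// Illustrative sketch (not part of the original header): the percentage-
// weighted exponential average used throughout CMSStats, plus a damped
// duty-cycle update in the spirit of icms_damped_duty_cycle(). The damping
// bound of 10 points per update is a hypothetical example value, not the one
// HotSpot computes.
#include <algorithm>

// avg = ((100 - alpha) * avg + alpha * cur_sample) / 100, alpha in [0, 100].
static double exp_avg(double avg, double cur_sample, unsigned int alpha) {
  return ((100.0 - alpha) * avg + alpha * cur_sample) / 100.0;
}

static unsigned int damped_duty_cycle(unsigned int old_dc, unsigned int new_dc) {
  const int max_delta = 10;  // hypothetical bound on change per update
  int delta = (int)new_dc - (int)old_dc;
  delta = std::max(-max_delta, std::min(max_delta, delta));
  return (unsigned int)((int)old_dc + delta);
}

static void stats_demo() {
  double avg = 0.0;
  avg = exp_avg(avg, 8.0, 100);   // first sample gets the entire weight: 8.0
  avg = exp_avg(avg, 12.0, 25);   // 0.75 * 8 + 0.25 * 12 = 9.0
  unsigned int dc = damped_duty_cycle(20, 80);  // recommended 80, damped to 30
  (void)avg; (void)dc;
}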

// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;
  const CMSBitMap* _bit_map;

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  void do_object(oop obj) {
    assert(false, "not to be invoked");
  }

  bool do_object_b(oop obj);
};
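
// Illustrative sketch (not part of the original header): the shape of a
// span-plus-bit-map liveness test like CMSIsAliveClosure::do_object_b.
// An object outside the collector's span is conservatively treated as live;
// inside the span, liveness is exactly the marking bit. SpanView and
// BitMapView are hypothetical stand-ins for MemRegion and CMSBitMap.
#include <cstdint>

struct SpanView {
  uintptr_t start, end;  // covers [start, end)
  bool contains(uintptr_t a) const { return a >= start && a < end; }
};

struct BitMapView {
  bool (*is_marked)(uintptr_t addr);  // mark bit lookup for an address
};

inline bool is_alive(uintptr_t obj_addr, const SpanView& span,
                     const BitMapView& bm) {
  if (!span.contains(obj_addr)) {
    return true;  // outside the CMS span: not ours to judge, assume reachable
  }
  return bm.is_marked(obj_addr);  // inside: live iff marked
}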


// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 public:

  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
 private:
  CMSCollector& _collector;
};


class CMSCollector: public CHeapObj {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweepGeneration;
  friend class CompactibleFreeListSpace;
  friend class CMSParRemarkTask;
  friend class CMSConcMarkingTask;
  friend class CMSRefProcTaskProxy;
  friend class CMSRefProcTaskExecutor;
  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
  friend class PushOrMarkClosure;             // to access _restart_addr
  friend class Par_PushOrMarkClosure;         // to access _restart_addr
  friend class MarkFromRootsClosure;          //  -- ditto --
                                              // ... and for clearing cards
  friend class Par_MarkFromRootsClosure;      // to access _restart_addr
                                              // ... and for clearing cards
  friend class Par_ConcMarkingClosure;        // to access _restart_addr etc.
  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
  friend class PushAndMarkVerifyClosure;      //  -- ditto --
  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
  friend class PushAndMarkClosure;            //  -- ditto --
  friend class Par_PushAndMarkClosure;        //  -- ditto --
  friend class CMSKeepAliveClosure;           //  -- ditto --
  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
  friend class VM_CMS_Operation;
  friend class VM_CMS_Initial_Mark;
  friend class VM_CMS_Final_Remark;

 private:
  jlong _time_of_last_gc;
  void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  OopTaskQueueSet* _task_queues;

  // Overflow list of grey objects, threaded through mark-word.
  // Manipulated with CAS in the parallel/multi-threaded case.
  oop _overflow_list;
  // The following array-pair keeps track of mark words
  // displaced to accommodate the overflow list above.
  // This code will likely be revisited under RFE#4922830.
  GrowableArray<oop>*     _preserved_oop_stack;
  GrowableArray<markOop>* _preserved_mark_stack;

  int* _hash_seed;

  // In support of multi-threaded concurrent phases
  YieldingFlexibleWorkGang* _conc_workers;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Initialization Errors
  bool _completed_initialization;

  // In support of ExplicitGCInvokesConcurrent
  static bool _full_gc_requested;
  unsigned int _collection_count_start;

  // Should we unload classes this concurrent cycle?
  bool _should_unload_classes;
  unsigned int _concurrent_cycles_since_last_unload;
  unsigned int concurrent_cycles_since_last_unload() const {
    return _concurrent_cycles_since_last_unload;
  }
  // Did we (allow) unload classes in the previous concurrent cycle?
  bool unloaded_classes_last_cycle() const {
    return concurrent_cycles_since_last_unload() == 0;
  }

  // Verification support
  CMSBitMap _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // true if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  // Check whether the gc time limit has been
  // exceeded and set the size policy flag
  // appropriately.
  void check_gc_time_limit();
  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _sweep_timer;
  AdaptivePaddedAverage _sweep_estimate;

 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
  ConcurrentMarkSweepGeneration* _permGen; // perm gen
  MemRegion                      _span;    // span covering above two
  CardTableRS*                   _ct;      // card table

  // CMS marking support structures
  CMSBitMap    _markBitMap;
  CMSBitMap    _modUnionTable;
  CMSMarkStack _markStack;
  CMSMarkStack _revisitStack;             // used to keep track of klassKlass objects
                                          // to revisit
  CMSBitMap    _perm_gen_verify_bit_map;  // Mark bit map for perm gen verification support.

  HeapWord*    _restart_addr;  // in support of marking stack overflow
  void         lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t _ser_pmc_preclean_ovflw;
  size_t _ser_pmc_remark_ovflw;
  size_t _par_pmc_remark_ovflw;
  size_t _ser_kac_preclean_ovflw;
  size_t _ser_kac_ovflw;
  size_t _par_kac_ovflw;
  NOT_PRODUCT(size_t _num_par_pushes;)

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;
  CMSIsAliveClosure   _is_alive_closure;
  // keep this textually after _markBitMap and _span; c'tor dependency

  ConcurrentMarkSweepThread* _cmsThread;  // the thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling)            = {Marking}
  // next_state(Marking)           = {Precleaning, Sweeping}
  // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking)      = {Sweeping}
  // next_state(Sweeping)          = {Resizing}
  // next_state(Resizing)          = {Resetting}
  // next_state(Resetting)         = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling ==  post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
  enum CollectorState {
    Resizing          = 0,
    Resetting         = 1,
    Idling            = 2,
    InitialMarking    = 3,
    Marking           = 4,
    Precleaning       = 5,
    AbortablePreclean = 6,
    FinalMarking      = 7,
    Sweeping          = 8
  };
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between foreground and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                        // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                        // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  uint   _sweepCount;
  // number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode.  When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord* _icms_start_limit;
  HeapWord* _icms_stop_limit;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues()    { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden();  // ... sample Eden space top

 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;           // the younger gen
  HeapWord**  _top_addr;            // ... Top of Eden
  HeapWord**  _end_addr;            // ... End of Eden
  HeapWord**  _eden_chunk_array;    // ... Eden partitioning array
  size_t      _eden_chunk_index;    // ... top (exclusive) of array
  size_t      _eden_chunk_capacity; // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord**  _survivor_chunk_array;
  size_t      _survivor_chunk_index;
  size_t      _survivor_chunk_capacity;
  size_t*     _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num, OopTaskQueue* to_work_q);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // the following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)     // sequential
  NOT_PRODUCT(bool par_simulate_overflow();) // MT version

  int _roots_scanning_options;
  int roots_scanning_options() const      { return _roots_scanning_options; }
  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work

  // a return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch); // concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(bool asynch); // single-threaded marking
  bool do_marking_mt(bool asynch); // multi-threaded marking

 private:

  // concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // Resize the generations included in the collector.
  void compute_new_size();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
                                         bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
                          CollectorState first_state, bool should_start_over);

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC.  waitForForegroundGC() is called by the background
  // collector.  If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering:  recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               ConcurrentMarkSweepGeneration* permGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock() const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs);
  void collect_in_foreground(bool clear_all_soft_refs);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  bool update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  jlong time_of_last_gc(jlong now) {
    if (_collectorState <= Idling) {
      // gc not in progress
      return _time_of_last_gc;
    } else {
      // collection in progress
      return now;
    }
  }

  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);

  CMSBitMap* markBitMap() { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch); // a return value of false indicates failure
                                   // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics() PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters() { return _gc_counters; }

  // timer stuff
  void   startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
  void   stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();  }
  void   resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
  double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t numDirtyCards()                  { return _numDirtyCards; }

  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
  uint sweepCount() const    { return _sweepCount; }
  void incrementSweepCount() { _sweepCount++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Convenience methods that check whether CMSIncrementalMode is enabled and
  // forward to the corresponding methods in ConcurrentMarkSweepThread.
  static void start_icms();
  static void stop_icms();    // Called at the end of the cms cycle.
  static void disable_icms(); // Called before a foreground collection.
  static void enable_icms();  // Called after a foreground collection.
  void icms_wait();           // Called at yield points.

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  // debugging
  void verify(bool);
  bool verify_after_remark();
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }

  // Get the bit map containing perm gen "deadness" information.
  CMSBitMap* perm_gen_verify_bit_map() { return &_perm_gen_verify_bit_map; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }
};
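
// Illustrative sketch (not part of the original header): the next_state() map
// documented in the CMS abstract state machine comment above, written out as
// a table-driven check. This is a standalone reading aid, not the collector's
// actual control flow (which also handles foreground takeover and abort
// paths); enumerator names are prefixed to avoid clashing with the nested
// CollectorState enum, and the comment's map does not list InitialMarking
// explicitly.
enum CmsPhase {
  phase_Resizing = 0, phase_Resetting = 1, phase_Idling = 2,
  phase_InitialMarking = 3, phase_Marking = 4, phase_Precleaning = 5,
  phase_AbortablePreclean = 6, phase_FinalMarking = 7, phase_Sweeping = 8
};

static bool legal_transition(CmsPhase from, CmsPhase to) {
  switch (from) {
    case phase_Idling:            return to == phase_Marking;
    case phase_Marking:           return to == phase_Precleaning ||
                                         to == phase_Sweeping;
    case phase_Precleaning:       return to == phase_AbortablePreclean ||
                                         to == phase_FinalMarking;
    case phase_AbortablePreclean: return to == phase_FinalMarking;
    case phase_FinalMarking:      return to == phase_Sweeping;
    case phase_Sweeping:          return to == phase_Resizing;
    case phase_Resizing:          return to == phase_Resetting;
    case phase_Resetting:         return to == phase_Idling;
    default:                      return false;
  }
}
// e.g. legal_transition(phase_Precleaning, phase_FinalMarking) is true, while
// legal_transition(phase_Sweeping, phase_Idling) is false: a cycle must pass
// through Resizing and Resetting before returning to Idling.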

class CMSExpansionCause : public AllStatic  {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};

class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*      _collector; // the collector that collects us
  CompactibleFreeListSpace* _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters* _gen_counters;
  GSpaceCounters*     _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    int _numObjectsPromoted;
    int _numWordsPromoted;
    int _numObjectsAllocated;
    int _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  enum CollectionTypes {
    Concurrent_collection_type     = 0,
    MS_foreground_collection_type  = 1,
    MSC_foreground_collection_type = 2,
    Unknown_collection_type        = 3
  };

  CollectionTypes _debug_collection_type;

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink generation by the specified number of bytes
  virtual void shrink_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, intx tr);

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return false;
  }

  // Override
  virtual void ref_processor_init();

  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // overrides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set. This is better than using the policy's should_collect_gen0_first(),
  // since that causes us to do an extra unnecessary pair of
  // restart-&-stop-world steps.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !CollectGen0First;
  }

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }

  // Incremental mode triggering.
  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  // Used by CMSStats to track direct allocation.  The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
                                         bool younger_handles_promotion_failure) const;

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

duke@435 1180 // GC prologue and epilogue
duke@435 1181 void gc_prologue(bool full);
duke@435 1182 void gc_prologue_work(bool full, bool registerClosure,
duke@435 1183 ModUnionClosure* modUnionClosure);
duke@435 1184 void gc_epilogue(bool full);
duke@435 1185 void gc_epilogue_work(bool full);
duke@435 1186
duke@435 1187 // Time since last GC of this generation
duke@435 1188 jlong time_of_last_gc(jlong now) {
duke@435 1189 return collector()->time_of_last_gc(now);
duke@435 1190 }
duke@435 1191 void update_time_of_last_gc(jlong now) {
duke@435 1192 collector()-> update_time_of_last_gc(now);
duke@435 1193 }
duke@435 1194
duke@435 1195 // Allocation failure
duke@435 1196 void expand(size_t bytes, size_t expand_bytes,
duke@435 1197 CMSExpansionCause::Cause cause);
jmasa@706 1198 virtual bool expand(size_t bytes, size_t expand_bytes);
duke@435 1199 void shrink(size_t bytes);
duke@435 1200 HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
duke@435 1201 bool expand_and_ensure_spooling_space(PromotionInfo* promo);
duke@435 1202
duke@435 1203 // Iteration support and related enquiries
duke@435 1204 void save_marks();
duke@435 1205 bool no_allocs_since_save_marks();
duke@435 1206 void object_iterate_since_last_GC(ObjectClosure* cl);
duke@435 1207 void younger_refs_iterate(OopsInGenClosure* cl);
duke@435 1208
duke@435 1209 // Iteration support specific to CMS generations
duke@435 1210 void save_sweep_limit();
duke@435 1211
duke@435 1212 // More iteration support
duke@435 1213 virtual void oop_iterate(MemRegion mr, OopClosure* cl);
duke@435 1214 virtual void oop_iterate(OopClosure* cl);
duke@435 1215 virtual void object_iterate(ObjectClosure* cl);
duke@435 1216
duke@435 1217 // Need to declare the full complement of closures, whether we'll
duke@435 1218 // override them or not, or get message from the compiler:
duke@435 1219 // oop_since_save_marks_iterate_nv hides virtual function...
duke@435 1220 #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
duke@435 1221 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
duke@435 1222 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)
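
// For illustration only: applied to one closure type, the declaration
// macro above expands along the lines of
//
//   void oop_since_save_marks_iterate_nv(ScanClosure* cl);
//
// with one declaration per (closure type, suffix) pair named by
// ALL_SINCE_SAVE_MARKS_CLOSURES; the exact type list and the nv/v
// suffixes are defined elsewhere (see genOopClosures.hpp), so treat
// ScanClosure here as an assumed example.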
duke@435 1223
duke@435 1224 // Smart allocation XXX -- move to CFLSpace?
duke@435 1225 void setNearLargestChunk();
duke@435 1226 bool isNearLargestChunk(HeapWord* addr);
duke@435 1227
duke@435 1228   // Get the chunk at the end of the space. Delegates to
duke@435 1229   // the space.
duke@435 1230 FreeChunk* find_chunk_at_end();
duke@435 1231
duke@435 1232 // Overriding of unused functionality (sharing not yet supported with CMS)
duke@435 1233 void pre_adjust_pointers();
duke@435 1234 void post_compact();
duke@435 1235
duke@435 1236 // Debugging
duke@435 1237 void prepare_for_verify();
duke@435 1238 void verify(bool allow_dirty);
duke@435 1239 void print_statistics() PRODUCT_RETURN;
duke@435 1240
duke@435 1241 // Performance Counters support
duke@435 1242 virtual void update_counters();
duke@435 1243 virtual void update_counters(size_t used);
duke@435 1244 void initialize_performance_counters();
duke@435 1245 CollectorCounters* counters() { return collector()->counters(); }
duke@435 1246
duke@435 1247 // Support for parallel remark of survivor space
duke@435 1248 void* get_data_recorder(int thr_num) {
duke@435 1249     // Delegate to the collector.
duke@435 1250 return collector()->get_data_recorder(thr_num);
duke@435 1251 }
duke@435 1252
duke@435 1253 // Printing
duke@435 1254 const char* name() const;
duke@435 1255 virtual const char* short_name() const { return "CMS"; }
duke@435 1256 void print() const;
duke@435 1257 void printOccupancy(const char* s);
duke@435 1258 bool must_be_youngest() const { return false; }
duke@435 1259 bool must_be_oldest() const { return true; }
duke@435 1260
duke@435 1261 void compute_new_size();
duke@435 1262
duke@435 1263 CollectionTypes debug_collection_type() { return _debug_collection_type; }
duke@435 1264 void rotate_debug_collection_type();
duke@435 1265 };
duke@435 1266
duke@435 1267 class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
duke@435 1268
duke@435 1269   // Return the size policy from the heap's collector
duke@435 1270   // policy cast to CMSAdaptiveSizePolicy*.
duke@435 1271 CMSAdaptiveSizePolicy* cms_size_policy() const;
duke@435 1272
duke@435 1273 // Resize the generation based on the adaptive size
duke@435 1274 // policy.
duke@435 1275 void resize(size_t cur_promo, size_t desired_promo);
duke@435 1276
duke@435 1277 // Return the GC counters from the collector policy
duke@435 1278 CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
duke@435 1279
duke@435 1280 virtual void shrink_by(size_t bytes);
duke@435 1281
duke@435 1282 public:
duke@435 1283 virtual void compute_new_size();
duke@435 1284 ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
duke@435 1285 int level, CardTableRS* ct,
duke@435 1286 bool use_adaptive_freelists,
duke@435 1287 FreeBlockDictionary::DictionaryChoice
duke@435 1288 dictionaryChoice) :
duke@435 1289 ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
duke@435 1290 use_adaptive_freelists, dictionaryChoice) {}
duke@435 1291
duke@435 1292 virtual const char* short_name() const { return "ASCMS"; }
duke@435 1293 virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }
duke@435 1294
duke@435 1295 virtual void update_counters();
duke@435 1296 virtual void update_counters(size_t used);
duke@435 1297 };
duke@435 1298
duke@435 1299 //
duke@435 1300 // Closures of various sorts used by CMS to accomplish its work
duke@435 1301 //
duke@435 1302
duke@435 1303 // This closure is used to check that a certain set of oops is empty.
duke@435 1304 class FalseClosure: public OopClosure {
duke@435 1305 public:
coleenp@548 1306 void do_oop(oop* p) { guarantee(false, "Should be an empty set"); }
coleenp@548 1307 void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
duke@435 1308 };
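
// Illustrative use (hypothetical, not from the sources): hand a
// FalseClosure to any oop iterator to assert that the visited set is
// empty; the guarantees above fire on the first oop encountered.
//
//   FalseClosure assert_no_oops;
//   gen->oop_iterate(&assert_no_oops);  // 'gen' is an assumed generation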
duke@435 1309
duke@435 1310 // This closure is used to do concurrent marking from the roots
duke@435 1311 // following the first checkpoint.
duke@435 1312 class MarkFromRootsClosure: public BitMapClosure {
duke@435 1313 CMSCollector* _collector;
duke@435 1314 MemRegion _span;
duke@435 1315 CMSBitMap* _bitMap;
duke@435 1316 CMSBitMap* _mut;
duke@435 1317 CMSMarkStack* _markStack;
duke@435 1318 CMSMarkStack* _revisitStack;
duke@435 1319 bool _yield;
duke@435 1320 int _skipBits;
duke@435 1321 HeapWord* _finger;
duke@435 1322 HeapWord* _threshold;
duke@435 1323 DEBUG_ONLY(bool _verifying;)
duke@435 1324
duke@435 1325 public:
duke@435 1326 MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
duke@435 1327 CMSBitMap* bitMap,
duke@435 1328 CMSMarkStack* markStack,
duke@435 1329 CMSMarkStack* revisitStack,
duke@435 1330 bool should_yield, bool verifying = false);
ysr@777 1331 bool do_bit(size_t offset);
duke@435 1332 void reset(HeapWord* addr);
duke@435 1333 inline void do_yield_check();
duke@435 1334
duke@435 1335 private:
duke@435 1336 void scanOopsInOop(HeapWord* ptr);
duke@435 1337 void do_yield_work();
duke@435 1338 };
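
// A hedged sketch of the driving protocol (the real iteration lives in
// the bit map code, not here): set bits are visited left to right and
// do_bit() is called for each; returning false terminates the walk, and
// reset() repositions the closure, e.g. after a yield.
//
//   for (size_t offset = 0; offset < bm_size; offset++) {   // assumed loop
//     if (bit_at(offset) && !cl->do_bit(offset)) break;
//   }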
duke@435 1339
duke@435 1340 // This closure is used to do concurrent multi-threaded
duke@435 1341 // marking from the roots following the first checkpoint.
duke@435 1342 // XXX This should really be a subclass of the serial version
duke@435 1343 // above, but I have not had the time to refactor things cleanly.
duke@435 1344 // That will be done for Dolphin.
duke@435 1345 class Par_MarkFromRootsClosure: public BitMapClosure {
duke@435 1346 CMSCollector* _collector;
duke@435 1347 MemRegion _whole_span;
duke@435 1348 MemRegion _span;
duke@435 1349 CMSBitMap* _bit_map;
duke@435 1350 CMSBitMap* _mut;
duke@435 1351 OopTaskQueue* _work_queue;
duke@435 1352 CMSMarkStack* _overflow_stack;
duke@435 1353 CMSMarkStack* _revisit_stack;
duke@435 1354 bool _yield;
duke@435 1355 int _skip_bits;
duke@435 1356 HeapWord* _finger;
duke@435 1357 HeapWord* _threshold;
duke@435 1358 CMSConcMarkingTask* _task;
duke@435 1359 public:
duke@435 1360 Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
duke@435 1361 MemRegion span,
duke@435 1362 CMSBitMap* bit_map,
duke@435 1363 OopTaskQueue* work_queue,
duke@435 1364 CMSMarkStack* overflow_stack,
duke@435 1365 CMSMarkStack* revisit_stack,
duke@435 1366 bool should_yield);
ysr@777 1367 bool do_bit(size_t offset);
duke@435 1368 inline void do_yield_check();
duke@435 1369
duke@435 1370 private:
duke@435 1371 void scan_oops_in_oop(HeapWord* ptr);
duke@435 1372 void do_yield_work();
duke@435 1373 bool get_work_from_overflow_stack();
duke@435 1374 };
duke@435 1375
duke@435 1376 // The following closures are used to do certain kinds of verification of
duke@435 1377 // CMS marking.
duke@435 1378 class PushAndMarkVerifyClosure: public OopClosure {
duke@435 1379 CMSCollector* _collector;
duke@435 1380 MemRegion _span;
duke@435 1381 CMSBitMap* _verification_bm;
duke@435 1382 CMSBitMap* _cms_bm;
duke@435 1383 CMSMarkStack* _mark_stack;
coleenp@548 1384 protected:
coleenp@548 1385 void do_oop(oop p);
coleenp@548 1386 template <class T> inline void do_oop_work(T *p) {
coleenp@548 1387 oop obj = oopDesc::load_decode_heap_oop_not_null(p);
coleenp@548 1388 do_oop(obj);
coleenp@548 1389 }
duke@435 1390 public:
duke@435 1391 PushAndMarkVerifyClosure(CMSCollector* cms_collector,
duke@435 1392 MemRegion span,
duke@435 1393 CMSBitMap* verification_bm,
duke@435 1394 CMSBitMap* cms_bm,
duke@435 1395 CMSMarkStack* mark_stack);
duke@435 1396 void do_oop(oop* p);
coleenp@548 1397 void do_oop(narrowOop* p);
duke@435 1398 // Deal with a stack overflow condition
duke@435 1399 void handle_stack_overflow(HeapWord* lost);
duke@435 1400 };
duke@435 1401
duke@435 1402 class MarkFromRootsVerifyClosure: public BitMapClosure {
duke@435 1403 CMSCollector* _collector;
duke@435 1404 MemRegion _span;
duke@435 1405 CMSBitMap* _verification_bm;
duke@435 1406 CMSBitMap* _cms_bm;
duke@435 1407 CMSMarkStack* _mark_stack;
duke@435 1408 HeapWord* _finger;
duke@435 1409 PushAndMarkVerifyClosure _pam_verify_closure;
duke@435 1410 public:
duke@435 1411 MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
duke@435 1412 CMSBitMap* verification_bm,
duke@435 1413 CMSBitMap* cms_bm,
duke@435 1414 CMSMarkStack* mark_stack);
ysr@777 1415 bool do_bit(size_t offset);
duke@435 1416 void reset(HeapWord* addr);
duke@435 1417 };
duke@435 1418
duke@435 1419
duke@435 1420 // This closure is used to check that a certain set of bits is
duke@435 1421 // "empty" (i.e. the bit vector doesn't have any 1-bits).
duke@435 1422 class FalseBitMapClosure: public BitMapClosure {
duke@435 1423 public:
ysr@777 1424 bool do_bit(size_t offset) {
duke@435 1425 guarantee(false, "Should not have a 1 bit");
ysr@777 1426 return true;
duke@435 1427 }
duke@435 1428 };
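
// Illustrative use (hypothetical): apply to a bit map range that is
// expected to be clear; the guarantee above fires on any set bit,
// assuming the usual BitMap::iterate protocol.
//
//   FalseBitMapClosure assert_no_bits;
//   bit_map->iterate(&assert_no_bits);  // 'bit_map' is assumed in scope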
duke@435 1429
duke@435 1430 // This closure is used during the second checkpointing phase
duke@435 1431 // to rescan the marked objects on the dirty cards in the mod
duke@435 1432 // union table and the card table proper. It's invoked via
duke@435 1433 // MarkFromDirtyCardsClosure below. It uses either
duke@435 1434 // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
duke@435 1435 // declared in genOopClosures.hpp to accomplish some of its work.
duke@435 1436 // In the parallel case the bitMap is shared, so access to
duke@435 1437 // it needs to be suitably synchronized for updates by embedded
duke@435 1438 // closures that update it; however, this closure itself only
duke@435 1439 // reads the bit map and, because it is idempotent, is immune
duke@435 1440 // to reading stale values.
duke@435 1441 class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
duke@435 1442 #ifdef ASSERT
duke@435 1443 CMSCollector* _collector;
duke@435 1444 MemRegion _span;
duke@435 1445 union {
duke@435 1446 CMSMarkStack* _mark_stack;
duke@435 1447 OopTaskQueue* _work_queue;
duke@435 1448 };
duke@435 1449 #endif // ASSERT
duke@435 1450 bool _parallel;
duke@435 1451 CMSBitMap* _bit_map;
duke@435 1452 union {
duke@435 1453 MarkRefsIntoAndScanClosure* _scan_closure;
duke@435 1454 Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
duke@435 1455 };
duke@435 1456
duke@435 1457 public:
duke@435 1458 ScanMarkedObjectsAgainClosure(CMSCollector* collector,
duke@435 1459 MemRegion span,
duke@435 1460 ReferenceProcessor* rp,
duke@435 1461 CMSBitMap* bit_map,
duke@435 1462 CMSMarkStack* mark_stack,
duke@435 1463 CMSMarkStack* revisit_stack,
duke@435 1464 MarkRefsIntoAndScanClosure* cl):
duke@435 1465 #ifdef ASSERT
duke@435 1466 _collector(collector),
duke@435 1467 _span(span),
duke@435 1468 _mark_stack(mark_stack),
duke@435 1469 #endif // ASSERT
duke@435 1470 _parallel(false),
duke@435 1471 _bit_map(bit_map),
duke@435 1472 _scan_closure(cl) { }
duke@435 1473
duke@435 1474 ScanMarkedObjectsAgainClosure(CMSCollector* collector,
duke@435 1475 MemRegion span,
duke@435 1476 ReferenceProcessor* rp,
duke@435 1477 CMSBitMap* bit_map,
duke@435 1478 OopTaskQueue* work_queue,
duke@435 1479 CMSMarkStack* revisit_stack,
duke@435 1480 Par_MarkRefsIntoAndScanClosure* cl):
duke@435 1481 #ifdef ASSERT
duke@435 1482 _collector(collector),
duke@435 1483 _span(span),
duke@435 1484 _work_queue(work_queue),
duke@435 1485 #endif // ASSERT
duke@435 1486 _parallel(true),
duke@435 1487 _bit_map(bit_map),
duke@435 1488 _par_scan_closure(cl) { }
duke@435 1489
duke@435 1490 void do_object(oop obj) {
duke@435 1491     guarantee(false, "Call do_object_bm(oop, MemRegion) instead");
duke@435 1492 }
duke@435 1493 bool do_object_b(oop obj) {
duke@435 1494     guarantee(false, "Call the do_object_bm(oop, MemRegion) form instead");
duke@435 1495 return false;
duke@435 1496 }
duke@435 1497 bool do_object_bm(oop p, MemRegion mr);
duke@435 1498 };
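
// A hedged sketch of the dispatch inside do_object_bm() (the real body
// lives in the .cpp file): _parallel discriminates the anonymous unions
// above, so only the member set by the constructor that ran is touched.
//
//   if (_parallel) {
//     // use _par_scan_closure (and, in ASSERT builds, _work_queue)
//   } else {
//     // use _scan_closure (and, in ASSERT builds, _mark_stack)
//   }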
duke@435 1499
duke@435 1500 // This closure is used during the second checkpointing phase
duke@435 1501 // to rescan the marked objects on the dirty cards in the mod
duke@435 1502 // union table and the card table proper. It invokes
duke@435 1503 // ScanMarkedObjectsAgainClosure above to accomplish much of its work.
duke@435 1504 // In the parallel case, the bit map is shared and requires
duke@435 1505 // synchronized access.
duke@435 1506 class MarkFromDirtyCardsClosure: public MemRegionClosure {
duke@435 1507 CompactibleFreeListSpace* _space;
duke@435 1508 ScanMarkedObjectsAgainClosure _scan_cl;
duke@435 1509 size_t _num_dirty_cards;
duke@435 1510
duke@435 1511 public:
duke@435 1512 MarkFromDirtyCardsClosure(CMSCollector* collector,
duke@435 1513 MemRegion span,
duke@435 1514 CompactibleFreeListSpace* space,
duke@435 1515 CMSBitMap* bit_map,
duke@435 1516 CMSMarkStack* mark_stack,
duke@435 1517 CMSMarkStack* revisit_stack,
duke@435 1518 MarkRefsIntoAndScanClosure* cl):
duke@435 1519 _space(space),
duke@435 1520 _num_dirty_cards(0),
duke@435 1521 _scan_cl(collector, span, collector->ref_processor(), bit_map,
duke@435 1522 mark_stack, revisit_stack, cl) { }
duke@435 1523
duke@435 1524 MarkFromDirtyCardsClosure(CMSCollector* collector,
duke@435 1525 MemRegion span,
duke@435 1526 CompactibleFreeListSpace* space,
duke@435 1527 CMSBitMap* bit_map,
duke@435 1528 OopTaskQueue* work_queue,
duke@435 1529 CMSMarkStack* revisit_stack,
duke@435 1530 Par_MarkRefsIntoAndScanClosure* cl):
duke@435 1531 _space(space),
duke@435 1532 _num_dirty_cards(0),
duke@435 1533 _scan_cl(collector, span, collector->ref_processor(), bit_map,
duke@435 1534 work_queue, revisit_stack, cl) { }
duke@435 1535
duke@435 1536 void do_MemRegion(MemRegion mr);
duke@435 1537 void set_space(CompactibleFreeListSpace* space) { _space = space; }
duke@435 1538 size_t num_dirty_cards() { return _num_dirty_cards; }
duke@435 1539 };
duke@435 1540
duke@435 1541 // This closure is used in the non-product build to check
duke@435 1542 // that a given set of MemRegions is empty (it should never be invoked).
duke@435 1543 class FalseMemRegionClosure: public MemRegionClosure {
duke@435 1544 void do_MemRegion(MemRegion mr) {
duke@435 1545 guarantee(!mr.is_empty(), "Shouldn't be empty");
duke@435 1546 guarantee(false, "Should never be here");
duke@435 1547 }
duke@435 1548 };
duke@435 1549
duke@435 1550 // This closure is used during the precleaning phase
duke@435 1551 // to "carefully" rescan marked objects on dirty cards.
duke@435 1552 // It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
duke@435 1553 // to accomplish some of its work.
duke@435 1554 class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
duke@435 1555 CMSCollector* _collector;
duke@435 1556 MemRegion _span;
duke@435 1557 bool _yield;
duke@435 1558 Mutex* _freelistLock;
duke@435 1559 CMSBitMap* _bitMap;
duke@435 1560 CMSMarkStack* _markStack;
duke@435 1561 MarkRefsIntoAndScanClosure* _scanningClosure;
duke@435 1562
duke@435 1563 public:
duke@435 1564 ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
duke@435 1565 MemRegion span,
duke@435 1566 CMSBitMap* bitMap,
duke@435 1567 CMSMarkStack* markStack,
duke@435 1568 CMSMarkStack* revisitStack,
duke@435 1569 MarkRefsIntoAndScanClosure* cl,
duke@435 1570 bool should_yield):
duke@435 1571 _collector(collector),
duke@435 1572 _span(span),
duke@435 1573 _yield(should_yield),
duke@435 1574 _bitMap(bitMap),
duke@435 1575 _markStack(markStack),
duke@435 1576 _scanningClosure(cl) {
duke@435 1577 }
duke@435 1578
duke@435 1579 void do_object(oop p) {
duke@435 1580 guarantee(false, "call do_object_careful instead");
duke@435 1581 }
duke@435 1582
duke@435 1583 size_t do_object_careful(oop p) {
duke@435 1584 guarantee(false, "Unexpected caller");
duke@435 1585 return 0;
duke@435 1586 }
duke@435 1587
duke@435 1588 size_t do_object_careful_m(oop p, MemRegion mr);
duke@435 1589
duke@435 1590 void setFreelistLock(Mutex* m) {
duke@435 1591 _freelistLock = m;
duke@435 1592 _scanningClosure->set_freelistLock(m);
duke@435 1593 }
duke@435 1594
duke@435 1595 private:
duke@435 1596 inline bool do_yield_check();
duke@435 1597
duke@435 1598 void do_yield_work();
duke@435 1599 };
duke@435 1600
duke@435 1601 class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
duke@435 1602 CMSCollector* _collector;
duke@435 1603 MemRegion _span;
duke@435 1604 bool _yield;
duke@435 1605 CMSBitMap* _bit_map;
duke@435 1606 CMSMarkStack* _mark_stack;
duke@435 1607 PushAndMarkClosure* _scanning_closure;
duke@435 1608 unsigned int _before_count;
duke@435 1609
duke@435 1610 public:
duke@435 1611 SurvivorSpacePrecleanClosure(CMSCollector* collector,
duke@435 1612 MemRegion span,
duke@435 1613 CMSBitMap* bit_map,
duke@435 1614 CMSMarkStack* mark_stack,
duke@435 1615 PushAndMarkClosure* cl,
duke@435 1616 unsigned int before_count,
duke@435 1617 bool should_yield):
duke@435 1618 _collector(collector),
duke@435 1619 _span(span),
duke@435 1620 _yield(should_yield),
duke@435 1621 _bit_map(bit_map),
duke@435 1622 _mark_stack(mark_stack),
duke@435 1623 _scanning_closure(cl),
duke@435 1624 _before_count(before_count)
duke@435 1625 { }
duke@435 1626
duke@435 1627 void do_object(oop p) {
duke@435 1628 guarantee(false, "call do_object_careful instead");
duke@435 1629 }
duke@435 1630
duke@435 1631 size_t do_object_careful(oop p);
duke@435 1632
duke@435 1633 size_t do_object_careful_m(oop p, MemRegion mr) {
duke@435 1634 guarantee(false, "Unexpected caller");
duke@435 1635 return 0;
duke@435 1636 }
duke@435 1637
duke@435 1638 private:
duke@435 1639 inline void do_yield_check();
duke@435 1640 void do_yield_work();
duke@435 1641 };
duke@435 1642
duke@435 1643 // This closure is used to accomplish the sweeping work
duke@435 1644 // after the second checkpoint but before the concurrent reset
duke@435 1645 // phase.
duke@435 1646 //
duke@435 1647 // Terminology
duke@435 1648 // left hand chunk (LHC) - block of one or more chunks currently being
duke@435 1649 // coalesced. The LHC is available for coalescing with a new chunk.
duke@435 1650 // right hand chunk (RHC) - block that is currently being swept that is
duke@435 1651 // free or garbage that can be coalesced with the LHC.
duke@435 1652 // _inFreeRange is true if there is currently a LHC
duke@435 1653 // _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
duke@435 1654 // _freeRangeInFreeLists is true if the LHC is in the free lists.
duke@435 1655 // _freeFinger is the address of the current LHC
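
// Worked example (illustrative): sweeping left to right over
//   [ free | garbage | live | free ]
// the first free chunk starts a range (_inFreeRange becomes true and
// _freeFinger points at it -- it is the LHC); the adjacent garbage chunk
// is the RHC and is coalesced into the LHC, setting
// _lastFreeRangeCoalesced; the live object ends the range, which is
// returned to the free lists; the trailing free chunk starts a new range.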
duke@435 1656 class SweepClosure: public BlkClosureCareful {
duke@435 1657 CMSCollector* _collector; // collector doing the work
duke@435 1658 ConcurrentMarkSweepGeneration* _g; // Generation being swept
duke@435 1659 CompactibleFreeListSpace* _sp; // Space being swept
duke@435 1660 HeapWord* _limit;
duke@435 1661 Mutex* _freelistLock; // Free list lock (in space)
duke@435 1662 CMSBitMap* _bitMap; // Marking bit map (in
duke@435 1663 // generation)
duke@435 1664 bool _inFreeRange; // Indicates if we are in the
duke@435 1665 // midst of a free run
duke@435 1666 bool _freeRangeInFreeLists;
duke@435 1667 // Often, we have just found
duke@435 1668 // a free chunk and started
duke@435 1669 // a new free range; we do not
duke@435 1670 // eagerly remove this chunk from
duke@435 1671 // the free lists unless there is
duke@435 1672 // a possibility of coalescing.
duke@435 1673 // When true, this flag indicates
duke@435 1674 // that the _freeFinger below
duke@435 1675 // points to a potentially free chunk
duke@435 1676 // that may still be in the free lists
duke@435 1677 bool _lastFreeRangeCoalesced;
duke@435 1678 // free range contains chunks
duke@435 1679 // coalesced
duke@435 1680 bool _yield;
duke@435 1681 // Whether sweeping should be
duke@435 1682 // done with yields. For instance
duke@435 1683 // when done by the foreground
duke@435 1684 // collector we shouldn't yield.
duke@435 1685 HeapWord* _freeFinger; // When _inFreeRange is set, the
duke@435 1686 // pointer to the "left hand
duke@435 1687 // chunk"
duke@435 1688 size_t _freeRangeSize;
duke@435 1689 // When _inFreeRange is set, this
duke@435 1690 // indicates the accumulated size
duke@435 1691 // of the "left hand chunk"
duke@435 1692 NOT_PRODUCT(
duke@435 1693 size_t _numObjectsFreed;
duke@435 1694 size_t _numWordsFreed;
duke@435 1695 size_t _numObjectsLive;
duke@435 1696 size_t _numWordsLive;
duke@435 1697 size_t _numObjectsAlreadyFree;
duke@435 1698 size_t _numWordsAlreadyFree;
duke@435 1699 FreeChunk* _last_fc;
duke@435 1700 )
duke@435 1701 private:
duke@435 1702 // Code that is common to a free chunk or garbage when
duke@435 1703 // encountered during sweeping.
duke@435 1704 void doPostIsFreeOrGarbageChunk(FreeChunk *fc,
duke@435 1705 size_t chunkSize);
duke@435 1706 // Process a free chunk during sweeping.
duke@435 1707 void doAlreadyFreeChunk(FreeChunk *fc);
duke@435 1708 // Process a garbage chunk during sweeping.
duke@435 1709 size_t doGarbageChunk(FreeChunk *fc);
duke@435 1710 // Process a live chunk during sweeping.
duke@435 1711 size_t doLiveChunk(FreeChunk* fc);
duke@435 1712
duke@435 1713 // Accessors.
duke@435 1714 HeapWord* freeFinger() const { return _freeFinger; }
duke@435 1715 void set_freeFinger(HeapWord* v) { _freeFinger = v; }
duke@435 1716 size_t freeRangeSize() const { return _freeRangeSize; }
duke@435 1717 void set_freeRangeSize(size_t v) { _freeRangeSize = v; }
duke@435 1718 bool inFreeRange() const { return _inFreeRange; }
duke@435 1719 void set_inFreeRange(bool v) { _inFreeRange = v; }
duke@435 1720 bool lastFreeRangeCoalesced() const { return _lastFreeRangeCoalesced; }
duke@435 1721 void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
duke@435 1722 bool freeRangeInFreeLists() const { return _freeRangeInFreeLists; }
duke@435 1723 void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }
duke@435 1724
duke@435 1725 // Initialize a free range.
duke@435 1726 void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
duke@435 1727 // Return this chunk to the free lists.
duke@435 1728 void flushCurFreeChunk(HeapWord* chunk, size_t size);
duke@435 1729
duke@435 1730 // Check if we should yield and do so when necessary.
duke@435 1731 inline void do_yield_check(HeapWord* addr);
duke@435 1732
duke@435 1733 // Yield
duke@435 1734 void do_yield_work(HeapWord* addr);
duke@435 1735
duke@435 1736 // Debugging/Printing
duke@435 1737 void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;
duke@435 1738
duke@435 1739 public:
duke@435 1740 SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
duke@435 1741 CMSBitMap* bitMap, bool should_yield);
duke@435 1742 ~SweepClosure();
duke@435 1743
duke@435 1744 size_t do_blk_careful(HeapWord* addr);
duke@435 1745 };
duke@435 1746
duke@435 1747 // Closures related to weak references processing
duke@435 1748
duke@435 1749 // During CMS' weak reference processing, this is a
duke@435 1750 // work-routine/closure used to complete the transitive
duke@435 1751 // marking of objects as live once an initial set has
duke@435 1752 // been completely accumulated.
ysr@887 1753 // This closure is currently used both during the final
ysr@887 1754 // remark stop-world phase and during the concurrent
ysr@887 1755 // precleaning of the discovered reference lists.
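
// A hedged sketch of what do_void() amounts to (the real loop also marks
// through _bit_map, restricts attention to _span, and works with
// _keep_alive; method names below are assumed):
//
//   while (!_mark_stack->isEmpty()) {
//     oop obj = _mark_stack->pop();
//     // scan obj's fields, pushing newly marked objects back on the stack
//   }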
duke@435 1756 class CMSDrainMarkingStackClosure: public VoidClosure {
duke@435 1757 CMSCollector* _collector;
duke@435 1758 MemRegion _span;
duke@435 1759 CMSMarkStack* _mark_stack;
duke@435 1760 CMSBitMap* _bit_map;
duke@435 1761 CMSKeepAliveClosure* _keep_alive;
ysr@887 1762 bool _concurrent_precleaning;
duke@435 1763 public:
duke@435 1764 CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
duke@435 1765 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
ysr@887 1766 CMSKeepAliveClosure* keep_alive,
ysr@887 1767 bool cpc):
duke@435 1768 _collector(collector),
duke@435 1769 _span(span),
duke@435 1770 _bit_map(bit_map),
duke@435 1771 _mark_stack(mark_stack),
ysr@887 1772 _keep_alive(keep_alive),
ysr@887 1773 _concurrent_precleaning(cpc) {
ysr@887 1774 assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
ysr@887 1775 "Mismatch");
ysr@887 1776 }
duke@435 1777
duke@435 1778 void do_void();
duke@435 1779 };
duke@435 1780
duke@435 1781 // A parallel version of CMSDrainMarkingStackClosure above.
duke@435 1782 class CMSParDrainMarkingStackClosure: public VoidClosure {
duke@435 1783 CMSCollector* _collector;
duke@435 1784 MemRegion _span;
duke@435 1785 OopTaskQueue* _work_queue;
duke@435 1786 CMSBitMap* _bit_map;
duke@435 1787 CMSInnerParMarkAndPushClosure _mark_and_push;
duke@435 1788
duke@435 1789 public:
duke@435 1790 CMSParDrainMarkingStackClosure(CMSCollector* collector,
duke@435 1791 MemRegion span, CMSBitMap* bit_map,
duke@435 1792 OopTaskQueue* work_queue):
duke@435 1793 _collector(collector),
duke@435 1794 _span(span),
duke@435 1795 _bit_map(bit_map),
duke@435 1796 _work_queue(work_queue),
duke@435 1797 _mark_and_push(collector, span, bit_map, work_queue) { }
duke@435 1798
duke@435 1799 public:
duke@435 1800 void trim_queue(uint max);
duke@435 1801 void do_void();
duke@435 1802 };
duke@435 1803
duke@435 1804 // Allow yielding or short-circuiting of reference list
duke@435 1805 // precleaning work.
duke@435 1806 class CMSPrecleanRefsYieldClosure: public YieldClosure {
duke@435 1807 CMSCollector* _collector;
duke@435 1808 void do_yield_work();
duke@435 1809 public:
duke@435 1810 CMSPrecleanRefsYieldClosure(CMSCollector* collector):
duke@435 1811 _collector(collector) {}
duke@435 1812 virtual bool should_return();
duke@435 1813 };
duke@435 1814
duke@435 1815
duke@435 1816 // Convenience class that locks free list locks for given CMS collector
duke@435 1817 class FreelistLocker: public StackObj {
duke@435 1818 private:
duke@435 1819 CMSCollector* _collector;
duke@435 1820 public:
duke@435 1821 FreelistLocker(CMSCollector* collector):
duke@435 1822 _collector(collector) {
duke@435 1823 _collector->getFreelistLocks();
duke@435 1824 }
duke@435 1825
duke@435 1826 ~FreelistLocker() {
duke@435 1827 _collector->releaseFreelistLocks();
duke@435 1828 }
duke@435 1829 };
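
// Typical (illustrative) use: construct on the stack so that the free
// list locks are held for exactly one scope and released on every exit
// path, including early returns.
//
//   {
//     FreelistLocker fll(collector);  // getFreelistLocks()
//     // ... manipulate the free lists safely ...
//   }                                 // releaseFreelistLocks() on scope exit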
duke@435 1830
duke@435 1831 // Mark all dead objects in a given space.
duke@435 1832 class MarkDeadObjectsClosure: public BlkClosure {
duke@435 1833 const CMSCollector* _collector;
duke@435 1834 const CompactibleFreeListSpace* _sp;
duke@435 1835 CMSBitMap* _live_bit_map;
duke@435 1836 CMSBitMap* _dead_bit_map;
duke@435 1837 public:
duke@435 1838 MarkDeadObjectsClosure(const CMSCollector* collector,
duke@435 1839 const CompactibleFreeListSpace* sp,
duke@435 1840 CMSBitMap *live_bit_map,
duke@435 1841 CMSBitMap *dead_bit_map) :
duke@435 1842 _collector(collector),
duke@435 1843 _sp(sp),
duke@435 1844 _live_bit_map(live_bit_map),
duke@435 1845 _dead_bit_map(dead_bit_map) {}
duke@435 1846 size_t do_blk(HeapWord* addr);
duke@435 1847 };
