Mon, 10 May 2010 12:31:52 -0700
6951188: CMS: move PromotionInfo into its own file
Summary: Moved PromotionInfo and friends into new files promotionInfo.{h,c}pp from their previous compactibleFreeListSpace.{h,c}pp home.
Reviewed-by: apetrusenko
duke@435 | 1 | /* |
jmasa@1822 | 2 | * Copyright 2001-2010 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | // ConcurrentMarkSweepGeneration is in support of a concurrent |
duke@435 | 26 | // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker |
duke@435 | 27 | // style. We assume, for now, that this generation is always the |
duke@435 | 28 | // seniormost generation (modulo the PermGeneration), and for simplicity |
duke@435 | 29 | // in the first implementation, that this generation is a single compactible |
duke@435 | 30 | // space. Neither of these restrictions appears essential, and will be |
duke@435 | 31 | // relaxed in the future when more time is available to implement the |
duke@435 | 32 | // greater generality (and there's a need for it). |
duke@435 | 33 | // |
duke@435 | 34 | // Concurrent mode failures are currently handled by |
duke@435 | 35 | // means of a sliding mark-compact. |
duke@435 | 36 | |
// Forward declarations of collaborating types defined in other headers.
class CMSAdaptiveSizePolicy;
class CMSConcMarkingTask;
class CMSGCAdaptivePolicyCounters;
class ConcurrentMarkSweepGeneration;
class ConcurrentMarkSweepPolicy;
class ConcurrentMarkSweepThread;
class CompactibleFreeListSpace;
class FreeChunk;
class PromotionInfo;
class ScanMarkedObjectsAgainCarefullyClosure;
duke@435 | 47 | |
// A generic CMS bit map. It's the basis for both the CMS marking bit map
// as well as for the mod union table (in each case only a subset of the
// methods are used). This is essentially a wrapper around the BitMap class,
// with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
// we have _shifter == 0. and for the mod union table we have
// shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
// XXX 64-bit issues in BitMap?
class CMSBitMap VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;

  HeapWord* _bmStartWord;   // base address of range covered by map
  size_t    _bmWordSize;    // map size (in #HeapWords covered)
  const int _shifter;       // shifts to convert HeapWord to bit position
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap    _bm;            // the bit map itself
 public:
  Mutex* const _lock;       // mutex protecting _bm;
                            // (public so collaborators can take the lock
                            // directly; see also lock() below)

 public:
  // constructor
  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);

  // allocates the actual storage for the map
  bool allocate(MemRegion mr);
  // field getter
  Mutex* lock() const { return _lock; }
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize; }
  size_t    sizeInBits()  const { return _bm.size(); }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // reading marks
  bool isMarked(HeapWord* addr) const;
  bool par_isMarked(HeapWord* addr) const; // do not lock checks
  bool isUnmarked(HeapWord* addr) const;
  bool isAllClear() const;

  // writing marks
  void mark(HeapWord* addr);
  // For marking by parallel GC threads;
  // returns true if we did, false if another thread did
  bool par_mark(HeapWord* addr);

  // Range variants; the "large" forms are for ranges expected to span
  // many words (presumably using a bulk bit-map operation -- see the .cpp),
  // and the par_* forms are for use by parallel GC threads.
  void mark_range(MemRegion mr);
  void par_mark_range(MemRegion mr);
  void mark_large_range(MemRegion mr);
  void par_mark_large_range(MemRegion mr);
  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
  void clear_range(MemRegion mr);
  void par_clear_range(MemRegion mr);
  void clear_large_range(MemRegion mr);
  void par_clear_large_range(MemRegion mr);
  void clear_all();
  void clear_all_incrementally();  // Not yet implemented!!

  NOT_PRODUCT(
    // checks the memory region for validity
    void region_invariant(MemRegion mr);
  )

  // iteration
  void iterate(BitMapClosure* cl) {
    _bm.iterate(cl);
  }
  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
  void dirty_range_iterate_clear(MemRegionClosure* cl);
  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);

  // auxiliary support for iteration
  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
                                     HeapWord* end_addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
                                       HeapWord* end_addr) const;
  MemRegion getAndClearMarkedRegion(HeapWord* addr);
  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
                                    HeapWord* end_addr);

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const;
  size_t    heapWordToOffset(HeapWord* addr) const;
  size_t    heapWordDiffToOffsetDiff(size_t diff) const;

  // debugging
  // is this address range covered by the bit-map?
  NOT_PRODUCT(
    bool covers(MemRegion mr) const;
    bool covers(HeapWord* start, size_t size = 0) const;
  )
  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
};
duke@435 | 145 | |
duke@435 | 146 | // Represents a marking stack used by the CMS collector. |
duke@435 | 147 | // Ideally this should be GrowableArray<> just like MSC's marking stack(s). |
duke@435 | 148 | class CMSMarkStack: public CHeapObj { |
duke@435 | 149 | // |
duke@435 | 150 | friend class CMSCollector; // to get at expasion stats further below |
duke@435 | 151 | // |
duke@435 | 152 | |
duke@435 | 153 | VirtualSpace _virtual_space; // space for the stack |
duke@435 | 154 | oop* _base; // bottom of stack |
duke@435 | 155 | size_t _index; // one more than last occupied index |
duke@435 | 156 | size_t _capacity; // max #elements |
duke@435 | 157 | Mutex _par_lock; // an advisory lock used in case of parallel access |
duke@435 | 158 | NOT_PRODUCT(size_t _max_depth;) // max depth plumbed during run |
duke@435 | 159 | |
duke@435 | 160 | protected: |
duke@435 | 161 | size_t _hit_limit; // we hit max stack size limit |
duke@435 | 162 | size_t _failed_double; // we failed expansion before hitting limit |
duke@435 | 163 | |
duke@435 | 164 | public: |
duke@435 | 165 | CMSMarkStack(): |
duke@435 | 166 | _par_lock(Mutex::event, "CMSMarkStack._par_lock", true), |
duke@435 | 167 | _hit_limit(0), |
duke@435 | 168 | _failed_double(0) {} |
duke@435 | 169 | |
duke@435 | 170 | bool allocate(size_t size); |
duke@435 | 171 | |
duke@435 | 172 | size_t capacity() const { return _capacity; } |
duke@435 | 173 | |
duke@435 | 174 | oop pop() { |
duke@435 | 175 | if (!isEmpty()) { |
duke@435 | 176 | return _base[--_index] ; |
duke@435 | 177 | } |
duke@435 | 178 | return NULL; |
duke@435 | 179 | } |
duke@435 | 180 | |
duke@435 | 181 | bool push(oop ptr) { |
duke@435 | 182 | if (isFull()) { |
duke@435 | 183 | return false; |
duke@435 | 184 | } else { |
duke@435 | 185 | _base[_index++] = ptr; |
duke@435 | 186 | NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index)); |
duke@435 | 187 | return true; |
duke@435 | 188 | } |
duke@435 | 189 | } |
duke@435 | 190 | |
duke@435 | 191 | bool isEmpty() const { return _index == 0; } |
duke@435 | 192 | bool isFull() const { |
duke@435 | 193 | assert(_index <= _capacity, "buffer overflow"); |
duke@435 | 194 | return _index == _capacity; |
duke@435 | 195 | } |
duke@435 | 196 | |
duke@435 | 197 | size_t length() { return _index; } |
duke@435 | 198 | |
duke@435 | 199 | // "Parallel versions" of some of the above |
duke@435 | 200 | oop par_pop() { |
duke@435 | 201 | // lock and pop |
duke@435 | 202 | MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag); |
duke@435 | 203 | return pop(); |
duke@435 | 204 | } |
duke@435 | 205 | |
duke@435 | 206 | bool par_push(oop ptr) { |
duke@435 | 207 | // lock and push |
duke@435 | 208 | MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag); |
duke@435 | 209 | return push(ptr); |
duke@435 | 210 | } |
duke@435 | 211 | |
duke@435 | 212 | // Forcibly reset the stack, losing all of its contents. |
duke@435 | 213 | void reset() { |
duke@435 | 214 | _index = 0; |
duke@435 | 215 | } |
duke@435 | 216 | |
duke@435 | 217 | // Expand the stack, typically in response to an overflow condition |
duke@435 | 218 | void expand(); |
duke@435 | 219 | |
duke@435 | 220 | // Compute the least valued stack element. |
duke@435 | 221 | oop least_value(HeapWord* low) { |
duke@435 | 222 | oop least = (oop)low; |
duke@435 | 223 | for (size_t i = 0; i < _index; i++) { |
duke@435 | 224 | least = MIN2(least, _base[i]); |
duke@435 | 225 | } |
duke@435 | 226 | return least; |
duke@435 | 227 | } |
duke@435 | 228 | |
duke@435 | 229 | // Exposed here to allow stack expansion in || case |
duke@435 | 230 | Mutex* par_lock() { return &_par_lock; } |
duke@435 | 231 | }; |
duke@435 | 232 | |
// Further forward declarations (defined elsewhere).
class CardTableRS;
class CMSParGCThreadState;
duke@435 | 235 | |
// A MemRegionClosure that carries a CMS bit map (_t), e.g. the mod union
// table; do_MemRegion() is defined in the .cpp (presumably it records the
// covered range in _t -- confirm against the implementation).
class ModUnionClosure: public MemRegionClosure {
 protected:
  CMSBitMap* _t;  // target bit map
 public:
  ModUnionClosure(CMSBitMap* t): _t(t) { }
  void do_MemRegion(MemRegion mr);
};
duke@435 | 243 | |
// Variant of ModUnionClosure with its own do_MemRegion(); the "Par" suffix
// suggests it is intended for parallel GC threads (presumably using the
// par_* bit map operations -- confirm against the .cpp).
class ModUnionClosurePar: public ModUnionClosure {
 public:
  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
  void do_MemRegion(MemRegion mr);
};
duke@435 | 249 | |
duke@435 | 250 | // Survivor Chunk Array in support of parallelization of |
duke@435 | 251 | // Survivor Space rescan. |
duke@435 | 252 | class ChunkArray: public CHeapObj { |
duke@435 | 253 | size_t _index; |
duke@435 | 254 | size_t _capacity; |
duke@435 | 255 | HeapWord** _array; // storage for array |
duke@435 | 256 | |
duke@435 | 257 | public: |
duke@435 | 258 | ChunkArray() : _index(0), _capacity(0), _array(NULL) {} |
duke@435 | 259 | ChunkArray(HeapWord** a, size_t c): |
duke@435 | 260 | _index(0), _capacity(c), _array(a) {} |
duke@435 | 261 | |
duke@435 | 262 | HeapWord** array() { return _array; } |
duke@435 | 263 | void set_array(HeapWord** a) { _array = a; } |
duke@435 | 264 | |
duke@435 | 265 | size_t capacity() { return _capacity; } |
duke@435 | 266 | void set_capacity(size_t c) { _capacity = c; } |
duke@435 | 267 | |
duke@435 | 268 | size_t end() { |
duke@435 | 269 | assert(_index < capacity(), "_index out of bounds"); |
duke@435 | 270 | return _index; |
duke@435 | 271 | } // exclusive |
duke@435 | 272 | |
duke@435 | 273 | HeapWord* nth(size_t n) { |
duke@435 | 274 | assert(n < end(), "Out of bounds access"); |
duke@435 | 275 | return _array[n]; |
duke@435 | 276 | } |
duke@435 | 277 | |
duke@435 | 278 | void reset() { |
duke@435 | 279 | _index = 0; |
duke@435 | 280 | } |
duke@435 | 281 | |
duke@435 | 282 | void record_sample(HeapWord* p, size_t sz) { |
duke@435 | 283 | // For now we do not do anything with the size |
duke@435 | 284 | if (_index < _capacity) { |
duke@435 | 285 | _array[_index++] = p; |
duke@435 | 286 | } |
duke@435 | 287 | } |
duke@435 | 288 | }; |
duke@435 | 289 | |
//
// Timing, allocation and promotion statistics for gc scheduling and incremental
// mode pacing. Most statistics are exponential averages.
//
class CMSStats VALUE_OBJ_CLASS_SPEC {
 private:
  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.

  // The following are exponential averages with factor alpha:
  //   avg = (100 - alpha) * avg + alpha * cur_sample
  //
  //   The durations measure:  end_time[n] - start_time[n]
  //   The periods measure:    start_time[n] - start_time[n-1]
  //
  // The cms period and duration include only concurrent collections; time spent
  // in foreground cms collections due to System.gc() or because of a failure to
  // keep up are not included.
  //
  // There are 3 alphas to "bootstrap" the statistics.  The _saved_alpha is the
  // real value, but is used only after the first period.  A value of 100 is
  // used for the first sample so it gets the entire weight.
  unsigned int _saved_alpha; // 0-100
  unsigned int _gc0_alpha;   // bootstrap alpha for young (gc0) stats
  unsigned int _cms_alpha;   // bootstrap alpha for cms stats

  double _gc0_duration;
  double _gc0_period;
  size_t _gc0_promoted;    // bytes promoted per gc0
  double _cms_duration;
  double _cms_duration_pre_sweep; // time from initiation to start of sweep
  double _cms_duration_per_mb;
  double _cms_period;
  size_t _cms_allocated;   // bytes of direct allocation per gc0 period

  // Timers.
  elapsedTimer _cms_timer;
  TimeStamp    _gc0_begin_time;
  TimeStamp    _cms_begin_time;
  TimeStamp    _cms_end_time;

  // Snapshots of the amount used in the CMS generation.
  size_t _cms_used_at_gc0_begin;
  size_t _cms_used_at_gc0_end;
  size_t _cms_used_at_cms_begin;

  // Used to prevent the duty cycle from being reduced in the middle of a cms
  // cycle.
  bool _allow_duty_cycle_reduction;

  // Validity flags for the statistics (see valid() below).
  enum {
    _GC0_VALID = 0x1,
    _CMS_VALID = 0x2,
    _ALL_VALID = _GC0_VALID | _CMS_VALID
  };

  unsigned int _valid_bits;

  unsigned int _icms_duty_cycle;        // icms duty cycle (0-100).

 protected:

  // Return a duty cycle that avoids wild oscillations, by limiting the amount
  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
  // as a recommended value).
  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                             unsigned int new_duty_cycle);
  unsigned int icms_update_duty_cycle_impl();

  // In support of adjusting of cms trigger ratios based on history
  // of concurrent mode failure.
  double cms_free_adjustment_factor(size_t free) const;
  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);

 public:
  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
           unsigned int alpha = CMSExpAvgFactor);

  // Whether or not the statistics contain valid data; higher level statistics
  // cannot be called until this returns true (they require at least one young
  // gen and one cms cycle to have completed).
  bool valid() const;

  // Record statistics.
  void record_gc0_begin();
  void record_gc0_end(size_t cms_gen_bytes_used);
  void record_cms_begin();
  void record_cms_end();

  // Allow management of the cms timer, which must be stopped/started around
  // yield points.
  elapsedTimer& cms_timer()     { return _cms_timer; }
  void start_cms_timer()        { _cms_timer.start(); }
  void stop_cms_timer()         { _cms_timer.stop(); }

  // Basic statistics; units are seconds or bytes.
  double gc0_period() const     { return _gc0_period; }
  double gc0_duration() const   { return _gc0_duration; }
  size_t gc0_promoted() const   { return _gc0_promoted; }
  double cms_period() const          { return _cms_period; }
  double cms_duration() const        { return _cms_duration; }
  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
  size_t cms_allocated() const       { return _cms_allocated; }

  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}

  // Seconds since the last background cms cycle began or ended.
  double cms_time_since_begin() const;
  double cms_time_since_end() const;

  // Higher level statistics--caller must check that valid() returns true before
  // calling.

  // Returns bytes promoted per second of wall clock time.
  double promotion_rate() const;

  // Returns bytes directly allocated per second of wall clock time.
  double cms_allocation_rate() const;

  // Rate at which space in the cms generation is being consumed (sum of the
  // above two).
  double cms_consumption_rate() const;

  // Returns an estimate of the number of seconds until the cms generation will
  // fill up, assuming no collection work is done.
  double time_until_cms_gen_full() const;

  // Returns an estimate of the number of seconds remaining until
  // the cms generation collection should start.
  double time_until_cms_start() const;

  // End of higher level statistics.

  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }

  // Update the duty cycle and return the new value.
  unsigned int icms_update_duty_cycle();

  // Debugging.
  void print_on(outputStream* st) const PRODUCT_RETURN;
  void print() const { print_on(gclog_or_tty); }
};
duke@435 | 432 | |
// A closure related to weak references processing which
// we embed in the CMSCollector, since we need to pass
// it to the reference processor for secondary filtering
// of references based on reachability of referent;
// see role of _is_alive_non_header closure in the
// ReferenceProcessor class.
// For objects in the CMS generation, this closure checks
// if the object is "live" (reachable). Used in weak
// reference processing.
class CMSIsAliveClosure: public BoolObjectClosure {
  const MemRegion  _span;     // heap range this closure applies to
  const CMSBitMap* _bit_map;  // marking bit map consulted by do_object_b

  friend class CMSCollector;
 public:
  CMSIsAliveClosure(MemRegion span,
                    CMSBitMap* bit_map):
    _span(span),
    _bit_map(bit_map) {
    assert(!span.is_empty(), "Empty span could spell trouble");
  }

  // Only the boolean query do_object_b() is meaningful for this closure;
  // do_object() must never be invoked.
  void do_object(oop obj) {
    assert(false, "not to be invoked");
  }

  bool do_object_b(oop obj);
};
duke@435 | 461 | |
duke@435 | 462 | |
// Implements AbstractRefProcTaskExecutor for CMS.
class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 public:

  // Holds a (non-owning) reference to the collector on whose behalf
  // the reference-processing tasks are executed.
  CMSRefProcTaskExecutor(CMSCollector& collector)
    : _collector(collector)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
 private:
  CMSCollector& _collector;
};
duke@435 | 477 | |
duke@435 | 478 | |
duke@435 | 479 | class CMSCollector: public CHeapObj { |
duke@435 | 480 | friend class VMStructs; |
duke@435 | 481 | friend class ConcurrentMarkSweepThread; |
duke@435 | 482 | friend class ConcurrentMarkSweepGeneration; |
duke@435 | 483 | friend class CompactibleFreeListSpace; |
duke@435 | 484 | friend class CMSParRemarkTask; |
duke@435 | 485 | friend class CMSConcMarkingTask; |
duke@435 | 486 | friend class CMSRefProcTaskProxy; |
duke@435 | 487 | friend class CMSRefProcTaskExecutor; |
duke@435 | 488 | friend class ScanMarkedObjectsAgainCarefullyClosure; // for sampling eden |
duke@435 | 489 | friend class SurvivorSpacePrecleanClosure; // --- ditto ------- |
duke@435 | 490 | friend class PushOrMarkClosure; // to access _restart_addr |
duke@435 | 491 | friend class Par_PushOrMarkClosure; // to access _restart_addr |
duke@435 | 492 | friend class MarkFromRootsClosure; // -- ditto -- |
duke@435 | 493 | // ... and for clearing cards |
duke@435 | 494 | friend class Par_MarkFromRootsClosure; // to access _restart_addr |
duke@435 | 495 | // ... and for clearing cards |
duke@435 | 496 | friend class Par_ConcMarkingClosure; // to access _restart_addr etc. |
duke@435 | 497 | friend class MarkFromRootsVerifyClosure; // to access _restart_addr |
duke@435 | 498 | friend class PushAndMarkVerifyClosure; // -- ditto -- |
duke@435 | 499 | friend class MarkRefsIntoAndScanClosure; // to access _overflow_list |
duke@435 | 500 | friend class PushAndMarkClosure; // -- ditto -- |
duke@435 | 501 | friend class Par_PushAndMarkClosure; // -- ditto -- |
duke@435 | 502 | friend class CMSKeepAliveClosure; // -- ditto -- |
duke@435 | 503 | friend class CMSDrainMarkingStackClosure; // -- ditto -- |
duke@435 | 504 | friend class CMSInnerParMarkAndPushClosure; // -- ditto -- |
duke@435 | 505 | NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list |
duke@435 | 506 | friend class ReleaseForegroundGC; // to access _foregroundGCShouldWait |
duke@435 | 507 | friend class VM_CMS_Operation; |
duke@435 | 508 | friend class VM_CMS_Initial_Mark; |
duke@435 | 509 | friend class VM_CMS_Final_Remark; |
duke@435 | 510 | |
duke@435 | 511 | private: |
duke@435 | 512 | jlong _time_of_last_gc; |
duke@435 | 513 | void update_time_of_last_gc(jlong now) { |
duke@435 | 514 | _time_of_last_gc = now; |
duke@435 | 515 | } |
duke@435 | 516 | |
duke@435 | 517 | OopTaskQueueSet* _task_queues; |
duke@435 | 518 | |
duke@435 | 519 | // Overflow list of grey objects, threaded through mark-word |
duke@435 | 520 | // Manipulated with CAS in the parallel/multi-threaded case. |
duke@435 | 521 | oop _overflow_list; |
duke@435 | 522 | // The following array-pair keeps track of mark words |
duke@435 | 523 | // displaced for accomodating overflow list above. |
duke@435 | 524 | // This code will likely be revisited under RFE#4922830. |
duke@435 | 525 | GrowableArray<oop>* _preserved_oop_stack; |
duke@435 | 526 | GrowableArray<markOop>* _preserved_mark_stack; |
duke@435 | 527 | |
duke@435 | 528 | int* _hash_seed; |
duke@435 | 529 | |
duke@435 | 530 | // In support of multi-threaded concurrent phases |
duke@435 | 531 | YieldingFlexibleWorkGang* _conc_workers; |
duke@435 | 532 | |
duke@435 | 533 | // Performance Counters |
duke@435 | 534 | CollectorCounters* _gc_counters; |
duke@435 | 535 | |
duke@435 | 536 | // Initialization Errors |
duke@435 | 537 | bool _completed_initialization; |
duke@435 | 538 | |
duke@435 | 539 | // In support of ExplicitGCInvokesConcurrent |
duke@435 | 540 | static bool _full_gc_requested; |
duke@435 | 541 | unsigned int _collection_count_start; |
ysr@529 | 542 | |
duke@435 | 543 | // Should we unload classes this concurrent cycle? |
ysr@529 | 544 | bool _should_unload_classes; |
ysr@529 | 545 | unsigned int _concurrent_cycles_since_last_unload; |
ysr@529 | 546 | unsigned int concurrent_cycles_since_last_unload() const { |
ysr@529 | 547 | return _concurrent_cycles_since_last_unload; |
ysr@529 | 548 | } |
duke@435 | 549 | // Did we (allow) unload classes in the previous concurrent cycle? |
ysr@529 | 550 | bool unloaded_classes_last_cycle() const { |
ysr@529 | 551 | return concurrent_cycles_since_last_unload() == 0; |
duke@435 | 552 | } |
ysr@1233 | 553 | // Root scanning options for perm gen |
ysr@1233 | 554 | int _roots_scanning_options; |
ysr@1233 | 555 | int roots_scanning_options() const { return _roots_scanning_options; } |
ysr@1233 | 556 | void add_root_scanning_option(int o) { _roots_scanning_options |= o; } |
ysr@1233 | 557 | void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; } |
duke@435 | 558 | |
duke@435 | 559 | // Verification support |
duke@435 | 560 | CMSBitMap _verification_mark_bm; |
duke@435 | 561 | void verify_after_remark_work_1(); |
duke@435 | 562 | void verify_after_remark_work_2(); |
duke@435 | 563 | |
duke@435 | 564 | // true if any verification flag is on. |
duke@435 | 565 | bool _verifying; |
duke@435 | 566 | bool verifying() const { return _verifying; } |
duke@435 | 567 | void set_verifying(bool v) { _verifying = v; } |
duke@435 | 568 | |
duke@435 | 569 | // Collector policy |
duke@435 | 570 | ConcurrentMarkSweepPolicy* _collector_policy; |
duke@435 | 571 | ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; } |
duke@435 | 572 | |
duke@435 | 573 | // XXX Move these to CMSStats ??? FIX ME !!! |
ysr@1580 | 574 | elapsedTimer _inter_sweep_timer; // time between sweeps |
ysr@1580 | 575 | elapsedTimer _intra_sweep_timer; // time _in_ sweeps |
ysr@1580 | 576 | // padded decaying average estimates of the above |
ysr@1580 | 577 | AdaptivePaddedAverage _inter_sweep_estimate; |
ysr@1580 | 578 | AdaptivePaddedAverage _intra_sweep_estimate; |
duke@435 | 579 | |
duke@435 | 580 | protected: |
duke@435 | 581 | ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS) |
duke@435 | 582 | ConcurrentMarkSweepGeneration* _permGen; // perm gen |
duke@435 | 583 | MemRegion _span; // span covering above two |
duke@435 | 584 | CardTableRS* _ct; // card table |
duke@435 | 585 | |
duke@435 | 586 | // CMS marking support structures |
duke@435 | 587 | CMSBitMap _markBitMap; |
duke@435 | 588 | CMSBitMap _modUnionTable; |
duke@435 | 589 | CMSMarkStack _markStack; |
  CMSMarkStack _revisitStack;            // used to keep track of klassKlass objects
                                         // to revisit
  CMSBitMap    _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support.

  HeapWord*    _restart_addr;            // in support of marking stack overflow
  // Lower _restart_addr to "low" so a subsequent marking restart rescans from there.
  void lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time. The "_ser_" counters are updated on the
  // single-threaded paths, the "_par_" counters by parallel workers.
  size_t _ser_pmc_preclean_ovflw;
  size_t _ser_pmc_remark_ovflw;
  size_t _par_pmc_remark_ovflw;
  size_t _ser_kac_preclean_ovflw;
  size_t _ser_kac_ovflw;
  size_t _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)  // diagnostic: parallel pushes (non-product only)

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;
  CMSIsAliveClosure _is_alive_closure;
  // keep this textually after _markBitMap and _span; c'tor dependency

  ConcurrentMarkSweepThread* _cmsThread; // the thread doing the work
  ModUnionClosure _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling) = {Marking}
  // next_state(Marking) = {Precleaning, Sweeping}
  // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking) = {Sweeping}
  // next_state(Sweeping) = {Resizing}
  // next_state(Resizing) = {Resetting}
  // next_state(Resetting) = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling == post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
 public:
  enum CollectorState {
    Resizing            = 0,
    Resetting           = 1,
    Idling              = 2,
    InitialMarking      = 3,
    Marking             = 4,
    Precleaning         = 5,
    AbortablePreclean   = 6,
    FinalMarking        = 7,
    Sweeping            = 8
  };
 protected:
  static CollectorState _collectorState;
duke@435 | 646 | |
  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                        // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                        // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;       // number of voluntary yields taken (see incrementYields())
  size_t _numDirtyCards;
  size_t _sweep_count;     // number of completed sweep phases
  // number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode. When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord* _icms_start_limit;
  HeapWord* _icms_stop_limit;

  // The kinds of stop-world VM operations issued by this collector.
  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op);
  // Issue "op" as a stop-world operation; presumably brings mutators to a
  // safepoint first -- NOTE(review): confirm against the VM-operation code.
  bool stop_world_and_do(CMS_op_type op);

  // Accessors for parallel-marking infrastructure.
  OopTaskQueueSet* task_queues() { return _task_queues; }
  int* hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top
duke@435 | 698 | |
 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;          // the younger gen
  HeapWord**  _top_addr;           // ... Top of Eden
  HeapWord**  _end_addr;           // ... End of Eden
  HeapWord**  _eden_chunk_array;   // ... Eden partitioning array
  size_t      _eden_chunk_index;   // ... top (exclusive) of array
  size_t      _eden_chunk_capacity; // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord** _survivor_chunk_array;
  size_t     _survivor_chunk_index;
  size_t     _survivor_chunk_capacity;
  size_t*    _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling:
  // move up to "num" oops between the shared overflow list and the
  // given local stack / work queue; "par_" variants are the versions
  // used by parallel worker threads.
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num, OopTaskQueue* to_work_q);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // the following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  // Save (and later restore) an object's mark word when the collector
  // needs to overwrite it (e.g. on overflow); see restore_preserved_marks_if_any().
  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)      // sequential
  NOT_PRODUCT(bool par_simulate_overflow();)  // MT version

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work

  // a return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch); // concurrent marking work

 public: // FIX ME!!! only for testing
  bool do_marking_st(bool asynch); // single-threaded marking
  bool do_marking_mt(bool asynch); // multi-threaded marking

 private:

  // concurrent precleaning work over the mod-union table / card table;
  // each returns a measure of the precleaning work done.
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();
duke@435 | 759 | |
  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // Resize the generations included in the collector.
  void compute_new_size();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
    bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
    CollectorState first_state, bool should_start_over);

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC. waitForForegroundGC() is called by the background
  // collector. If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering: recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  // Block-size queries that consult the "Printezis" mark bits -- NOTE(review):
  // presumably the auxiliary bit pairs used to record block sizes for the
  // sweeper; confirm against the mark/sweep implementation.
  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               ConcurrentMarkSweepGeneration* permGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock() const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool full,
               bool clear_all_soft_refs,
               size_t size,
               bool tlab);
  // Entry points for the background (concurrent) and foreground
  // (stop-world) collection modes respectively.
  void collect_in_background(bool clear_all_soft_refs);
  void collect_in_foreground(bool clear_all_soft_refs);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  bool update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  // Acquire/release/query the free-list locks of the constituent spaces.
  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);
duke@435 | 886 | |
duke@435 | 887 | jlong time_of_last_gc(jlong now) { |
duke@435 | 888 | if (_collectorState <= Idling) { |
duke@435 | 889 | // gc not in progress |
duke@435 | 890 | return _time_of_last_gc; |
duke@435 | 891 | } else { |
duke@435 | 892 | // collection in progress |
duke@435 | 893 | return now; |
duke@435 | 894 | } |
duke@435 | 895 | } |
duke@435 | 896 | |
  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);

  CMSBitMap* markBitMap() { return &_markBitMap; }
  // NOTE(review): both directAllocated() here and direct_allocated() above are
  // declared; confirm whether one is a legacy spelling that can be removed.
  void directAllocated(HeapWord* start, size_t size);

  // main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch); // a return value of false indicates failure
                                   // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics() PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters() { return _gc_counters; }

  // timer stuff: the timer must be inactive for start/reset/read,
  // active for stop (enforced by the asserts).
  void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
  void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); }
  void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
  double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int yields() { return _numYields; }
  void resetYields() { _numYields = 0; }
  void incrementYields() { _numYields++; }
  void resetNumDirtyCards() { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t numDirtyCards() { return _numDirtyCards; }

  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
  size_t sweep_count() const { return _sweep_count; }
  void increment_sweep_count() { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Convenience methods that check whether CMSIncrementalMode is enabled and
  // forward to the corresponding methods in ConcurrentMarkSweepThread.
  static void start_icms();
  static void stop_icms();    // Called at the end of the cms cycle.
  static void disable_icms(); // Called before a foreground collection.
  static void enable_icms();  // Called after a foreground collection.
  void icms_wait();           // Called at yield points.

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  // debugging
  void verify(bool);
  bool verify_after_remark();
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }

  // Get the bit map with perm gen "deadness" information.
  CMSBitMap* perm_gen_verify_bit_map() { return &_perm_gen_verify_bit_map; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }
};
duke@435 | 978 | |
// The reason a ConcurrentMarkSweepGeneration was expanded; recorded when
// expansion happens (see ConcurrentMarkSweepGeneration::expand(), which
// takes a CMSExpansionCause::Cause argument).
class CMSExpansionCause : public AllStatic  {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};
duke@435 | 993 | |
// The CMS old (and perm) generation; the heavy lifting is delegated to the
// (shared, static) CMSCollector declared above.
class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*       _collector; // the collector that collects us
  CompactibleFreeListSpace*  _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters*      _gen_counters;
  GSpaceCounters*          _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    int _numObjectsPromoted;
    int _numWordsPromoted;
    int _numObjectsAllocated;
    int _numWordsAllocated;
  )

  // Used for sizing decisions: set when an incremental collection has
  // failed, cleared once the condition has been acted upon.
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }
duke@435 | 1029 | |
  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  // How the last (foreground or concurrent) collection was performed;
  // recorded for debugging purposes (see _debug_collection_type).
  enum CollectionTypes {
    Concurrent_collection_type          = 0,
    MS_foreground_collection_type       = 1,
    MSC_foreground_collection_type      = 2,
    Unknown_collection_type             = 3
  };

  CollectionTypes _debug_collection_type;

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink generation by specified size
  // NOTE(review): comment previously claimed a boolean result, but the
  // method returns void; confirm intended contract.
  virtual void shrink_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void init_initiating_occupancy(intx io, intx tr);
 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  // Install the (single, shared) collector; may only be called once.
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return false;
  }

  // Override
  virtual void ref_processor_init();

  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // over-rides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set. This is better than using the policy's should_collect_gen0_first()
  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !CollectGen0First;
  }
duke@435 | 1136 | |
  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  // Parallel allocation simply delegates to allocate(); see its definition
  // for the locking it performs.
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }

  // Incremental mode triggering.
  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  // Used by CMSStats to track direct allocation. The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words() { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
    bool younger_handles_promotion_failure) const;

  // Inform this (non-young) generation that a promotion failure was
  // encountered during a collection of a younger generation that
  // promotes into this generation.
  virtual void promotion_failure_occurred();

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool full,
               bool clear_all_soft_refs,
               size_t size,
               bool tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_prologue_work(bool full, bool registerClosure,
                        ModUnionClosure* modUnionClosure);
  void gc_epilogue(bool full);
  void gc_epilogue_work(bool full);

  // Time since last GC of this generation; both delegate to the collector.
  jlong time_of_last_gc(jlong now) {
    return collector()->time_of_last_gc(now);
  }
  void update_time_of_last_gc(jlong now) {
    collector()-> update_time_of_last_gc(now);
  }

  // Allocation failure
  void expand(size_t bytes, size_t expand_bytes,
    CMSExpansionCause::Cause cause);
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);
  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
  bool expand_and_ensure_spooling_space(PromotionInfo* promo);
duke@435 | 1213 | |
  // Iteration support and related enquiries
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);
  void younger_refs_iterate(OopsInGenClosure* cl);

  // Iteration support specific to CMS generations
  void save_sweep_limit();

  // More iteration support
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);
  virtual void oop_iterate(OopClosure* cl);
  virtual void safe_object_iterate(ObjectClosure* cl);
  virtual void object_iterate(ObjectClosure* cl);

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get message from the compiler:
  // oop_since_save_marks_iterate_nv hides virtual function...
  #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)

  // Smart allocation  XXX -- move to CFLSpace?
  void setNearLargestChunk();
  bool isNearLargestChunk(HeapWord*  addr);

  // Get the chunk at the end of the space. Delegates to
  // the space.
  FreeChunk* find_chunk_at_end();

  // Overriding of unused functionality (sharing not yet supported with CMS)
  void pre_adjust_pointers();
  void post_compact();

  // Debugging
  void prepare_for_verify();
  void verify(bool allow_dirty);
  void print_statistics() PRODUCT_RETURN;

  // Performance Counters support
  virtual void update_counters();
  virtual void update_counters(size_t used);
  void initialize_performance_counters();
  CollectorCounters* counters() { return collector()->counters(); }
duke@435 | 1258 | |
duke@435 | 1259 | // Support for parallel remark of survivor space |
duke@435 | 1260 | void* get_data_recorder(int thr_num) { |
duke@435 | 1261 | //Delegate to collector |
duke@435 | 1262 | return collector()->get_data_recorder(thr_num); |
duke@435 | 1263 | } |
duke@435 | 1264 | |
duke@435 | 1265 | // Printing |
duke@435 | 1266 | const char* name() const; |
duke@435 | 1267 | virtual const char* short_name() const { return "CMS"; } |
duke@435 | 1268 | void print() const; |
duke@435 | 1269 | void printOccupancy(const char* s); |
duke@435 | 1270 | bool must_be_youngest() const { return false; } |
duke@435 | 1271 | bool must_be_oldest() const { return true; } |
duke@435 | 1272 | |
duke@435 | 1273 | void compute_new_size(); |
duke@435 | 1274 | |
duke@435 | 1275 | CollectionTypes debug_collection_type() { return _debug_collection_type; } |
duke@435 | 1276 | void rotate_debug_collection_type(); |
duke@435 | 1277 | }; |
duke@435 | 1278 | |
duke@435 | 1279 | class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration { |
duke@435 | 1280 | |
duke@435 | 1281 | // Return the size policy from the heap's collector |
duke@435 | 1282 | // policy casted to CMSAdaptiveSizePolicy*. |
duke@435 | 1283 | CMSAdaptiveSizePolicy* cms_size_policy() const; |
duke@435 | 1284 | |
duke@435 | 1285 | // Resize the generation based on the adaptive size |
duke@435 | 1286 | // policy. |
duke@435 | 1287 | void resize(size_t cur_promo, size_t desired_promo); |
duke@435 | 1288 | |
duke@435 | 1289 | // Return the GC counters from the collector policy |
duke@435 | 1290 | CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters(); |
duke@435 | 1291 | |
duke@435 | 1292 | virtual void shrink_by(size_t bytes); |
duke@435 | 1293 | |
duke@435 | 1294 | public: |
duke@435 | 1295 | virtual void compute_new_size(); |
duke@435 | 1296 | ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size, |
duke@435 | 1297 | int level, CardTableRS* ct, |
duke@435 | 1298 | bool use_adaptive_freelists, |
duke@435 | 1299 | FreeBlockDictionary::DictionaryChoice |
duke@435 | 1300 | dictionaryChoice) : |
duke@435 | 1301 | ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct, |
duke@435 | 1302 | use_adaptive_freelists, dictionaryChoice) {} |
duke@435 | 1303 | |
duke@435 | 1304 | virtual const char* short_name() const { return "ASCMS"; } |
duke@435 | 1305 | virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; } |
duke@435 | 1306 | |
duke@435 | 1307 | virtual void update_counters(); |
duke@435 | 1308 | virtual void update_counters(size_t used); |
duke@435 | 1309 | }; |
duke@435 | 1310 | |
duke@435 | 1311 | // |
duke@435 | 1312 | // Closures of various sorts used by CMS to accomplish its work |
duke@435 | 1313 | // |
duke@435 | 1314 | |
duke@435 | 1315 | // This closure is used to check that a certain set of oops is empty. |
duke@435 | 1316 | class FalseClosure: public OopClosure { |
duke@435 | 1317 | public: |
coleenp@548 | 1318 | void do_oop(oop* p) { guarantee(false, "Should be an empty set"); } |
coleenp@548 | 1319 | void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); } |
duke@435 | 1320 | }; |
duke@435 | 1321 | |
duke@435 | 1322 | // This closure is used to do concurrent marking from the roots |
duke@435 | 1323 | // following the first checkpoint. |
duke@435 | 1324 | class MarkFromRootsClosure: public BitMapClosure { |
duke@435 | 1325 | CMSCollector* _collector; |
duke@435 | 1326 | MemRegion _span; |
duke@435 | 1327 | CMSBitMap* _bitMap; |
duke@435 | 1328 | CMSBitMap* _mut; |
duke@435 | 1329 | CMSMarkStack* _markStack; |
duke@435 | 1330 | CMSMarkStack* _revisitStack; |
duke@435 | 1331 | bool _yield; |
duke@435 | 1332 | int _skipBits; |
duke@435 | 1333 | HeapWord* _finger; |
duke@435 | 1334 | HeapWord* _threshold; |
duke@435 | 1335 | DEBUG_ONLY(bool _verifying;) |
duke@435 | 1336 | |
duke@435 | 1337 | public: |
duke@435 | 1338 | MarkFromRootsClosure(CMSCollector* collector, MemRegion span, |
duke@435 | 1339 | CMSBitMap* bitMap, |
duke@435 | 1340 | CMSMarkStack* markStack, |
duke@435 | 1341 | CMSMarkStack* revisitStack, |
duke@435 | 1342 | bool should_yield, bool verifying = false); |
ysr@777 | 1343 | bool do_bit(size_t offset); |
duke@435 | 1344 | void reset(HeapWord* addr); |
duke@435 | 1345 | inline void do_yield_check(); |
duke@435 | 1346 | |
duke@435 | 1347 | private: |
duke@435 | 1348 | void scanOopsInOop(HeapWord* ptr); |
duke@435 | 1349 | void do_yield_work(); |
duke@435 | 1350 | }; |
duke@435 | 1351 | |
duke@435 | 1352 | // This closure is used to do concurrent multi-threaded |
duke@435 | 1353 | // marking from the roots following the first checkpoint. |
duke@435 | 1354 | // XXX This should really be a subclass of The serial version |
duke@435 | 1355 | // above, but i have not had the time to refactor things cleanly. |
duke@435 | 1356 | // That willbe done for Dolphin. |
duke@435 | 1357 | class Par_MarkFromRootsClosure: public BitMapClosure { |
duke@435 | 1358 | CMSCollector* _collector; |
duke@435 | 1359 | MemRegion _whole_span; |
duke@435 | 1360 | MemRegion _span; |
duke@435 | 1361 | CMSBitMap* _bit_map; |
duke@435 | 1362 | CMSBitMap* _mut; |
duke@435 | 1363 | OopTaskQueue* _work_queue; |
duke@435 | 1364 | CMSMarkStack* _overflow_stack; |
duke@435 | 1365 | CMSMarkStack* _revisit_stack; |
duke@435 | 1366 | bool _yield; |
duke@435 | 1367 | int _skip_bits; |
duke@435 | 1368 | HeapWord* _finger; |
duke@435 | 1369 | HeapWord* _threshold; |
duke@435 | 1370 | CMSConcMarkingTask* _task; |
duke@435 | 1371 | public: |
duke@435 | 1372 | Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector, |
duke@435 | 1373 | MemRegion span, |
duke@435 | 1374 | CMSBitMap* bit_map, |
duke@435 | 1375 | OopTaskQueue* work_queue, |
duke@435 | 1376 | CMSMarkStack* overflow_stack, |
duke@435 | 1377 | CMSMarkStack* revisit_stack, |
duke@435 | 1378 | bool should_yield); |
ysr@777 | 1379 | bool do_bit(size_t offset); |
duke@435 | 1380 | inline void do_yield_check(); |
duke@435 | 1381 | |
duke@435 | 1382 | private: |
duke@435 | 1383 | void scan_oops_in_oop(HeapWord* ptr); |
duke@435 | 1384 | void do_yield_work(); |
duke@435 | 1385 | bool get_work_from_overflow_stack(); |
duke@435 | 1386 | }; |
duke@435 | 1387 | |
duke@435 | 1388 | // The following closures are used to do certain kinds of verification of |
duke@435 | 1389 | // CMS marking. |
duke@435 | 1390 | class PushAndMarkVerifyClosure: public OopClosure { |
duke@435 | 1391 | CMSCollector* _collector; |
duke@435 | 1392 | MemRegion _span; |
duke@435 | 1393 | CMSBitMap* _verification_bm; |
duke@435 | 1394 | CMSBitMap* _cms_bm; |
duke@435 | 1395 | CMSMarkStack* _mark_stack; |
coleenp@548 | 1396 | protected: |
coleenp@548 | 1397 | void do_oop(oop p); |
coleenp@548 | 1398 | template <class T> inline void do_oop_work(T *p) { |
coleenp@548 | 1399 | oop obj = oopDesc::load_decode_heap_oop_not_null(p); |
coleenp@548 | 1400 | do_oop(obj); |
coleenp@548 | 1401 | } |
duke@435 | 1402 | public: |
duke@435 | 1403 | PushAndMarkVerifyClosure(CMSCollector* cms_collector, |
duke@435 | 1404 | MemRegion span, |
duke@435 | 1405 | CMSBitMap* verification_bm, |
duke@435 | 1406 | CMSBitMap* cms_bm, |
duke@435 | 1407 | CMSMarkStack* mark_stack); |
duke@435 | 1408 | void do_oop(oop* p); |
coleenp@548 | 1409 | void do_oop(narrowOop* p); |
duke@435 | 1410 | // Deal with a stack overflow condition |
duke@435 | 1411 | void handle_stack_overflow(HeapWord* lost); |
duke@435 | 1412 | }; |
duke@435 | 1413 | |
duke@435 | 1414 | class MarkFromRootsVerifyClosure: public BitMapClosure { |
duke@435 | 1415 | CMSCollector* _collector; |
duke@435 | 1416 | MemRegion _span; |
duke@435 | 1417 | CMSBitMap* _verification_bm; |
duke@435 | 1418 | CMSBitMap* _cms_bm; |
duke@435 | 1419 | CMSMarkStack* _mark_stack; |
duke@435 | 1420 | HeapWord* _finger; |
duke@435 | 1421 | PushAndMarkVerifyClosure _pam_verify_closure; |
duke@435 | 1422 | public: |
duke@435 | 1423 | MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span, |
duke@435 | 1424 | CMSBitMap* verification_bm, |
duke@435 | 1425 | CMSBitMap* cms_bm, |
duke@435 | 1426 | CMSMarkStack* mark_stack); |
ysr@777 | 1427 | bool do_bit(size_t offset); |
duke@435 | 1428 | void reset(HeapWord* addr); |
duke@435 | 1429 | }; |
duke@435 | 1430 | |
duke@435 | 1431 | |
duke@435 | 1432 | // This closure is used to check that a certain set of bits is |
duke@435 | 1433 | // "empty" (i.e. the bit vector doesn't have any 1-bits). |
duke@435 | 1434 | class FalseBitMapClosure: public BitMapClosure { |
duke@435 | 1435 | public: |
ysr@777 | 1436 | bool do_bit(size_t offset) { |
duke@435 | 1437 | guarantee(false, "Should not have a 1 bit"); |
ysr@777 | 1438 | return true; |
duke@435 | 1439 | } |
duke@435 | 1440 | }; |
duke@435 | 1441 | |
duke@435 | 1442 | // This closure is used during the second checkpointing phase |
duke@435 | 1443 | // to rescan the marked objects on the dirty cards in the mod |
duke@435 | 1444 | // union table and the card table proper. It's invoked via |
duke@435 | 1445 | // MarkFromDirtyCardsClosure below. It uses either |
duke@435 | 1446 | // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case) |
duke@435 | 1447 | // declared in genOopClosures.hpp to accomplish some of its work. |
duke@435 | 1448 | // In the parallel case the bitMap is shared, so access to |
duke@435 | 1449 | // it needs to be suitably synchronized for updates by embedded |
duke@435 | 1450 | // closures that update it; however, this closure itself only |
duke@435 | 1451 | // reads the bit_map and because it is idempotent, is immune to |
duke@435 | 1452 | // reading stale values. |
duke@435 | 1453 | class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure { |
duke@435 | 1454 | #ifdef ASSERT |
duke@435 | 1455 | CMSCollector* _collector; |
duke@435 | 1456 | MemRegion _span; |
duke@435 | 1457 | union { |
duke@435 | 1458 | CMSMarkStack* _mark_stack; |
duke@435 | 1459 | OopTaskQueue* _work_queue; |
duke@435 | 1460 | }; |
duke@435 | 1461 | #endif // ASSERT |
duke@435 | 1462 | bool _parallel; |
duke@435 | 1463 | CMSBitMap* _bit_map; |
duke@435 | 1464 | union { |
duke@435 | 1465 | MarkRefsIntoAndScanClosure* _scan_closure; |
duke@435 | 1466 | Par_MarkRefsIntoAndScanClosure* _par_scan_closure; |
duke@435 | 1467 | }; |
duke@435 | 1468 | |
duke@435 | 1469 | public: |
duke@435 | 1470 | ScanMarkedObjectsAgainClosure(CMSCollector* collector, |
duke@435 | 1471 | MemRegion span, |
duke@435 | 1472 | ReferenceProcessor* rp, |
duke@435 | 1473 | CMSBitMap* bit_map, |
duke@435 | 1474 | CMSMarkStack* mark_stack, |
duke@435 | 1475 | CMSMarkStack* revisit_stack, |
duke@435 | 1476 | MarkRefsIntoAndScanClosure* cl): |
duke@435 | 1477 | #ifdef ASSERT |
duke@435 | 1478 | _collector(collector), |
duke@435 | 1479 | _span(span), |
duke@435 | 1480 | _mark_stack(mark_stack), |
duke@435 | 1481 | #endif // ASSERT |
duke@435 | 1482 | _parallel(false), |
duke@435 | 1483 | _bit_map(bit_map), |
duke@435 | 1484 | _scan_closure(cl) { } |
duke@435 | 1485 | |
duke@435 | 1486 | ScanMarkedObjectsAgainClosure(CMSCollector* collector, |
duke@435 | 1487 | MemRegion span, |
duke@435 | 1488 | ReferenceProcessor* rp, |
duke@435 | 1489 | CMSBitMap* bit_map, |
duke@435 | 1490 | OopTaskQueue* work_queue, |
duke@435 | 1491 | CMSMarkStack* revisit_stack, |
duke@435 | 1492 | Par_MarkRefsIntoAndScanClosure* cl): |
duke@435 | 1493 | #ifdef ASSERT |
duke@435 | 1494 | _collector(collector), |
duke@435 | 1495 | _span(span), |
duke@435 | 1496 | _work_queue(work_queue), |
duke@435 | 1497 | #endif // ASSERT |
duke@435 | 1498 | _parallel(true), |
duke@435 | 1499 | _bit_map(bit_map), |
duke@435 | 1500 | _par_scan_closure(cl) { } |
duke@435 | 1501 | |
duke@435 | 1502 | void do_object(oop obj) { |
duke@435 | 1503 | guarantee(false, "Call do_object_b(oop, MemRegion) instead"); |
duke@435 | 1504 | } |
duke@435 | 1505 | bool do_object_b(oop obj) { |
duke@435 | 1506 | guarantee(false, "Call do_object_b(oop, MemRegion) form instead"); |
duke@435 | 1507 | return false; |
duke@435 | 1508 | } |
duke@435 | 1509 | bool do_object_bm(oop p, MemRegion mr); |
duke@435 | 1510 | }; |
duke@435 | 1511 | |
duke@435 | 1512 | // This closure is used during the second checkpointing phase |
duke@435 | 1513 | // to rescan the marked objects on the dirty cards in the mod |
duke@435 | 1514 | // union table and the card table proper. It invokes |
duke@435 | 1515 | // ScanMarkedObjectsAgainClosure above to accomplish much of its work. |
duke@435 | 1516 | // In the parallel case, the bit map is shared and requires |
duke@435 | 1517 | // synchronized access. |
duke@435 | 1518 | class MarkFromDirtyCardsClosure: public MemRegionClosure { |
duke@435 | 1519 | CompactibleFreeListSpace* _space; |
duke@435 | 1520 | ScanMarkedObjectsAgainClosure _scan_cl; |
duke@435 | 1521 | size_t _num_dirty_cards; |
duke@435 | 1522 | |
duke@435 | 1523 | public: |
duke@435 | 1524 | MarkFromDirtyCardsClosure(CMSCollector* collector, |
duke@435 | 1525 | MemRegion span, |
duke@435 | 1526 | CompactibleFreeListSpace* space, |
duke@435 | 1527 | CMSBitMap* bit_map, |
duke@435 | 1528 | CMSMarkStack* mark_stack, |
duke@435 | 1529 | CMSMarkStack* revisit_stack, |
duke@435 | 1530 | MarkRefsIntoAndScanClosure* cl): |
duke@435 | 1531 | _space(space), |
duke@435 | 1532 | _num_dirty_cards(0), |
duke@435 | 1533 | _scan_cl(collector, span, collector->ref_processor(), bit_map, |
duke@435 | 1534 | mark_stack, revisit_stack, cl) { } |
duke@435 | 1535 | |
duke@435 | 1536 | MarkFromDirtyCardsClosure(CMSCollector* collector, |
duke@435 | 1537 | MemRegion span, |
duke@435 | 1538 | CompactibleFreeListSpace* space, |
duke@435 | 1539 | CMSBitMap* bit_map, |
duke@435 | 1540 | OopTaskQueue* work_queue, |
duke@435 | 1541 | CMSMarkStack* revisit_stack, |
duke@435 | 1542 | Par_MarkRefsIntoAndScanClosure* cl): |
duke@435 | 1543 | _space(space), |
duke@435 | 1544 | _num_dirty_cards(0), |
duke@435 | 1545 | _scan_cl(collector, span, collector->ref_processor(), bit_map, |
duke@435 | 1546 | work_queue, revisit_stack, cl) { } |
duke@435 | 1547 | |
duke@435 | 1548 | void do_MemRegion(MemRegion mr); |
duke@435 | 1549 | void set_space(CompactibleFreeListSpace* space) { _space = space; } |
duke@435 | 1550 | size_t num_dirty_cards() { return _num_dirty_cards; } |
duke@435 | 1551 | }; |
duke@435 | 1552 | |
duke@435 | 1553 | // This closure is used in the non-product build to check |
duke@435 | 1554 | // that there are no MemRegions with a certain property. |
duke@435 | 1555 | class FalseMemRegionClosure: public MemRegionClosure { |
duke@435 | 1556 | void do_MemRegion(MemRegion mr) { |
duke@435 | 1557 | guarantee(!mr.is_empty(), "Shouldn't be empty"); |
duke@435 | 1558 | guarantee(false, "Should never be here"); |
duke@435 | 1559 | } |
duke@435 | 1560 | }; |
duke@435 | 1561 | |
duke@435 | 1562 | // This closure is used during the precleaning phase |
duke@435 | 1563 | // to "carefully" rescan marked objects on dirty cards. |
duke@435 | 1564 | // It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp |
duke@435 | 1565 | // to accomplish some of its work. |
duke@435 | 1566 | class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful { |
duke@435 | 1567 | CMSCollector* _collector; |
duke@435 | 1568 | MemRegion _span; |
duke@435 | 1569 | bool _yield; |
duke@435 | 1570 | Mutex* _freelistLock; |
duke@435 | 1571 | CMSBitMap* _bitMap; |
duke@435 | 1572 | CMSMarkStack* _markStack; |
duke@435 | 1573 | MarkRefsIntoAndScanClosure* _scanningClosure; |
duke@435 | 1574 | |
duke@435 | 1575 | public: |
duke@435 | 1576 | ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector, |
duke@435 | 1577 | MemRegion span, |
duke@435 | 1578 | CMSBitMap* bitMap, |
duke@435 | 1579 | CMSMarkStack* markStack, |
duke@435 | 1580 | CMSMarkStack* revisitStack, |
duke@435 | 1581 | MarkRefsIntoAndScanClosure* cl, |
duke@435 | 1582 | bool should_yield): |
duke@435 | 1583 | _collector(collector), |
duke@435 | 1584 | _span(span), |
duke@435 | 1585 | _yield(should_yield), |
duke@435 | 1586 | _bitMap(bitMap), |
duke@435 | 1587 | _markStack(markStack), |
duke@435 | 1588 | _scanningClosure(cl) { |
duke@435 | 1589 | } |
duke@435 | 1590 | |
duke@435 | 1591 | void do_object(oop p) { |
duke@435 | 1592 | guarantee(false, "call do_object_careful instead"); |
duke@435 | 1593 | } |
duke@435 | 1594 | |
duke@435 | 1595 | size_t do_object_careful(oop p) { |
duke@435 | 1596 | guarantee(false, "Unexpected caller"); |
duke@435 | 1597 | return 0; |
duke@435 | 1598 | } |
duke@435 | 1599 | |
duke@435 | 1600 | size_t do_object_careful_m(oop p, MemRegion mr); |
duke@435 | 1601 | |
duke@435 | 1602 | void setFreelistLock(Mutex* m) { |
duke@435 | 1603 | _freelistLock = m; |
duke@435 | 1604 | _scanningClosure->set_freelistLock(m); |
duke@435 | 1605 | } |
duke@435 | 1606 | |
duke@435 | 1607 | private: |
duke@435 | 1608 | inline bool do_yield_check(); |
duke@435 | 1609 | |
duke@435 | 1610 | void do_yield_work(); |
duke@435 | 1611 | }; |
duke@435 | 1612 | |
duke@435 | 1613 | class SurvivorSpacePrecleanClosure: public ObjectClosureCareful { |
duke@435 | 1614 | CMSCollector* _collector; |
duke@435 | 1615 | MemRegion _span; |
duke@435 | 1616 | bool _yield; |
duke@435 | 1617 | CMSBitMap* _bit_map; |
duke@435 | 1618 | CMSMarkStack* _mark_stack; |
duke@435 | 1619 | PushAndMarkClosure* _scanning_closure; |
duke@435 | 1620 | unsigned int _before_count; |
duke@435 | 1621 | |
duke@435 | 1622 | public: |
duke@435 | 1623 | SurvivorSpacePrecleanClosure(CMSCollector* collector, |
duke@435 | 1624 | MemRegion span, |
duke@435 | 1625 | CMSBitMap* bit_map, |
duke@435 | 1626 | CMSMarkStack* mark_stack, |
duke@435 | 1627 | PushAndMarkClosure* cl, |
duke@435 | 1628 | unsigned int before_count, |
duke@435 | 1629 | bool should_yield): |
duke@435 | 1630 | _collector(collector), |
duke@435 | 1631 | _span(span), |
duke@435 | 1632 | _yield(should_yield), |
duke@435 | 1633 | _bit_map(bit_map), |
duke@435 | 1634 | _mark_stack(mark_stack), |
duke@435 | 1635 | _scanning_closure(cl), |
duke@435 | 1636 | _before_count(before_count) |
duke@435 | 1637 | { } |
duke@435 | 1638 | |
duke@435 | 1639 | void do_object(oop p) { |
duke@435 | 1640 | guarantee(false, "call do_object_careful instead"); |
duke@435 | 1641 | } |
duke@435 | 1642 | |
duke@435 | 1643 | size_t do_object_careful(oop p); |
duke@435 | 1644 | |
duke@435 | 1645 | size_t do_object_careful_m(oop p, MemRegion mr) { |
duke@435 | 1646 | guarantee(false, "Unexpected caller"); |
duke@435 | 1647 | return 0; |
duke@435 | 1648 | } |
duke@435 | 1649 | |
duke@435 | 1650 | private: |
duke@435 | 1651 | inline void do_yield_check(); |
duke@435 | 1652 | void do_yield_work(); |
duke@435 | 1653 | }; |
duke@435 | 1654 | |
duke@435 | 1655 | // This closure is used to accomplish the sweeping work |
duke@435 | 1656 | // after the second checkpoint but before the concurrent reset |
duke@435 | 1657 | // phase. |
duke@435 | 1658 | // |
duke@435 | 1659 | // Terminology |
duke@435 | 1660 | // left hand chunk (LHC) - block of one or more chunks currently being |
duke@435 | 1661 | // coalesced. The LHC is available for coalescing with a new chunk. |
duke@435 | 1662 | // right hand chunk (RHC) - block that is currently being swept that is |
duke@435 | 1663 | // free or garbage that can be coalesced with the LHC. |
duke@435 | 1664 | // _inFreeRange is true if there is currently a LHC |
duke@435 | 1665 | // _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk. |
duke@435 | 1666 | // _freeRangeInFreeLists is true if the LHC is in the free lists. |
duke@435 | 1667 | // _freeFinger is the address of the current LHC |
duke@435 | 1668 | class SweepClosure: public BlkClosureCareful { |
duke@435 | 1669 | CMSCollector* _collector; // collector doing the work |
duke@435 | 1670 | ConcurrentMarkSweepGeneration* _g; // Generation being swept |
duke@435 | 1671 | CompactibleFreeListSpace* _sp; // Space being swept |
duke@435 | 1672 | HeapWord* _limit; |
duke@435 | 1673 | Mutex* _freelistLock; // Free list lock (in space) |
duke@435 | 1674 | CMSBitMap* _bitMap; // Marking bit map (in |
duke@435 | 1675 | // generation) |
duke@435 | 1676 | bool _inFreeRange; // Indicates if we are in the |
duke@435 | 1677 | // midst of a free run |
duke@435 | 1678 | bool _freeRangeInFreeLists; |
duke@435 | 1679 | // Often, we have just found |
duke@435 | 1680 | // a free chunk and started |
duke@435 | 1681 | // a new free range; we do not |
duke@435 | 1682 | // eagerly remove this chunk from |
duke@435 | 1683 | // the free lists unless there is |
duke@435 | 1684 | // a possibility of coalescing. |
duke@435 | 1685 | // When true, this flag indicates |
duke@435 | 1686 | // that the _freeFinger below |
duke@435 | 1687 | // points to a potentially free chunk |
duke@435 | 1688 | // that may still be in the free lists |
duke@435 | 1689 | bool _lastFreeRangeCoalesced; |
duke@435 | 1690 | // free range contains chunks |
duke@435 | 1691 | // coalesced |
duke@435 | 1692 | bool _yield; |
duke@435 | 1693 | // Whether sweeping should be |
duke@435 | 1694 | // done with yields. For instance |
duke@435 | 1695 | // when done by the foreground |
duke@435 | 1696 | // collector we shouldn't yield. |
duke@435 | 1697 | HeapWord* _freeFinger; // When _inFreeRange is set, the |
duke@435 | 1698 | // pointer to the "left hand |
duke@435 | 1699 | // chunk" |
duke@435 | 1700 | size_t _freeRangeSize; |
duke@435 | 1701 | // When _inFreeRange is set, this |
duke@435 | 1702 | // indicates the accumulated size |
duke@435 | 1703 | // of the "left hand chunk" |
duke@435 | 1704 | NOT_PRODUCT( |
duke@435 | 1705 | size_t _numObjectsFreed; |
duke@435 | 1706 | size_t _numWordsFreed; |
duke@435 | 1707 | size_t _numObjectsLive; |
duke@435 | 1708 | size_t _numWordsLive; |
duke@435 | 1709 | size_t _numObjectsAlreadyFree; |
duke@435 | 1710 | size_t _numWordsAlreadyFree; |
duke@435 | 1711 | FreeChunk* _last_fc; |
duke@435 | 1712 | ) |
duke@435 | 1713 | private: |
duke@435 | 1714 | // Code that is common to a free chunk or garbage when |
duke@435 | 1715 | // encountered during sweeping. |
duke@435 | 1716 | void doPostIsFreeOrGarbageChunk(FreeChunk *fc, |
duke@435 | 1717 | size_t chunkSize); |
duke@435 | 1718 | // Process a free chunk during sweeping. |
duke@435 | 1719 | void doAlreadyFreeChunk(FreeChunk *fc); |
duke@435 | 1720 | // Process a garbage chunk during sweeping. |
duke@435 | 1721 | size_t doGarbageChunk(FreeChunk *fc); |
duke@435 | 1722 | // Process a live chunk during sweeping. |
duke@435 | 1723 | size_t doLiveChunk(FreeChunk* fc); |
duke@435 | 1724 | |
duke@435 | 1725 | // Accessors. |
duke@435 | 1726 | HeapWord* freeFinger() const { return _freeFinger; } |
duke@435 | 1727 | void set_freeFinger(HeapWord* v) { _freeFinger = v; } |
duke@435 | 1728 | size_t freeRangeSize() const { return _freeRangeSize; } |
duke@435 | 1729 | void set_freeRangeSize(size_t v) { _freeRangeSize = v; } |
duke@435 | 1730 | bool inFreeRange() const { return _inFreeRange; } |
duke@435 | 1731 | void set_inFreeRange(bool v) { _inFreeRange = v; } |
duke@435 | 1732 | bool lastFreeRangeCoalesced() const { return _lastFreeRangeCoalesced; } |
duke@435 | 1733 | void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; } |
duke@435 | 1734 | bool freeRangeInFreeLists() const { return _freeRangeInFreeLists; } |
duke@435 | 1735 | void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; } |
duke@435 | 1736 | |
duke@435 | 1737 | // Initialize a free range. |
duke@435 | 1738 | void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists); |
duke@435 | 1739 | // Return this chunk to the free lists. |
duke@435 | 1740 | void flushCurFreeChunk(HeapWord* chunk, size_t size); |
duke@435 | 1741 | |
duke@435 | 1742 | // Check if we should yield and do so when necessary. |
duke@435 | 1743 | inline void do_yield_check(HeapWord* addr); |
duke@435 | 1744 | |
duke@435 | 1745 | // Yield |
duke@435 | 1746 | void do_yield_work(HeapWord* addr); |
duke@435 | 1747 | |
duke@435 | 1748 | // Debugging/Printing |
duke@435 | 1749 | void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN; |
duke@435 | 1750 | |
duke@435 | 1751 | public: |
duke@435 | 1752 | SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g, |
duke@435 | 1753 | CMSBitMap* bitMap, bool should_yield); |
duke@435 | 1754 | ~SweepClosure(); |
duke@435 | 1755 | |
duke@435 | 1756 | size_t do_blk_careful(HeapWord* addr); |
duke@435 | 1757 | }; |
duke@435 | 1758 | |
duke@435 | 1759 | // Closures related to weak references processing |
duke@435 | 1760 | |
duke@435 | 1761 | // During CMS' weak reference processing, this is a |
duke@435 | 1762 | // work-routine/closure used to complete transitive |
duke@435 | 1763 | // marking of objects as live after a certain point |
duke@435 | 1764 | // in which an initial set has been completely accumulated. |
ysr@887 | 1765 | // This closure is currently used both during the final |
ysr@887 | 1766 | // remark stop-world phase, as well as during the concurrent |
ysr@887 | 1767 | // precleaning of the discovered reference lists. |
duke@435 | 1768 | class CMSDrainMarkingStackClosure: public VoidClosure { |
duke@435 | 1769 | CMSCollector* _collector; |
duke@435 | 1770 | MemRegion _span; |
duke@435 | 1771 | CMSMarkStack* _mark_stack; |
duke@435 | 1772 | CMSBitMap* _bit_map; |
duke@435 | 1773 | CMSKeepAliveClosure* _keep_alive; |
ysr@887 | 1774 | bool _concurrent_precleaning; |
duke@435 | 1775 | public: |
duke@435 | 1776 | CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span, |
duke@435 | 1777 | CMSBitMap* bit_map, CMSMarkStack* mark_stack, |
ysr@887 | 1778 | CMSKeepAliveClosure* keep_alive, |
ysr@887 | 1779 | bool cpc): |
duke@435 | 1780 | _collector(collector), |
duke@435 | 1781 | _span(span), |
duke@435 | 1782 | _bit_map(bit_map), |
duke@435 | 1783 | _mark_stack(mark_stack), |
ysr@887 | 1784 | _keep_alive(keep_alive), |
ysr@887 | 1785 | _concurrent_precleaning(cpc) { |
ysr@887 | 1786 | assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(), |
ysr@887 | 1787 | "Mismatch"); |
ysr@887 | 1788 | } |
duke@435 | 1789 | |
duke@435 | 1790 | void do_void(); |
duke@435 | 1791 | }; |
duke@435 | 1792 | |
duke@435 | 1793 | // A parallel version of CMSDrainMarkingStackClosure above. |
duke@435 | 1794 | class CMSParDrainMarkingStackClosure: public VoidClosure { |
duke@435 | 1795 | CMSCollector* _collector; |
duke@435 | 1796 | MemRegion _span; |
duke@435 | 1797 | OopTaskQueue* _work_queue; |
duke@435 | 1798 | CMSBitMap* _bit_map; |
duke@435 | 1799 | CMSInnerParMarkAndPushClosure _mark_and_push; |
duke@435 | 1800 | |
duke@435 | 1801 | public: |
duke@435 | 1802 | CMSParDrainMarkingStackClosure(CMSCollector* collector, |
duke@435 | 1803 | MemRegion span, CMSBitMap* bit_map, |
jmasa@1370 | 1804 | CMSMarkStack* revisit_stack, |
duke@435 | 1805 | OopTaskQueue* work_queue): |
duke@435 | 1806 | _collector(collector), |
duke@435 | 1807 | _span(span), |
duke@435 | 1808 | _bit_map(bit_map), |
duke@435 | 1809 | _work_queue(work_queue), |
jmasa@1370 | 1810 | _mark_and_push(collector, span, bit_map, revisit_stack, work_queue) { } |
duke@435 | 1811 | |
duke@435 | 1812 | public: |
duke@435 | 1813 | void trim_queue(uint max); |
duke@435 | 1814 | void do_void(); |
duke@435 | 1815 | }; |
duke@435 | 1816 | |
duke@435 | 1817 | // Allow yielding or short-circuiting of reference list |
duke@435 | 1818 | // prelceaning work. |
duke@435 | 1819 | class CMSPrecleanRefsYieldClosure: public YieldClosure { |
duke@435 | 1820 | CMSCollector* _collector; |
duke@435 | 1821 | void do_yield_work(); |
duke@435 | 1822 | public: |
duke@435 | 1823 | CMSPrecleanRefsYieldClosure(CMSCollector* collector): |
duke@435 | 1824 | _collector(collector) {} |
duke@435 | 1825 | virtual bool should_return(); |
duke@435 | 1826 | }; |
duke@435 | 1827 | |
duke@435 | 1828 | |
duke@435 | 1829 | // Convenience class that locks free list locks for given CMS collector |
duke@435 | 1830 | class FreelistLocker: public StackObj { |
duke@435 | 1831 | private: |
duke@435 | 1832 | CMSCollector* _collector; |
duke@435 | 1833 | public: |
duke@435 | 1834 | FreelistLocker(CMSCollector* collector): |
duke@435 | 1835 | _collector(collector) { |
duke@435 | 1836 | _collector->getFreelistLocks(); |
duke@435 | 1837 | } |
duke@435 | 1838 | |
duke@435 | 1839 | ~FreelistLocker() { |
duke@435 | 1840 | _collector->releaseFreelistLocks(); |
duke@435 | 1841 | } |
duke@435 | 1842 | }; |
duke@435 | 1843 | |
duke@435 | 1844 | // Mark all dead objects in a given space. |
duke@435 | 1845 | class MarkDeadObjectsClosure: public BlkClosure { |
duke@435 | 1846 | const CMSCollector* _collector; |
duke@435 | 1847 | const CompactibleFreeListSpace* _sp; |
duke@435 | 1848 | CMSBitMap* _live_bit_map; |
duke@435 | 1849 | CMSBitMap* _dead_bit_map; |
duke@435 | 1850 | public: |
duke@435 | 1851 | MarkDeadObjectsClosure(const CMSCollector* collector, |
duke@435 | 1852 | const CompactibleFreeListSpace* sp, |
duke@435 | 1853 | CMSBitMap *live_bit_map, |
duke@435 | 1854 | CMSBitMap *dead_bit_map) : |
duke@435 | 1855 | _collector(collector), |
duke@435 | 1856 | _sp(sp), |
duke@435 | 1857 | _live_bit_map(live_bit_map), |
duke@435 | 1858 | _dead_bit_map(dead_bit_map) {} |
duke@435 | 1859 | size_t do_blk(HeapWord* addr); |
duke@435 | 1860 | }; |