Mon, 07 Jul 2014 10:12:40 +0200
8049421: G1 Class Unloading after completing a concurrent mark cycle
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Contributed-by: stefan.karlsson@oracle.com, mikael.gerdin@oracle.com
duke@435 | 1 | /* |
jwilhelm@4576 | 2 | * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP |
stefank@2314 | 26 | #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP |
stefank@2314 | 27 | |
sla@5237 | 28 | #include "gc_implementation/shared/gcHeapSummary.hpp" |
stefank@2314 | 29 | #include "gc_implementation/shared/gSpaceCounters.hpp" |
stefank@2314 | 30 | #include "gc_implementation/shared/gcStats.hpp" |
sla@5237 | 31 | #include "gc_implementation/shared/gcWhen.hpp" |
stefank@2314 | 32 | #include "gc_implementation/shared/generationCounters.hpp" |
jmasa@3730 | 33 | #include "memory/freeBlockDictionary.hpp" |
stefank@2314 | 34 | #include "memory/generation.hpp" |
stefank@6992 | 35 | #include "memory/iterator.hpp" |
stefank@2314 | 36 | #include "runtime/mutexLocker.hpp" |
stefank@2314 | 37 | #include "runtime/virtualspace.hpp" |
stefank@2314 | 38 | #include "services/memoryService.hpp" |
stefank@2314 | 39 | #include "utilities/bitMap.inline.hpp" |
stefank@2314 | 40 | #include "utilities/stack.inline.hpp" |
stefank@2314 | 41 | #include "utilities/taskqueue.hpp" |
stefank@2314 | 42 | #include "utilities/yieldingWorkgroup.hpp" |
stefank@2314 | 43 | |
duke@435 | 44 | // ConcurrentMarkSweepGeneration is in support of a concurrent |
duke@435 | 45 | // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Schenker |
duke@435 | 46 | // style. We assume, for now, that this generation is always the |
coleenp@4037 | 47 | // seniormost generation and for simplicity |
duke@435 | 48 | // in the first implementation, that this generation is a single compactible |
duke@435 | 49 | // space. Neither of these restrictions appears essential, and will be |
duke@435 | 50 | // relaxed in the future when more time is available to implement the |
duke@435 | 51 | // greater generality (and there's a need for it). |
duke@435 | 52 | // |
duke@435 | 53 | // Concurrent mode failures are currently handled by |
duke@435 | 54 | // means of a sliding mark-compact. |
duke@435 | 55 | |
duke@435 | 56 | class CMSAdaptiveSizePolicy; |
duke@435 | 57 | class CMSConcMarkingTask; |
duke@435 | 58 | class CMSGCAdaptivePolicyCounters; |
sla@5237 | 59 | class CMSTracer; |
sla@5237 | 60 | class ConcurrentGCTimer; |
duke@435 | 61 | class ConcurrentMarkSweepGeneration; |
duke@435 | 62 | class ConcurrentMarkSweepPolicy; |
duke@435 | 63 | class ConcurrentMarkSweepThread; |
duke@435 | 64 | class CompactibleFreeListSpace; |
duke@435 | 65 | class FreeChunk; |
duke@435 | 66 | class PromotionInfo; |
duke@435 | 67 | class ScanMarkedObjectsAgainCarefullyClosure; |
jmasa@4900 | 68 | class TenuredGeneration; |
sla@5237 | 69 | class SerialOldTracer; |
duke@435 | 70 | |
duke@435 | 71 | // A generic CMS bit map. It's the basis for both the CMS marking bit map |
duke@435 | 72 | // as well as for the mod union table (in each case only a subset of the |
duke@435 | 73 | // methods are used). This is essentially a wrapper around the BitMap class, |
duke@435 | 74 | // with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map, |
duke@435 | 75 | // we have _shifter == 0. and for the mod union table we have |
duke@435 | 76 | // shifter == CardTableModRefBS::card_shift - LogHeapWordSize.) |
duke@435 | 77 | // XXX 64-bit issues in BitMap? |
duke@435 | 78 | class CMSBitMap VALUE_OBJ_CLASS_SPEC { |
duke@435 | 79 | friend class VMStructs; |
duke@435 | 80 | |
duke@435 | 81 | HeapWord* _bmStartWord; // base address of range covered by map |
duke@435 | 82 | size_t _bmWordSize; // map size (in #HeapWords covered) |
duke@435 | 83 | const int _shifter; // shifts to convert HeapWord to bit position |
duke@435 | 84 | VirtualSpace _virtual_space; // underlying the bit map |
duke@435 | 85 | BitMap _bm; // the bit map itself |
duke@435 | 86 | public: |
duke@435 | 87 | Mutex* const _lock; // mutex protecting _bm; |
duke@435 | 88 | |
duke@435 | 89 | public: |
duke@435 | 90 | // constructor; does not allocate the backing store -- call allocate() before use |
duke@435 | 91 | CMSBitMap(int shifter, int mutex_rank, const char* mutex_name); |
duke@435 | 92 | |
duke@435 | 93 | // allocates the actual storage for the map, covering MemRegion mr |
duke@435 | 94 | bool allocate(MemRegion mr); |
duke@435 | 95 | // field getter |
duke@435 | 96 | Mutex* lock() const { return _lock; } |
duke@435 | 97 | // locking verifier convenience function |
duke@435 | 98 | void assert_locked() const PRODUCT_RETURN; |
duke@435 | 99 | |
duke@435 | 100 | // inquiries |
duke@435 | 101 | HeapWord* startWord() const { return _bmStartWord; } |
duke@435 | 102 | size_t sizeInWords() const { return _bmWordSize; } |
duke@435 | 103 | size_t sizeInBits() const { return _bm.size(); } |
duke@435 | 104 | // the following is one past the last word in space |
duke@435 | 105 | HeapWord* endWord() const { return _bmStartWord + _bmWordSize; } |
duke@435 | 106 | |
duke@435 | 107 | // reading marks |
duke@435 | 108 | bool isMarked(HeapWord* addr) const; |
duke@435 | 109 | bool par_isMarked(HeapWord* addr) const; // do not lock checks |
duke@435 | 110 | bool isUnmarked(HeapWord* addr) const; |
duke@435 | 111 | bool isAllClear() const; |
duke@435 | 112 | |
duke@435 | 113 | // writing marks |
duke@435 | 114 | void mark(HeapWord* addr); |
duke@435 | 115 | // For marking by parallel GC threads; |
duke@435 | 116 | // returns true if we did, false if another thread did |
duke@435 | 117 | bool par_mark(HeapWord* addr); |
duke@435 | 118 | |
duke@435 | 119 | void mark_range(MemRegion mr); |
duke@435 | 120 | void par_mark_range(MemRegion mr); |
duke@435 | 121 | void mark_large_range(MemRegion mr); |
duke@435 | 122 | void par_mark_large_range(MemRegion mr); |
duke@435 | 123 | void par_clear(HeapWord* addr); // For unmarking by parallel GC threads. |
duke@435 | 124 | void clear_range(MemRegion mr); |
duke@435 | 125 | void par_clear_range(MemRegion mr); |
duke@435 | 126 | void clear_large_range(MemRegion mr); |
duke@435 | 127 | void par_clear_large_range(MemRegion mr); |
duke@435 | 128 | void clear_all(); |
duke@435 | 129 | void clear_all_incrementally(); // Not yet implemented!! |
duke@435 | 130 | |
duke@435 | 131 | NOT_PRODUCT( |
duke@435 | 132 | // checks the memory region for validity |
duke@435 | 133 | void region_invariant(MemRegion mr); |
duke@435 | 134 | ) |
duke@435 | 135 | |
duke@435 | 136 | // iteration |
duke@435 | 137 | void iterate(BitMapClosure* cl) { |
duke@435 | 138 | _bm.iterate(cl); |
duke@435 | 139 | } |
duke@435 | 140 | void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right); |
duke@435 | 141 | void dirty_range_iterate_clear(MemRegionClosure* cl); |
duke@435 | 142 | void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl); |
duke@435 | 143 | |
duke@435 | 144 | // auxiliary support for iteration |
duke@435 | 145 | HeapWord* getNextMarkedWordAddress(HeapWord* addr) const; |
duke@435 | 146 | HeapWord* getNextMarkedWordAddress(HeapWord* start_addr, |
duke@435 | 147 | HeapWord* end_addr) const; |
duke@435 | 148 | HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const; |
duke@435 | 149 | HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr, |
duke@435 | 150 | HeapWord* end_addr) const; |
duke@435 | 151 | MemRegion getAndClearMarkedRegion(HeapWord* addr); |
duke@435 | 152 | MemRegion getAndClearMarkedRegion(HeapWord* start_addr, |
duke@435 | 153 | HeapWord* end_addr); |
duke@435 | 154 | |
duke@435 | 155 | // conversion utilities |
duke@435 | 156 | HeapWord* offsetToHeapWord(size_t offset) const; // bit offset -> heap address |
duke@435 | 157 | size_t heapWordToOffset(HeapWord* addr) const; // heap address -> bit offset |
duke@435 | 158 | size_t heapWordDiffToOffsetDiff(size_t diff) const; // word distance -> bit distance |
duke@435 | 159 | |
stefank@4904 | 160 | void print_on_error(outputStream* st, const char* prefix) const; |
stefank@4904 | 161 | |
duke@435 | 162 | // debugging |
duke@435 | 163 | // is this address range covered by the bit-map? |
duke@435 | 164 | NOT_PRODUCT( |
duke@435 | 165 | bool covers(MemRegion mr) const; |
duke@435 | 166 | bool covers(HeapWord* start, size_t size = 0) const; |
duke@435 | 167 | ) |
duke@435 | 168 | void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN; |
duke@435 | 169 | }; |
duke@435 | 170 | |
duke@435 | 171 | // Represents a marking stack used by the CMS collector. |
duke@435 | 172 | // Ideally this should be GrowableArray<> just like MSC's marking stack(s). |
zgu@3900 | 173 | class CMSMarkStack: public CHeapObj<mtGC> { |
duke@435 | 174 | // |
duke@435 | 175 | friend class CMSCollector; // to get at expansion stats further below |
duke@435 | 176 | // |
duke@435 | 177 | |
duke@435 | 178 | VirtualSpace _virtual_space; // space for the stack |
duke@435 | 179 | oop* _base; // bottom of stack |
duke@435 | 180 | size_t _index; // one more than last occupied index |
duke@435 | 181 | size_t _capacity; // max #elements |
duke@435 | 182 | Mutex _par_lock; // an advisory lock used in case of parallel access |
duke@435 | 183 | NOT_PRODUCT(size_t _max_depth;) // max depth plumbed during run |
duke@435 | 184 | |
duke@435 | 185 | protected: |
duke@435 | 186 | size_t _hit_limit; // we hit max stack size limit |
duke@435 | 187 | size_t _failed_double; // we failed expansion before hitting limit |
duke@435 | 188 | |
duke@435 | 189 | public: |
duke@435 | 190 | CMSMarkStack(): |
duke@435 | 191 | _par_lock(Mutex::event, "CMSMarkStack._par_lock", true), |
duke@435 | 192 | _hit_limit(0), |
duke@435 | 193 | _failed_double(0) {} |
duke@435 | 194 | |
duke@435 | 195 | bool allocate(size_t size); // allocate backing store for "size" elements |
duke@435 | 196 | |
duke@435 | 197 | size_t capacity() const { return _capacity; } |
duke@435 | 198 | |
duke@435 | 199 | oop pop() { // returns NULL when the stack is empty |
duke@435 | 200 | if (!isEmpty()) { |
duke@435 | 201 | return _base[--_index] ; |
duke@435 | 202 | } |
duke@435 | 203 | return NULL; |
duke@435 | 204 | } |
duke@435 | 205 | |
duke@435 | 206 | bool push(oop ptr) { // returns false when the stack is full |
duke@435 | 207 | if (isFull()) { |
duke@435 | 208 | return false; |
duke@435 | 209 | } else { |
duke@435 | 210 | _base[_index++] = ptr; |
duke@435 | 211 | NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index)); |
duke@435 | 212 | return true; |
duke@435 | 213 | } |
duke@435 | 214 | } |
duke@435 | 215 | |
duke@435 | 216 | bool isEmpty() const { return _index == 0; } |
duke@435 | 217 | bool isFull() const { |
duke@435 | 218 | assert(_index <= _capacity, "buffer overflow"); |
duke@435 | 219 | return _index == _capacity; |
duke@435 | 220 | } |
duke@435 | 221 | |
duke@435 | 222 | size_t length() { return _index; } // current number of elements |
duke@435 | 223 | |
duke@435 | 224 | // "Parallel versions" of some of the above |
duke@435 | 225 | oop par_pop() { |
duke@435 | 226 | // lock and pop |
duke@435 | 227 | MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag); |
duke@435 | 228 | return pop(); |
duke@435 | 229 | } |
duke@435 | 230 | |
duke@435 | 231 | bool par_push(oop ptr) { |
duke@435 | 232 | // lock and push |
duke@435 | 233 | MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag); |
duke@435 | 234 | return push(ptr); |
duke@435 | 235 | } |
duke@435 | 236 | |
duke@435 | 237 | // Forcibly reset the stack, losing all of its contents. |
duke@435 | 238 | void reset() { |
duke@435 | 239 | _index = 0; |
duke@435 | 240 | } |
duke@435 | 241 | |
duke@435 | 242 | // Expand the stack, typically in response to an overflow condition |
duke@435 | 243 | void expand(); |
duke@435 | 244 | |
duke@435 | 245 | // Compute the least valued stack element; result is the minimum of "low" |
duke@435 | 246 | oop least_value(HeapWord* low) { // and all current stack entries (linear scan) |
duke@435 | 247 | oop least = (oop)low; |
duke@435 | 248 | for (size_t i = 0; i < _index; i++) { |
duke@435 | 249 | least = MIN2(least, _base[i]); |
duke@435 | 250 | } |
duke@435 | 251 | return least; |
duke@435 | 252 | } |
duke@435 | 253 | |
duke@435 | 254 | // Exposed here to allow stack expansion in the parallel (||) case |
duke@435 | 255 | Mutex* par_lock() { return &_par_lock; } |
duke@435 | 256 | }; |
duke@435 | 257 | |
duke@435 | 258 | class CardTableRS; |
duke@435 | 259 | class CMSParGCThreadState; |
duke@435 | 260 | |
duke@435 | 261 | class ModUnionClosure: public MemRegionClosure { |
duke@435 | 262 | protected: |
duke@435 | 263 | CMSBitMap* _t; // target bit map (presumably the mod union table; see CMSBitMap comment above) |
duke@435 | 264 | public: |
duke@435 | 265 | ModUnionClosure(CMSBitMap* t): _t(t) { } |
duke@435 | 266 | void do_MemRegion(MemRegion mr); // defined in the .cpp file |
duke@435 | 267 | }; |
duke@435 | 268 | |
duke@435 | 269 | class ModUnionClosurePar: public ModUnionClosure { // "Par": variant for use by parallel GC threads |
duke@435 | 270 | public: |
duke@435 | 271 | ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { } |
duke@435 | 272 | void do_MemRegion(MemRegion mr); // hides the serial version above |
duke@435 | 273 | }; |
duke@435 | 274 | |
duke@435 | 275 | // Survivor Chunk Array in support of parallelization of |
duke@435 | 276 | // Survivor Space rescan. |
zgu@3900 | 277 | class ChunkArray: public CHeapObj<mtGC> { |
duke@435 | 278 | size_t _index; // next free slot, i.e. number of samples recorded |
duke@435 | 279 | size_t _capacity; // max #samples the array can hold |
ysr@2108 | 280 | size_t _overflows; // #times record_sample() was called with a full array |
duke@435 | 281 | HeapWord** _array; // storage for array |
duke@435 | 282 | |
duke@435 | 283 | public: |
ysr@2108 | 284 | ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {} |
duke@435 | 285 | ChunkArray(HeapWord** a, size_t c): |
ysr@2108 | 286 | _index(0), _capacity(c), _overflows(0), _array(a) {} |
duke@435 | 287 | |
duke@435 | 288 | HeapWord** array() { return _array; } |
duke@435 | 289 | void set_array(HeapWord** a) { _array = a; } |
duke@435 | 290 | |
duke@435 | 291 | size_t capacity() { return _capacity; } |
duke@435 | 292 | void set_capacity(size_t c) { _capacity = c; } |
duke@435 | 293 | |
duke@435 | 294 | size_t end() { |
ysr@2108 | 295 | assert(_index <= capacity(), |
ysr@2108 | 296 | err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds", |
ysr@2108 | 297 | _index, _capacity)); |
duke@435 | 298 | return _index; |
duke@435 | 299 | } // exclusive |
duke@435 | 300 | |
duke@435 | 301 | HeapWord* nth(size_t n) { |
duke@435 | 302 | assert(n < end(), "Out of bounds access"); |
duke@435 | 303 | return _array[n]; |
duke@435 | 304 | } |
duke@435 | 305 | |
duke@435 | 306 | void reset() { // discard all samples; report (if enabled) and clear the overflow count |
duke@435 | 307 | _index = 0; |
ysr@2108 | 308 | if (_overflows > 0 && PrintCMSStatistics > 1) { |
ysr@2108 | 309 | warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times", |
ysr@2108 | 310 | _capacity, _overflows); |
ysr@2108 | 311 | } |
ysr@2108 | 312 | _overflows = 0; |
duke@435 | 313 | } |
duke@435 | 314 | |
duke@435 | 315 | void record_sample(HeapWord* p, size_t sz) { |
duke@435 | 316 | // For now we do not do anything with the size |
duke@435 | 317 | if (_index < _capacity) { |
duke@435 | 318 | _array[_index++] = p; |
ysr@2108 | 319 | } else { |
ysr@2108 | 320 | ++_overflows; // silently drop the sample, but remember that we did |
ysr@2108 | 321 | assert(_index == _capacity, |
ysr@2108 | 322 | err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT |
ysr@2108 | 323 | "): out of bounds at overflow#" SIZE_FORMAT, |
ysr@2108 | 324 | _index, _capacity, _overflows)); |
duke@435 | 325 | } |
duke@435 | 326 | } |
duke@435 | 327 | }; |
duke@435 | 328 | |
duke@435 | 329 | // |
duke@435 | 330 | // Timing, allocation and promotion statistics for gc scheduling and incremental |
duke@435 | 331 | // mode pacing. Most statistics are exponential averages. |
duke@435 | 332 | // |
duke@435 | 333 | class CMSStats VALUE_OBJ_CLASS_SPEC { |
duke@435 | 334 | private: |
duke@435 | 335 | ConcurrentMarkSweepGeneration* const _cms_gen; // The cms (old) gen. |
duke@435 | 336 | |
duke@435 | 337 | // The following are exponential averages with factor alpha: |
duke@435 | 338 | // avg = (100 - alpha) * avg + alpha * cur_sample |
duke@435 | 339 | // |
duke@435 | 340 | // The durations measure: end_time[n] - start_time[n] |
duke@435 | 341 | // The periods measure: start_time[n] - start_time[n-1] |
duke@435 | 342 | // |
duke@435 | 343 | // The cms period and duration include only concurrent collections; time spent |
duke@435 | 344 | // in foreground cms collections due to System.gc() or because of a failure to |
duke@435 | 345 | // keep up are not included. |
duke@435 | 346 | // |
duke@435 | 347 | // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the |
duke@435 | 348 | // real value, but is used only after the first period. A value of 100 is |
duke@435 | 349 | // used for the first sample so it gets the entire weight. |
duke@435 | 350 | unsigned int _saved_alpha; // 0-100 |
duke@435 | 351 | unsigned int _gc0_alpha; |
duke@435 | 352 | unsigned int _cms_alpha; |
duke@435 | 353 | |
duke@435 | 354 | double _gc0_duration; |
duke@435 | 355 | double _gc0_period; |
duke@435 | 356 | size_t _gc0_promoted; // bytes promoted per gc0 |
duke@435 | 357 | double _cms_duration; |
duke@435 | 358 | double _cms_duration_pre_sweep; // time from initiation to start of sweep |
duke@435 | 359 | double _cms_duration_per_mb; |
duke@435 | 360 | double _cms_period; |
duke@435 | 361 | size_t _cms_allocated; // bytes of direct allocation per gc0 period |
duke@435 | 362 | |
duke@435 | 363 | // Timers. |
duke@435 | 364 | elapsedTimer _cms_timer; |
duke@435 | 365 | TimeStamp _gc0_begin_time; |
duke@435 | 366 | TimeStamp _cms_begin_time; |
duke@435 | 367 | TimeStamp _cms_end_time; |
duke@435 | 368 | |
duke@435 | 369 | // Snapshots of the amount used in the CMS generation. |
duke@435 | 370 | size_t _cms_used_at_gc0_begin; |
duke@435 | 371 | size_t _cms_used_at_gc0_end; |
duke@435 | 372 | size_t _cms_used_at_cms_begin; |
duke@435 | 373 | |
duke@435 | 374 | // Used to prevent the duty cycle from being reduced in the middle of a cms |
duke@435 | 375 | // cycle. |
duke@435 | 376 | bool _allow_duty_cycle_reduction; |
duke@435 | 377 | |
duke@435 | 378 | enum { |
duke@435 | 379 | _GC0_VALID = 0x1, |
duke@435 | 380 | _CMS_VALID = 0x2, |
duke@435 | 381 | _ALL_VALID = _GC0_VALID | _CMS_VALID |
duke@435 | 382 | }; |
duke@435 | 383 | |
duke@435 | 384 | unsigned int _valid_bits; // OR of the _GC0_VALID/_CMS_VALID flags above |
duke@435 | 385 | |
duke@435 | 386 | unsigned int _icms_duty_cycle; // icms duty cycle (0-100). |
duke@435 | 387 | |
duke@435 | 388 | protected: |
duke@435 | 389 | |
duke@435 | 390 | // Return a duty cycle that avoids wild oscillations, by limiting the amount |
duke@435 | 391 | // of change between old_duty_cycle and new_duty_cycle (the latter is treated |
duke@435 | 392 | // as a recommended value). |
duke@435 | 393 | static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle, |
duke@435 | 394 | unsigned int new_duty_cycle); |
duke@435 | 395 | unsigned int icms_update_duty_cycle_impl(); // implementation helper for icms_update_duty_cycle() below |
duke@435 | 396 | |
ysr@1580 | 397 | // In support of adjusting of cms trigger ratios based on history |
ysr@1580 | 398 | // of concurrent mode failure. |
ysr@1580 | 399 | double cms_free_adjustment_factor(size_t free) const; |
ysr@1580 | 400 | void adjust_cms_free_adjustment_factor(bool fail, size_t free); |
ysr@1580 | 401 | |
duke@435 | 402 | public: |
duke@435 | 403 | CMSStats(ConcurrentMarkSweepGeneration* cms_gen, |
duke@435 | 404 | unsigned int alpha = CMSExpAvgFactor); |
duke@435 | 405 | |
duke@435 | 406 | // Whether or not the statistics contain valid data; higher level statistics |
duke@435 | 407 | // cannot be called until this returns true (they require at least one young |
duke@435 | 408 | // gen and one cms cycle to have completed). |
duke@435 | 409 | bool valid() const; |
duke@435 | 410 | |
duke@435 | 411 | // Record statistics. |
duke@435 | 412 | void record_gc0_begin(); |
duke@435 | 413 | void record_gc0_end(size_t cms_gen_bytes_used); |
duke@435 | 414 | void record_cms_begin(); |
duke@435 | 415 | void record_cms_end(); |
duke@435 | 416 | |
duke@435 | 417 | // Allow management of the cms timer, which must be stopped/started around |
duke@435 | 418 | // yield points. |
duke@435 | 419 | elapsedTimer& cms_timer() { return _cms_timer; } |
duke@435 | 420 | void start_cms_timer() { _cms_timer.start(); } |
duke@435 | 421 | void stop_cms_timer() { _cms_timer.stop(); } |
duke@435 | 422 | |
duke@435 | 423 | // Basic statistics; units are seconds or bytes. |
duke@435 | 424 | double gc0_period() const { return _gc0_period; } |
duke@435 | 425 | double gc0_duration() const { return _gc0_duration; } |
duke@435 | 426 | size_t gc0_promoted() const { return _gc0_promoted; } |
duke@435 | 427 | double cms_period() const { return _cms_period; } |
duke@435 | 428 | double cms_duration() const { return _cms_duration; } |
duke@435 | 429 | double cms_duration_per_mb() const { return _cms_duration_per_mb; } |
duke@435 | 430 | size_t cms_allocated() const { return _cms_allocated; } |
duke@435 | 431 | |
duke@435 | 432 | size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;} |
duke@435 | 433 | |
duke@435 | 434 | // Seconds since the last background cms cycle began or ended. |
duke@435 | 435 | double cms_time_since_begin() const; |
duke@435 | 436 | double cms_time_since_end() const; |
duke@435 | 437 | |
duke@435 | 438 | // Higher level statistics--caller must check that valid() returns true before |
duke@435 | 439 | // calling. |
duke@435 | 440 | |
duke@435 | 441 | // Returns bytes promoted per second of wall clock time. |
duke@435 | 442 | double promotion_rate() const; |
duke@435 | 443 | |
duke@435 | 444 | // Returns bytes directly allocated per second of wall clock time. |
duke@435 | 445 | double cms_allocation_rate() const; |
duke@435 | 446 | |
duke@435 | 447 | // Rate at which space in the cms generation is being consumed (sum of the |
duke@435 | 448 | // above two). |
duke@435 | 449 | double cms_consumption_rate() const; |
duke@435 | 450 | |
duke@435 | 451 | // Returns an estimate of the number of seconds until the cms generation will |
duke@435 | 452 | // fill up, assuming no collection work is done. |
duke@435 | 453 | double time_until_cms_gen_full() const; |
duke@435 | 454 | |
duke@435 | 455 | // Returns an estimate of the number of seconds remaining until |
duke@435 | 456 | // the cms generation collection should start. |
duke@435 | 457 | double time_until_cms_start() const; |
duke@435 | 458 | |
duke@435 | 459 | // End of higher level statistics. |
duke@435 | 460 | |
duke@435 | 461 | // Returns the cms incremental mode duty cycle, as a percentage (0-100). |
duke@435 | 462 | unsigned int icms_duty_cycle() const { return _icms_duty_cycle; } |
duke@435 | 463 | |
duke@435 | 464 | // Update the duty cycle and return the new value. |
duke@435 | 465 | unsigned int icms_update_duty_cycle(); |
duke@435 | 466 | |
duke@435 | 467 | // Debugging. |
duke@435 | 468 | void print_on(outputStream* st) const PRODUCT_RETURN; |
duke@435 | 469 | void print() const { print_on(gclog_or_tty); } |
duke@435 | 470 | }; |
duke@435 | 471 | |
duke@435 | 472 | // A closure related to weak references processing which |
duke@435 | 473 | // we embed in the CMSCollector, since we need to pass |
duke@435 | 474 | // it to the reference processor for secondary filtering |
duke@435 | 475 | // of references based on reachability of referent; |
duke@435 | 476 | // see role of _is_alive_non_header closure in the |
duke@435 | 477 | // ReferenceProcessor class. |
duke@435 | 478 | // For objects in the CMS generation, this closure checks |
duke@435 | 479 | // if the object is "live" (reachable). Used in weak |
duke@435 | 480 | // reference processing. |
duke@435 | 481 | class CMSIsAliveClosure: public BoolObjectClosure { |
ysr@578 | 482 | const MemRegion _span; // region this closure reasons about; never empty (asserted in ctor) |
duke@435 | 483 | const CMSBitMap* _bit_map; // bit map recording marked objects -- TODO confirm against .cpp |
duke@435 | 484 | |
duke@435 | 485 | friend class CMSCollector; |
duke@435 | 486 | public: |
duke@435 | 487 | CMSIsAliveClosure(MemRegion span, |
duke@435 | 488 | CMSBitMap* bit_map): |
duke@435 | 489 | _span(span), |
ysr@578 | 490 | _bit_map(bit_map) { |
ysr@578 | 491 | assert(!span.is_empty(), "Empty span could spell trouble"); |
ysr@578 | 492 | } |
ysr@578 | 493 | |
duke@435 | 494 | bool do_object_b(oop obj); // liveness query; defined in the .cpp file |
duke@435 | 495 | }; |
duke@435 | 496 | |
duke@435 | 497 | |
duke@435 | 498 | // Implements AbstractRefProcTaskExecutor for CMS. |
duke@435 | 499 | class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor { |
duke@435 | 500 | public: |
duke@435 | 501 | |
duke@435 | 502 | CMSRefProcTaskExecutor(CMSCollector& collector) |
duke@435 | 503 | : _collector(collector) |
duke@435 | 504 | { } |
duke@435 | 505 | |
duke@435 | 506 | // Executes a task using worker threads. |
duke@435 | 507 | virtual void execute(ProcessTask& task); |
duke@435 | 508 | virtual void execute(EnqueueTask& task); |
duke@435 | 509 | private: |
duke@435 | 510 | CMSCollector& _collector; // collector on whose behalf reference-processing tasks run (reference, not owned) |
duke@435 | 511 | }; |
duke@435 | 512 | |
duke@435 | 513 | |
zgu@3900 | 514 | class CMSCollector: public CHeapObj<mtGC> { |
duke@435 | 515 | friend class VMStructs; |
duke@435 | 516 | friend class ConcurrentMarkSweepThread; |
duke@435 | 517 | friend class ConcurrentMarkSweepGeneration; |
duke@435 | 518 | friend class CompactibleFreeListSpace; |
jmasa@5461 | 519 | friend class CMSParMarkTask; |
jmasa@5461 | 520 | friend class CMSParInitialMarkTask; |
duke@435 | 521 | friend class CMSParRemarkTask; |
duke@435 | 522 | friend class CMSConcMarkingTask; |
duke@435 | 523 | friend class CMSRefProcTaskProxy; |
duke@435 | 524 | friend class CMSRefProcTaskExecutor; |
duke@435 | 525 | friend class ScanMarkedObjectsAgainCarefullyClosure; // for sampling eden |
duke@435 | 526 | friend class SurvivorSpacePrecleanClosure; // --- ditto ------- |
duke@435 | 527 | friend class PushOrMarkClosure; // to access _restart_addr |
duke@435 | 528 | friend class Par_PushOrMarkClosure; // to access _restart_addr |
duke@435 | 529 | friend class MarkFromRootsClosure; // -- ditto -- |
duke@435 | 530 | // ... and for clearing cards |
duke@435 | 531 | friend class Par_MarkFromRootsClosure; // to access _restart_addr |
duke@435 | 532 | // ... and for clearing cards |
duke@435 | 533 | friend class Par_ConcMarkingClosure; // to access _restart_addr etc. |
duke@435 | 534 | friend class MarkFromRootsVerifyClosure; // to access _restart_addr |
duke@435 | 535 | friend class PushAndMarkVerifyClosure; // -- ditto -- |
duke@435 | 536 | friend class MarkRefsIntoAndScanClosure; // to access _overflow_list |
duke@435 | 537 | friend class PushAndMarkClosure; // -- ditto -- |
duke@435 | 538 | friend class Par_PushAndMarkClosure; // -- ditto -- |
duke@435 | 539 | friend class CMSKeepAliveClosure; // -- ditto -- |
duke@435 | 540 | friend class CMSDrainMarkingStackClosure; // -- ditto -- |
duke@435 | 541 | friend class CMSInnerParMarkAndPushClosure; // -- ditto -- |
duke@435 | 542 | NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list |
duke@435 | 543 | friend class ReleaseForegroundGC; // to access _foregroundGCShouldWait |
duke@435 | 544 | friend class VM_CMS_Operation; |
duke@435 | 545 | friend class VM_CMS_Initial_Mark; |
duke@435 | 546 | friend class VM_CMS_Final_Remark; |
kevinw@2058 | 547 | friend class TraceCMSMemoryManagerStats; |
duke@435 | 548 | |
duke@435 | 549 | private: |
duke@435 | 550 | jlong _time_of_last_gc; |
duke@435 | 551 | void update_time_of_last_gc(jlong now) { |
duke@435 | 552 | _time_of_last_gc = now; |
duke@435 | 553 | } |
duke@435 | 554 | |
duke@435 | 555 | OopTaskQueueSet* _task_queues; |
duke@435 | 556 | |
duke@435 | 557 | // Overflow list of grey objects, threaded through mark-word |
duke@435 | 558 | // Manipulated with CAS in the parallel/multi-threaded case. |
duke@435 | 559 | oop _overflow_list; |
duke@435 | 560 | // The following array-pair keeps track of mark words |
duke@435 | 561 | // displaced for accomodating overflow list above. |
duke@435 | 562 | // This code will likely be revisited under RFE#4922830. |
zgu@3900 | 563 | Stack<oop, mtGC> _preserved_oop_stack; |
zgu@3900 | 564 | Stack<markOop, mtGC> _preserved_mark_stack; |
duke@435 | 565 | |
duke@435 | 566 | int* _hash_seed; |
duke@435 | 567 | |
duke@435 | 568 | // In support of multi-threaded concurrent phases |
duke@435 | 569 | YieldingFlexibleWorkGang* _conc_workers; |
duke@435 | 570 | |
duke@435 | 571 | // Performance Counters |
duke@435 | 572 | CollectorCounters* _gc_counters; |
duke@435 | 573 | |
duke@435 | 574 | // Initialization Errors |
duke@435 | 575 | bool _completed_initialization; |
duke@435 | 576 | |
duke@435 | 577 | // In support of ExplicitGCInvokesConcurrent |
sla@5237 | 578 | static bool _full_gc_requested; |
sla@5237 | 579 | static GCCause::Cause _full_gc_cause; |
sla@5237 | 580 | unsigned int _collection_count_start; |
ysr@529 | 581 | |
duke@435 | 582 | // Should we unload classes this concurrent cycle? |
ysr@529 | 583 | bool _should_unload_classes; |
ysr@529 | 584 | unsigned int _concurrent_cycles_since_last_unload; |
ysr@529 | 585 | unsigned int concurrent_cycles_since_last_unload() const { |
ysr@529 | 586 | return _concurrent_cycles_since_last_unload; |
ysr@529 | 587 | } |
duke@435 | 588 | // Did we (allow) unload classes in the previous concurrent cycle? |
ysr@529 | 589 | bool unloaded_classes_last_cycle() const { |
ysr@529 | 590 | return concurrent_cycles_since_last_unload() == 0; |
duke@435 | 591 | } |
ysr@1233 | 592 | // Root scanning options for perm gen |
ysr@1233 | 593 | int _roots_scanning_options; |
ysr@1233 | 594 | int roots_scanning_options() const { return _roots_scanning_options; } |
ysr@1233 | 595 | void add_root_scanning_option(int o) { _roots_scanning_options |= o; } |
ysr@1233 | 596 | void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; } |
duke@435 | 597 | |
  // Verification support
  CMSBitMap     _verification_mark_bm;
  void verify_after_remark_work_1();
  void verify_after_remark_work_2();

  // true if any verification flag is on.
  bool _verifying;
  bool verifying() const { return _verifying; }
  void set_verifying(bool v) { _verifying = v; }

  // Collector policy
  ConcurrentMarkSweepPolicy* _collector_policy;
  ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }

  void set_did_compact(bool v);

  // XXX Move these to CMSStats ??? FIX ME !!!
  elapsedTimer _inter_sweep_timer;   // time between sweeps
  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
  // padded decaying average estimates of the above
  AdaptivePaddedAverage _inter_sweep_estimate;
  AdaptivePaddedAverage _intra_sweep_estimate;

  // Tracing/timing support for concurrent (CMS) collections.
  CMSTracer*          _gc_tracer_cm;
  ConcurrentGCTimer*  _gc_timer_cm;

  // NOTE(review): presumably set once a CMS cycle-start event has been
  // registered and cleared at cycle end — confirm against the .cpp.
  bool _cms_start_registered;

  // Usage snapshots taken by save_heap_summary(), reported later.
  GCHeapSummary _last_heap_summary;
  MetaspaceSummary _last_metaspace_summary;

  void register_foreground_gc_start(GCCause::Cause cause);
  void register_gc_start(GCCause::Cause cause);
  void register_gc_end();
  void save_heap_summary();
  void report_heap_summary(GCWhen::Type when);
 protected:
  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
  MemRegion                      _span;    // span covering above two
  CardTableRS*                   _ct;      // card table

  // CMS marking support structures
  CMSBitMap     _markBitMap;
  CMSBitMap     _modUnionTable;
  CMSMarkStack  _markStack;

  HeapWord*     _restart_addr; // in support of marking stack overflow
  void          lower_restart_addr(HeapWord* low);

  // Counters in support of marking stack / work queue overflow handling:
  // a non-zero value indicates certain types of overflow events during
  // the current CMS cycle and could lead to stack resizing efforts at
  // an opportune future time.
  size_t        _ser_pmc_preclean_ovflw;
  size_t        _ser_pmc_remark_ovflw;
  size_t        _par_pmc_remark_ovflw;
  size_t        _ser_kac_preclean_ovflw;
  size_t        _ser_kac_ovflw;
  size_t        _par_kac_ovflw;
  NOT_PRODUCT(ssize_t _num_par_pushes;)

  // ("Weak") Reference processing support
  ReferenceProcessor*            _ref_processor;
  CMSIsAliveClosure              _is_alive_closure;
  // keep this textually after _markBitMap and _span; c'tor dependency

  ConcurrentMarkSweepThread*     _cmsThread;   // the thread doing the work
  ModUnionClosure    _modUnionClosure;
  ModUnionClosurePar _modUnionClosurePar;

  // CMS abstract state machine
  // initial_state: Idling
  // next_state(Idling) = {Marking}
  // next_state(Marking) = {Precleaning, Sweeping}
  // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
  // next_state(AbortablePreclean) = {FinalMarking}
  // next_state(FinalMarking) = {Sweeping}
  // next_state(Sweeping) = {Resizing}
  // next_state(Resizing) = {Resetting}
  // next_state(Resetting) = {Idling}
  // The numeric values below are chosen so that:
  // . _collectorState <= Idling ==  post-sweep && pre-mark
  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
  //                                            precleaning || abortablePreclean
 public:
  enum CollectorState {
    Resizing            = 0,
    Resetting           = 1,
    Idling              = 2,
    InitialMarking      = 3,
    Marking             = 4,
    Precleaning         = 5,
    AbortablePreclean   = 6,
    FinalMarking        = 7,
    Sweeping            = 8
  };
 protected:
  static CollectorState _collectorState;

  // State related to prologue/epilogue invocation for my generations
  bool _between_prologue_and_epilogue;

  // Signalling/State related to coordination between fore- and background GC
  // Note: When the baton has been passed from background GC to foreground GC,
  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
                                        // wants to go active
  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
                                        // yet passed the baton to the foreground GC

  // Support for CMSScheduleRemark (abortable preclean)
  bool _abort_preclean;
  bool _start_sampling;

  int    _numYields;
  size_t _numDirtyCards;
  size_t _sweep_count;
  // number of full gc's since the last concurrent gc.
  uint   _full_gcs_since_conc_gc;

  // occupancy used for bootstrapping stats
  double _bootstrap_occupancy;

  // timer
  elapsedTimer _timer;

  // Timing, allocation and promotion statistics, used for scheduling.
  CMSStats      _stats;

  // Allocation limits installed in the young gen, used only in
  // CMSIncrementalMode.  When an allocation in the young gen would cross one of
  // these limits, the cms generation is notified and the cms thread is started
  // or stopped, respectively.
  HeapWord*     _icms_start_limit;
  HeapWord*     _icms_stop_limit;

  enum CMS_op_type {
    CMS_op_checkpointRootsInitial,
    CMS_op_checkpointRootsFinal
  };

  void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
  bool stop_world_and_do(CMS_op_type op);

  OopTaskQueueSet* task_queues() { return _task_queues; }
  int*             hash_seed(int i) { return &_hash_seed[i]; }
  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }

  // Support for parallelizing Eden rescan in CMS remark phase
  void sample_eden(); // ... sample Eden space top
 private:
  // Support for parallelizing young gen rescan in CMS remark phase
  Generation* _young_gen;  // the younger gen
  HeapWord** _top_addr;    // ... Top of Eden
  HeapWord** _end_addr;    // ... End of Eden
  Mutex*     _eden_chunk_lock;
  HeapWord** _eden_chunk_array; // ... Eden partitioning array
  size_t     _eden_chunk_index; // ... top (exclusive) of array
  size_t     _eden_chunk_capacity;  // ... max entries in array

  // Support for parallelizing survivor space rescan
  HeapWord** _survivor_chunk_array;
  size_t     _survivor_chunk_index;
  size_t     _survivor_chunk_capacity;
  size_t*    _cursor;
  ChunkArray* _survivor_plab_array;

  // Support for marking stack overflow handling
  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
  bool par_take_from_overflow_list(size_t num,
                                   OopTaskQueue* to_work_q,
                                   int no_of_gc_threads);
  void push_on_overflow_list(oop p);
  void par_push_on_overflow_list(oop p);
  // the following is, obviously, not, in general, "MT-stable"
  bool overflow_list_is_empty() const;

  void preserve_mark_if_necessary(oop p);
  void par_preserve_mark_if_necessary(oop p);
  void preserve_mark_work(oop p, markOop m);
  void restore_preserved_marks_if_any();
  NOT_PRODUCT(bool no_preserved_marks() const;)
  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool simulate_overflow();)     // sequential
  NOT_PRODUCT(bool par_simulate_overflow();) // MT version

  // CMS work methods
  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work

  // a return value of false indicates failure due to stack overflow
  bool markFromRootsWork(bool asynch); // concurrent marking work

 public:   // FIX ME!!! only for testing
  bool do_marking_st(bool asynch);    // single-threaded marking
  bool do_marking_mt(bool asynch);    // multi-threaded  marking

 private:

  // concurrent precleaning work
  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
  size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
                             ScanMarkedObjectsAgainCarefullyClosure* cl);
  // Does precleaning work, returning a quantity indicative of
  // the amount of "useful work" done.
  size_t preclean_work(bool clean_refs, bool clean_survivors);
  void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
  void abortable_preclean(); // Preclean while looking for possible abort
  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
  // Helper function for above; merge-sorts the per-thread plab samples
  void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
  // Resets (i.e. clears) the per-thread plab sample vectors
  void reset_survivor_plab_arrays();

  // final (second) checkpoint work
  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                bool init_mark_was_synchronous);
  // work routine for parallel version of remark
  void do_remark_parallel();
  // work routine for non-parallel version of remark
  void do_remark_non_parallel();
  // reference processing work routine (during second checkpoint)
  void refProcessingWork(bool asynch, bool clear_all_soft_refs);

  // concurrent sweeping work
  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);

  // (concurrent) resetting of support data structures
  void reset(bool asynch);

  // Clear _expansion_cause fields of constituent generations
  void clear_expansion_cause();

  // An auxiliary method used to record the ends of
  // used regions of each generation to limit the extent of sweep
  void save_sweep_limits();

  // A work method used by foreground collection to determine
  // what type of collection (compacting or not, continuing or fresh)
  // it should do.
  void decide_foreground_collection_type(bool clear_all_soft_refs,
    bool* should_compact, bool* should_start_over);

  // A work method used by the foreground collector to do
  // a mark-sweep-compact.
  void do_compaction_work(bool clear_all_soft_refs);

  // A work method used by the foreground collector to do
  // a mark-sweep, after taking over from a possibly on-going
  // concurrent mark-sweep collection.
  void do_mark_sweep_work(bool clear_all_soft_refs,
    CollectorState first_state, bool should_start_over);

  // Work methods for reporting concurrent mode interruption or failure
  bool is_external_interruption();
  void report_concurrent_mode_interruption();

  // If the background GC is active, acquire control from the background
  // GC and do the collection.
  void acquire_control_and_collect(bool full, bool clear_all_soft_refs);

  // For synchronizing passing of control from background to foreground
  // GC.  waitForForegroundGC() is called by the background
  // collector.  If it had to wait for a foreground collection,
  // it returns true and the background collection should assume
  // that the collection was finished by the foreground
  // collector.
  bool waitForForegroundGC();

  // Incremental mode triggering:  recompute the icms duty cycle and set the
  // allocation limits in the young gen.
  void icms_update_allocation_limits();

  size_t block_size_using_printezis_bits(HeapWord* addr) const;
  size_t block_size_if_printezis_bits(HeapWord* addr) const;
  HeapWord* next_card_start_after_block(HeapWord* addr) const;

  void setup_cms_unloading_and_verification_state();
 public:
  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
               CardTableRS*                   ct,
               ConcurrentMarkSweepPolicy*     cp);
  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void ref_processor_init();

  Mutex* bitMapLock()        const { return _markBitMap.lock(); }
  static CollectorState abstract_state() { return _collectorState; }

  bool should_abort_preclean() const; // Whether preclean should be aborted.
  size_t get_eden_used() const;
  size_t get_eden_capacity() const;

  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }

  // locking checks
  NOT_PRODUCT(static bool have_cms_token();)

  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
  bool shouldConcurrentCollect();

  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);
  void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
  void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);

  // In support of ExplicitGCInvokesConcurrent
  static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
  // Should we unload classes in a particular concurrent cycle?
  bool should_unload_classes() const {
    return _should_unload_classes;
  }
  void update_should_unload_classes();

  void direct_allocated(HeapWord* start, size_t size);

  // Object is dead if not marked and current phase is sweeping.
  bool is_dead_obj(oop obj) const;

  // After a promotion (of "start"), do any necessary marking.
  // If "par", then it's being done by a parallel GC thread.
  // The last two args indicate if we need precise marking
  // and if so the size of the object so it can be dirtied
  // in its entirety.
  void promoted(bool par, HeapWord* start,
                bool is_obj_array, size_t obj_size);

  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  void getFreelistLocks() const;
  void releaseFreelistLocks() const;
  bool haveFreelistLocks() const;

  // Adjust size of underlying generation
  void compute_new_size();

  // GC prologue and epilogue
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

duke@435 | 945 | jlong time_of_last_gc(jlong now) { |
duke@435 | 946 | if (_collectorState <= Idling) { |
duke@435 | 947 | // gc not in progress |
duke@435 | 948 | return _time_of_last_gc; |
duke@435 | 949 | } else { |
duke@435 | 950 | // collection in progress |
duke@435 | 951 | return now; |
duke@435 | 952 | } |
duke@435 | 953 | } |
duke@435 | 954 | |
  // Support for parallel remark of survivor space
  void* get_data_recorder(int thr_num);
  void sample_eden_chunk();

  CMSBitMap* markBitMap()  { return &_markBitMap; }
  void directAllocated(HeapWord* start, size_t size);

  // main CMS steps and related support
  void checkpointRootsInitial(bool asynch);
  bool markFromRoots(bool asynch);  // a return value of false indicates failure
                                    // due to stack overflow
  void preclean();
  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
                            bool init_mark_was_synchronous);
  void sweep(bool asynch);

  // Check that the currently executing thread is the expected
  // one (foreground collector or background collector).
  static void check_correct_thread_executing() PRODUCT_RETURN;
  // XXXPERM void print_statistics() PRODUCT_RETURN;

  bool is_cms_reachable(HeapWord* addr);

  // Performance Counter Support
  CollectorCounters* counters()    { return _gc_counters; }

  // timer stuff
  void    startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
  void    stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
  void    resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
  double  timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }

  int  yields()          { return _numYields; }
  void resetYields()     { _numYields = 0;    }
  void incrementYields() { _numYields++;      }
  void resetNumDirtyCards()               { _numDirtyCards = 0; }
  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
  size_t  numDirtyCards()                 { return _numDirtyCards; }

  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
  size_t sweep_count() const             { return _sweep_count; }
  void   increment_sweep_count()         { _sweep_count++; }

  // Timers/stats for gc scheduling and incremental mode pacing.
  CMSStats& stats() { return _stats; }

  // Convenience methods that check whether CMSIncrementalMode is enabled and
  // forward to the corresponding methods in ConcurrentMarkSweepThread.
  static void start_icms();
  static void stop_icms();    // Called at the end of the cms cycle.
  static void disable_icms(); // Called before a foreground collection.
  static void enable_icms();  // Called after a foreground collection.
  void icms_wait();           // Called at yield points.

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  static void print_on_error(outputStream* st);

  // debugging
  void verify();
  bool verify_after_remark(bool silent = VerifySilently);
  void verify_ok_to_terminate() const PRODUCT_RETURN;
  void verify_work_stacks_empty() const PRODUCT_RETURN;
  void verify_overflow_empty() const PRODUCT_RETURN;

  // convenience methods in support of debugging
  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;

  // accessors
  CMSMarkStack* verification_mark_stack() { return &_markStack; }
  CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }

  // Initialization errors
  bool completed_initialization() { return _completed_initialization; }

  void print_eden_and_survivor_chunk_arrays();
};
duke@435 | 1038 | |
// Enumerates the reasons why a CMS generation was expanded,
// used for reporting via to_string().
class CMSExpansionCause : public AllStatic  {
 public:
  enum Cause {
    _no_expansion,
    _satisfy_free_ratio,
    _satisfy_promotion,
    _satisfy_allocation,
    _allocate_par_lab,
    _allocate_par_spooling_space,
    _adaptive_size_policy
  };
  // Return a string describing the cause of the expansion.
  static const char* to_string(CMSExpansionCause::Cause cause);
};
duke@435 | 1053 | |
class ConcurrentMarkSweepGeneration: public CardGeneration {
  friend class VMStructs;
  friend class ConcurrentMarkSweepThread;
  friend class ConcurrentMarkSweep;
  friend class CMSCollector;
 protected:
  static CMSCollector*       _collector; // the collector that collects us
  CompactibleFreeListSpace*  _cmsSpace;  // underlying space (only one for now)

  // Performance Counters
  GenerationCounters*      _gen_counters;
  GSpaceCounters*          _space_counters;

  // Words directly allocated, used by CMSStats.
  size_t _direct_allocated_words;

  // Non-product stat counters
  NOT_PRODUCT(
    size_t _numObjectsPromoted;
    size_t _numWordsPromoted;
    size_t _numObjectsAllocated;
    size_t _numWordsAllocated;
  )

  // Used for sizing decisions
  bool _incremental_collection_failed;
  bool incremental_collection_failed() {
    return _incremental_collection_failed;
  }
  void set_incremental_collection_failed() {
    _incremental_collection_failed = true;
  }
  void clear_incremental_collection_failed() {
    _incremental_collection_failed = false;
  }

  // accessors
  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }

 private:
  // For parallel young-gen GC support.
  CMSParGCThreadState** _par_gc_thread_states;

  // Reason generation was expanded
  CMSExpansionCause::Cause _expansion_cause;

  // In support of MinChunkSize being larger than min object size
  const double _dilatation_factor;

  enum CollectionTypes {
    Concurrent_collection_type          = 0,
    MS_foreground_collection_type       = 1,
    MSC_foreground_collection_type      = 2,
    Unknown_collection_type             = 3
  };

  CollectionTypes _debug_collection_type;

  // True if a compacting collection was done.
  bool _did_compact;
  bool did_compact() { return _did_compact; }

  // Fraction of current occupancy at which to start a CMS collection which
  // will collect this generation (at least).
  double _initiating_occupancy;

 protected:
  // Shrink generation by specified size (returns false if unable to shrink)
  void shrink_free_list_by(size_t bytes);

  // Update statistics for GC
  virtual void update_gc_stats(int level, bool full);

  // Maximum available space in the generation (including uncommitted)
  // space.
  size_t max_available() const;

  // getter and initializer for _initiating_occupancy field.
  double initiating_occupancy() const { return _initiating_occupancy; }
  void   init_initiating_occupancy(intx io, uintx tr);

 public:
  ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, CardTableRS* ct,
                                bool use_adaptive_freelists,
                                FreeBlockDictionary<FreeChunk>::DictionaryChoice);

  // Accessors
  CMSCollector* collector() const { return _collector; }
  static void set_collector(CMSCollector* collector) {
    assert(_collector == NULL, "already set");
    _collector = collector;
  }
  CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }

  Mutex* freelistLock() const;

  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }

  // Adaptive size policy
  CMSAdaptiveSizePolicy* size_policy();

  void set_did_compact(bool v) { _did_compact = v; }

  bool refs_discovery_is_atomic() const { return false; }
  bool refs_discovery_is_mt()     const {
    // Note: CMS does MT-discovery during the parallel-remark
    // phases. Use ReferenceProcessorMTMutator to make refs
    // discovery MT-safe during such phases or other parallel
    // discovery phases in the future. This may all go away
    // if/when we decide that refs discovery is sufficiently
    // rare that the cost of the CAS's involved is in the
    // noise. That's a measurement that should be done, and
    // the code simplified if that turns out to be the case.
    return ConcGCThreads > 1;
  }

  // Override
  virtual void ref_processor_init();

  // Grow generation by specified size (returns false if unable to grow)
  bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  bool grow_to_reserved();

  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  double occupancy() const { return ((double)used())/((double)capacity()); }
  size_t contiguous_available() const;
  size_t unsafe_max_alloc_nogc() const;

  // over-rides
  MemRegion used_region() const;
  MemRegion used_region_at_save_marks() const;

  // Does a "full" (forced) collection invoked on this generation collect
  // all younger generations as well? Note that the second conjunct is a
  // hack to allow the collection of the younger gen first if the flag is
  // set. This is better than using the policy's should_collect_gen0_first()
  // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
  virtual bool full_collects_younger_generations() const {
    return UseCMSCompactAtFullCollection && !CollectGen0First;
  }

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Support for compaction
  CompactibleSpace* first_compaction_space() const;
  // Adjust quantities in the generation affected by
  // the compaction.
  void reset_after_compaction();

  // Allocation support
  HeapWord* allocate(size_t size, bool tlab);
  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
  oop       promote(oop obj, size_t obj_size);
  HeapWord* par_allocate(size_t size, bool tlab) {
    return allocate(size, tlab);
  }

  // Incremental mode triggering.
  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                     size_t word_size);

  // Used by CMSStats to track direct allocation.  The value is sampled and
  // reset after each young gen collection.
  size_t direct_allocated_words() const { return _direct_allocated_words; }
  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }

  // Overrides for parallel promotion.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);
  // This one should not be called for CMS.
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);
  virtual void par_promote_alloc_done(int thread_num);
  virtual void par_oop_since_save_marks_iterate_done(int thread_num);

  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;

  // Inform this (non-young) generation that a promotion failure was
  // encountered during a collection of a younger generation that
  // promotes into this generation.
  virtual void promotion_failure_occurred();

  bool should_collect(bool full, size_t size, bool tlab);
  virtual bool should_concurrent_collect() const;
  virtual bool is_too_full() const;
  void collect(bool   full,
               bool   clear_all_soft_refs,
               size_t size,
               bool   tlab);

  HeapWord* expand_and_allocate(size_t word_size,
                                bool tlab,
                                bool parallel = false);

  // GC prologue and epilogue
  void gc_prologue(bool full);
duke@435 | 1258 | void gc_prologue_work(bool full, bool registerClosure, |
duke@435 | 1259 | ModUnionClosure* modUnionClosure); |
duke@435 | 1260 | void gc_epilogue(bool full); |
duke@435 | 1261 | void gc_epilogue_work(bool full); |
duke@435 | 1262 | |
duke@435 | 1263 | // Time since last GC of this generation |
duke@435 | 1264 | jlong time_of_last_gc(jlong now) { |
duke@435 | 1265 | return collector()->time_of_last_gc(now); |
duke@435 | 1266 | } |
duke@435 | 1267 | void update_time_of_last_gc(jlong now) { |
duke@435 | 1268 | collector()-> update_time_of_last_gc(now); |
duke@435 | 1269 | } |
duke@435 | 1270 | |
duke@435 | 1271 | // Allocation failure |
duke@435 | 1272 | void expand(size_t bytes, size_t expand_bytes, |
duke@435 | 1273 | CMSExpansionCause::Cause cause); |
jmasa@706 | 1274 | virtual bool expand(size_t bytes, size_t expand_bytes); |
duke@435 | 1275 | void shrink(size_t bytes); |
jmasa@4900 | 1276 | void shrink_by(size_t bytes); |
duke@435 | 1277 | HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz); |
duke@435 | 1278 | bool expand_and_ensure_spooling_space(PromotionInfo* promo); |
duke@435 | 1279 | |
duke@435 | 1280 | // Iteration support and related enquiries |
duke@435 | 1281 | void save_marks(); |
duke@435 | 1282 | bool no_allocs_since_save_marks(); |
duke@435 | 1283 | void younger_refs_iterate(OopsInGenClosure* cl); |
duke@435 | 1284 | |
duke@435 | 1285 | // Iteration support specific to CMS generations |
duke@435 | 1286 | void save_sweep_limit(); |
duke@435 | 1287 | |
duke@435 | 1288 | // More iteration support |
coleenp@4037 | 1289 | virtual void oop_iterate(ExtendedOopClosure* cl); |
jmasa@952 | 1290 | virtual void safe_object_iterate(ObjectClosure* cl); |
duke@435 | 1291 | virtual void object_iterate(ObjectClosure* cl); |
duke@435 | 1292 | |
duke@435 | 1293 | // Need to declare the full complement of closures, whether we'll |
duke@435 | 1294 | // override them or not, or get message from the compiler: |
duke@435 | 1295 | // oop_since_save_marks_iterate_nv hides virtual function... |
duke@435 | 1296 | #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ |
duke@435 | 1297 | void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl); |
duke@435 | 1298 | ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL) |
duke@435 | 1299 | |
duke@435 | 1300 | // Smart allocation XXX -- move to CFLSpace? |
duke@435 | 1301 | void setNearLargestChunk(); |
duke@435 | 1302 | bool isNearLargestChunk(HeapWord* addr); |
duke@435 | 1303 | |
duke@435 | 1304 | // Get the chunk at the end of the space. Delagates to |
duke@435 | 1305 | // the space. |
duke@435 | 1306 | FreeChunk* find_chunk_at_end(); |
duke@435 | 1307 | |
duke@435 | 1308 | void post_compact(); |
duke@435 | 1309 | |
duke@435 | 1310 | // Debugging |
duke@435 | 1311 | void prepare_for_verify(); |
brutisso@3711 | 1312 | void verify(); |
duke@435 | 1313 | void print_statistics() PRODUCT_RETURN; |
duke@435 | 1314 | |
duke@435 | 1315 | // Performance Counters support |
duke@435 | 1316 | virtual void update_counters(); |
duke@435 | 1317 | virtual void update_counters(size_t used); |
duke@435 | 1318 | void initialize_performance_counters(); |
duke@435 | 1319 | CollectorCounters* counters() { return collector()->counters(); } |
duke@435 | 1320 | |
duke@435 | 1321 | // Support for parallel remark of survivor space |
duke@435 | 1322 | void* get_data_recorder(int thr_num) { |
duke@435 | 1323 | //Delegate to collector |
duke@435 | 1324 | return collector()->get_data_recorder(thr_num); |
duke@435 | 1325 | } |
jmasa@5459 | 1326 | void sample_eden_chunk() { |
jmasa@5459 | 1327 | //Delegate to collector |
jmasa@5459 | 1328 | return collector()->sample_eden_chunk(); |
jmasa@5459 | 1329 | } |
duke@435 | 1330 | |
duke@435 | 1331 | // Printing |
duke@435 | 1332 | const char* name() const; |
duke@435 | 1333 | virtual const char* short_name() const { return "CMS"; } |
duke@435 | 1334 | void print() const; |
duke@435 | 1335 | void printOccupancy(const char* s); |
duke@435 | 1336 | bool must_be_youngest() const { return false; } |
duke@435 | 1337 | bool must_be_oldest() const { return true; } |
duke@435 | 1338 | |
jmasa@4900 | 1339 | // Resize the generation after a compacting GC. The |
jmasa@4900 | 1340 | // generation can be treated as a contiguous space |
jmasa@4900 | 1341 | // after the compaction. |
jmasa@4900 | 1342 | virtual void compute_new_size(); |
jmasa@4900 | 1343 | // Resize the generation after a non-compacting |
jmasa@4900 | 1344 | // collection. |
jmasa@4900 | 1345 | void compute_new_size_free_list(); |
duke@435 | 1346 | |
duke@435 | 1347 | CollectionTypes debug_collection_type() { return _debug_collection_type; } |
duke@435 | 1348 | void rotate_debug_collection_type(); |
duke@435 | 1349 | }; |
duke@435 | 1350 | |
// A ConcurrentMarkSweepGeneration that cooperates with the CMS adaptive
// size policy (see CMSAdaptiveSizePolicy below) to resize itself.
class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {

  // Return the size policy from the heap's collector
  // policy cast to CMSAdaptiveSizePolicy*.
  CMSAdaptiveSizePolicy* cms_size_policy() const;

  // Resize the generation based on the adaptive size
  // policy.
  void resize(size_t cur_promo, size_t desired_promo);

  // Return the GC counters from the collector policy
  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();

  virtual void shrink_by(size_t bytes);

 public:
  // Simply forwards all arguments to the base-class constructor.
  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                  int level, CardTableRS* ct,
                                  bool use_adaptive_freelists,
                                  FreeBlockDictionary<FreeChunk>::DictionaryChoice
                                    dictionaryChoice) :
    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
      use_adaptive_freelists, dictionaryChoice) {}

  virtual const char* short_name() const { return "ASCMS"; }
  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }

  virtual void update_counters();
  virtual void update_counters(size_t used);
};
duke@435 | 1381 | |
duke@435 | 1382 | // |
duke@435 | 1383 | // Closures of various sorts used by CMS to accomplish its work |
duke@435 | 1384 | // |
duke@435 | 1385 | |
// This closure is used to do concurrent marking from the roots
// following the first checkpoint.
class MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;   // collector doing the marking
  MemRegion      _span;        // region over which marking is done
  CMSBitMap*     _bitMap;      // marking bit map
  CMSBitMap*     _mut;         // mod union table
  CMSMarkStack*  _markStack;   // work/overflow stack for marking
  bool           _yield;       // whether to yield during the scan
  int            _skipBits;    // bits to skip (e.g. past an object already handled)
  HeapWord*      _finger;      // current position of the scan
  HeapWord*      _threshold;   // NOTE(review): presumably the next boundary at
                               // which marking bookkeeping is updated -- see .cpp
  DEBUG_ONLY(bool _verifying;) // true when used for marking verification
 public:
  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bitMap,
                       CMSMarkStack*  markStack,
                       bool should_yield, bool verifying = false);
  bool do_bit(size_t offset);
  // Restart the scan at addr.
  void reset(HeapWord* addr);
  inline void do_yield_check();

 private:
  // Scan the oops in the object starting at ptr.
  void scanOopsInOop(HeapWord* ptr);
  void do_yield_work();
};
duke@435 | 1413 | |
// This closure is used to do concurrent multi-threaded
// marking from the roots following the first checkpoint.
// XXX This should really be a subclass of the serial version
// above, but I have not had the time to refactor things cleanly.
// That will be done for Dolphin.
class Par_MarkFromRootsClosure: public BitMapClosure {
  CMSCollector*  _collector;      // collector doing the marking
  MemRegion      _whole_span;     // full span of the marking (vs. this worker's _span)
  MemRegion      _span;           // subregion this worker is responsible for
  CMSBitMap*     _bit_map;        // marking bit map (shared among workers)
  CMSBitMap*     _mut;            // mod union table
  OopTaskQueue*  _work_queue;     // this worker's queue of grey objects
  CMSMarkStack*  _overflow_stack; // shared stack used when the queue overflows
  bool           _yield;          // whether to yield during the scan
  int            _skip_bits;      // bits to skip (e.g. past an object already handled)
  HeapWord*      _finger;         // current position of the scan
  HeapWord*      _threshold;      // NOTE(review): presumably the next boundary at
                                  // which marking bookkeeping is updated -- see .cpp
  CMSConcMarkingTask* _task;      // the enclosing concurrent marking task
 public:
  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
                       MemRegion span,
                       CMSBitMap* bit_map,
                       OopTaskQueue* work_queue,
                       CMSMarkStack*  overflow_stack,
                       bool should_yield);
  bool do_bit(size_t offset);
  inline void do_yield_check();

 private:
  // Scan the oops in the object starting at ptr.
  void scan_oops_in_oop(HeapWord* ptr);
  void do_yield_work();
  // Refill the local work queue from the shared overflow stack.
  bool get_work_from_overflow_stack();
};
duke@435 | 1447 | |
// The following closures are used to do certain kinds of verification of
// CMS marking.
class PushAndMarkVerifyClosure: public MetadataAwareOopClosure {
  CMSCollector*    _collector;       // collector whose marking is being verified
  MemRegion        _span;            // region being verified
  CMSBitMap*       _verification_bm; // bit map built during verification
  CMSBitMap*       _cms_bm;          // bit map built by the CMS marking proper
  CMSMarkStack*    _mark_stack;      // work stack for transitive marking
 protected:
  void do_oop(oop p);
  // Decode the (possibly narrow) oop at p and hand it to do_oop(oop).
  template <class T> inline void do_oop_work(T *p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    do_oop(obj);
  }
 public:
  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                           MemRegion span,
                           CMSBitMap* verification_bm,
                           CMSBitMap* cms_bm,
                           CMSMarkStack* mark_stack);
  void do_oop(oop* p);
  void do_oop(narrowOop* p);

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
};
duke@435 | 1474 | |
// Bit-map closure used during verification of CMS marking: walks the
// marked bits and marks transitively via _pam_verify_closure.
class MarkFromRootsVerifyClosure: public BitMapClosure {
  CMSCollector*            _collector;       // collector whose marking is verified
  MemRegion                _span;            // region being verified
  CMSBitMap*               _verification_bm; // bit map built during verification
  CMSBitMap*               _cms_bm;          // bit map built by CMS marking proper
  CMSMarkStack*            _mark_stack;      // work stack for transitive marking
  HeapWord*                _finger;          // current position of the scan
  PushAndMarkVerifyClosure _pam_verify_closure; // marks objects reachable from here
 public:
  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
                             CMSBitMap* verification_bm,
                             CMSBitMap* cms_bm,
                             CMSMarkStack* mark_stack);
  bool do_bit(size_t offset);
  // Restart the scan at addr (e.g. after a stack overflow -- see .cpp).
  void reset(HeapWord* addr);
};
duke@435 | 1491 | |
duke@435 | 1492 | |
// This closure is used to check that a certain set of bits is
// "empty" (i.e. the bit vector doesn't have any 1-bits).
class FalseBitMapClosure: public BitMapClosure {
 public:
  bool do_bit(size_t offset) {
    // Being called at all means a 1-bit was found where none was expected.
    guarantee(false, "Should not have a 1 bit");
    return true;
  }
};
duke@435 | 1502 | |
// A version of ObjectClosure with "memory" (see _previous_address below)
class UpwardsObjectClosure: public BoolObjectClosure {
  HeapWord* _previous_address;  // address remembered from the previous call
 public:
  UpwardsObjectClosure() : _previous_address(NULL) { }
  void set_previous(HeapWord* addr) { _previous_address = addr; }
  HeapWord* previous()              { return _previous_address; }
  // A return value of "true" can be used by the caller to decide
  // if this object's end should *NOT* be recorded in
  // _previous_address above.
  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
};
mgerdin@6979 | 1515 | |
duke@435 | 1516 | // This closure is used during the second checkpointing phase |
duke@435 | 1517 | // to rescan the marked objects on the dirty cards in the mod |
duke@435 | 1518 | // union table and the card table proper. It's invoked via |
duke@435 | 1519 | // MarkFromDirtyCardsClosure below. It uses either |
duke@435 | 1520 | // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case) |
duke@435 | 1521 | // declared in genOopClosures.hpp to accomplish some of its work. |
duke@435 | 1522 | // In the parallel case the bitMap is shared, so access to |
duke@435 | 1523 | // it needs to be suitably synchronized for updates by embedded |
duke@435 | 1524 | // closures that update it; however, this closure itself only |
duke@435 | 1525 | // reads the bit_map and because it is idempotent, is immune to |
duke@435 | 1526 | // reading stale values. |
duke@435 | 1527 | class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure { |
duke@435 | 1528 | #ifdef ASSERT |
duke@435 | 1529 | CMSCollector* _collector; |
duke@435 | 1530 | MemRegion _span; |
duke@435 | 1531 | union { |
duke@435 | 1532 | CMSMarkStack* _mark_stack; |
duke@435 | 1533 | OopTaskQueue* _work_queue; |
duke@435 | 1534 | }; |
duke@435 | 1535 | #endif // ASSERT |
duke@435 | 1536 | bool _parallel; |
duke@435 | 1537 | CMSBitMap* _bit_map; |
duke@435 | 1538 | union { |
duke@435 | 1539 | MarkRefsIntoAndScanClosure* _scan_closure; |
duke@435 | 1540 | Par_MarkRefsIntoAndScanClosure* _par_scan_closure; |
duke@435 | 1541 | }; |
duke@435 | 1542 | |
duke@435 | 1543 | public: |
duke@435 | 1544 | ScanMarkedObjectsAgainClosure(CMSCollector* collector, |
duke@435 | 1545 | MemRegion span, |
duke@435 | 1546 | ReferenceProcessor* rp, |
duke@435 | 1547 | CMSBitMap* bit_map, |
duke@435 | 1548 | CMSMarkStack* mark_stack, |
duke@435 | 1549 | MarkRefsIntoAndScanClosure* cl): |
duke@435 | 1550 | #ifdef ASSERT |
duke@435 | 1551 | _collector(collector), |
duke@435 | 1552 | _span(span), |
duke@435 | 1553 | _mark_stack(mark_stack), |
duke@435 | 1554 | #endif // ASSERT |
duke@435 | 1555 | _parallel(false), |
duke@435 | 1556 | _bit_map(bit_map), |
duke@435 | 1557 | _scan_closure(cl) { } |
duke@435 | 1558 | |
duke@435 | 1559 | ScanMarkedObjectsAgainClosure(CMSCollector* collector, |
duke@435 | 1560 | MemRegion span, |
duke@435 | 1561 | ReferenceProcessor* rp, |
duke@435 | 1562 | CMSBitMap* bit_map, |
duke@435 | 1563 | OopTaskQueue* work_queue, |
duke@435 | 1564 | Par_MarkRefsIntoAndScanClosure* cl): |
duke@435 | 1565 | #ifdef ASSERT |
duke@435 | 1566 | _collector(collector), |
duke@435 | 1567 | _span(span), |
duke@435 | 1568 | _work_queue(work_queue), |
duke@435 | 1569 | #endif // ASSERT |
duke@435 | 1570 | _parallel(true), |
duke@435 | 1571 | _bit_map(bit_map), |
duke@435 | 1572 | _par_scan_closure(cl) { } |
duke@435 | 1573 | |
duke@435 | 1574 | bool do_object_b(oop obj) { |
duke@435 | 1575 | guarantee(false, "Call do_object_b(oop, MemRegion) form instead"); |
duke@435 | 1576 | return false; |
duke@435 | 1577 | } |
duke@435 | 1578 | bool do_object_bm(oop p, MemRegion mr); |
duke@435 | 1579 | }; |
duke@435 | 1580 | |
// This closure is used during the second checkpointing phase
// to rescan the marked objects on the dirty cards in the mod
// union table and the card table proper. It invokes
// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
// In the parallel case, the bit map is shared and requires
// synchronized access.
class MarkFromDirtyCardsClosure: public MemRegionClosure {
  CompactibleFreeListSpace*      _space;            // space being scanned
  ScanMarkedObjectsAgainClosure  _scan_cl;          // does the object rescanning
  size_t                         _num_dirty_cards;  // count of dirty cards processed

 public:
  // Serial variant; embeds a serial ScanMarkedObjectsAgainClosure.
  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            CMSMarkStack* mark_stack,
                            MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
                 mark_stack, cl) { }

  // Parallel variant; embeds a parallel ScanMarkedObjectsAgainClosure.
  MarkFromDirtyCardsClosure(CMSCollector* collector,
                            MemRegion span,
                            CompactibleFreeListSpace* space,
                            CMSBitMap* bit_map,
                            OopTaskQueue* work_queue,
                            Par_MarkRefsIntoAndScanClosure* cl):
    _space(space),
    _num_dirty_cards(0),
    _scan_cl(collector, span, collector->ref_processor(), bit_map,
             work_queue, cl) { }

  void do_MemRegion(MemRegion mr);
  void set_space(CompactibleFreeListSpace* space) { _space = space; }
  size_t num_dirty_cards() { return _num_dirty_cards; }
};
duke@435 | 1619 | |
// This closure is used in the non-product build to check
// that there are no MemRegions with a certain property.
class FalseMemRegionClosure: public MemRegionClosure {
  void do_MemRegion(MemRegion mr) {
    guarantee(!mr.is_empty(), "Shouldn't be empty");
    // Any (non-empty) region reaching this closure is unexpected.
    guarantee(false, "Should never be here");
  }
};
duke@435 | 1628 | |
// This closure is used during the precleaning phase
// to "carefully" rescan marked objects on dirty cards.
// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
// to accomplish some of its work.
class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
  CMSCollector*                  _collector;        // collector doing the work
  MemRegion                      _span;             // region being precleaned
  bool                           _yield;            // whether to yield during the scan
  Mutex*                         _freelistLock;     // set via setFreelistLock() below
  CMSBitMap*                     _bitMap;           // marking bit map
  CMSMarkStack*                  _markStack;        // work/overflow stack for marking
  MarkRefsIntoAndScanClosure*    _scanningClosure;  // does the actual oop scanning

 public:
  ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
                                         MemRegion     span,
                                         CMSBitMap* bitMap,
                                         CMSMarkStack*  markStack,
                                         MarkRefsIntoAndScanClosure* cl,
                                         bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bitMap(bitMap),
    _markStack(markStack),
    _scanningClosure(cl) {
  }

  // Only the "careful" bit-map entry point below is supported.
  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t      do_object_careful(oop p) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

  // Rescan the object at p, restricted to the region mr.
  size_t      do_object_careful_m(oop p, MemRegion mr);

  // Install the free list lock, propagating it to the scanning closure.
  void setFreelistLock(Mutex* m) {
    _freelistLock = m;
    _scanningClosure->set_freelistLock(m);
  }

 private:
  inline bool do_yield_check();

  void do_yield_work();
};
duke@435 | 1678 | |
// Precleans objects in the survivor spaces; a sibling of
// ScanMarkedObjectsAgainCarefullyClosure above but driven by
// a PushAndMarkClosure.
class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
  CMSCollector*                  _collector;         // collector doing the work
  MemRegion                      _span;              // region being precleaned
  bool                           _yield;             // whether to yield during the scan
  CMSBitMap*                     _bit_map;           // marking bit map
  CMSMarkStack*                  _mark_stack;        // work/overflow stack for marking
  PushAndMarkClosure*            _scanning_closure;  // does the actual oop scanning
  unsigned int                   _before_count;      // NOTE(review): presumably a count
                                                     // sampled before the scan to detect
                                                     // interference -- confirm in .cpp

 public:
  SurvivorSpacePrecleanClosure(CMSCollector* collector,
                               MemRegion     span,
                               CMSBitMap*    bit_map,
                               CMSMarkStack* mark_stack,
                               PushAndMarkClosure* cl,
                               unsigned int  before_count,
                               bool should_yield):
    _collector(collector),
    _span(span),
    _yield(should_yield),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _scanning_closure(cl),
    _before_count(before_count)
  { }

  // Only do_object_careful() below is supported.
  void do_object(oop p) {
    guarantee(false, "call do_object_careful instead");
  }

  size_t      do_object_careful(oop p);

  size_t      do_object_careful_m(oop p, MemRegion mr) {
    guarantee(false, "Unexpected caller");
    return 0;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
};
duke@435 | 1720 | |
// This closure is used to accomplish the sweeping work
// after the second checkpoint but before the concurrent reset
// phase.
//
// Terminology
//    left hand chunk (LHC) - block of one or more chunks currently being
//      coalesced.  The LHC is available for coalescing with a new chunk.
//    right hand chunk (RHC) - block that is currently being swept that is
//      free or garbage that can be coalesced with the LHC.
// _inFreeRange is true if there is currently a LHC
// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
// _freeRangeInFreeLists is true if the LHC is in the free lists.
// _freeFinger is the address of the current LHC
class SweepClosure: public BlkClosureCareful {
  CMSCollector*                  _collector;  // collector doing the work
  ConcurrentMarkSweepGeneration* _g;    // Generation being swept
  CompactibleFreeListSpace*      _sp;   // Space being swept
  HeapWord*                      _limit;// the address at or above which the sweep should stop
                                        // because we do not expect newly garbage blocks
                                        // eligible for sweeping past that address.
  Mutex*                         _freelistLock; // Free list lock (in space)
  CMSBitMap*                     _bitMap;       // Marking bit map (in
                                                // generation)
  bool                           _inFreeRange;  // Indicates if we are in the
                                                // midst of a free run
  bool                           _freeRangeInFreeLists;
                                        // Often, we have just found
                                        // a free chunk and started
                                        // a new free range; we do not
                                        // eagerly remove this chunk from
                                        // the free lists unless there is
                                        // a possibility of coalescing.
                                        // When true, this flag indicates
                                        // that the _freeFinger below
                                        // points to a potentially free chunk
                                        // that may still be in the free lists
  bool                           _lastFreeRangeCoalesced;
                                        // free range contains chunks
                                        // coalesced
  bool                           _yield;
                                        // Whether sweeping should be
                                        // done with yields. For instance
                                        // when done by the foreground
                                        // collector we shouldn't yield.
  HeapWord*                      _freeFinger;   // When _inFreeRange is set, the
                                                // pointer to the "left hand
                                                // chunk"
  size_t                         _freeRangeSize;
                                        // When _inFreeRange is set, this
                                        // indicates the accumulated size
                                        // of the "left hand chunk"
  NOT_PRODUCT(
    size_t                       _numObjectsFreed;
    size_t                       _numWordsFreed;
    size_t                       _numObjectsLive;
    size_t                       _numWordsLive;
    size_t                       _numObjectsAlreadyFree;
    size_t                       _numWordsAlreadyFree;
    FreeChunk*                   _last_fc;
  )
 private:
  // Code that is common to a free chunk or garbage when
  // encountered during sweeping.
  void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
  // Process a free chunk during sweeping.
  void do_already_free_chunk(FreeChunk *fc);
  // Work method called when processing an already free or a
  // freshly garbage chunk to do a lookahead and possibly a
  // preemptive flush if crossing over _limit.
  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
  // Process a garbage chunk during sweeping.
  size_t do_garbage_chunk(FreeChunk *fc);
  // Process a live chunk during sweeping.
  size_t do_live_chunk(FreeChunk* fc);

  // Accessors.
  HeapWord* freeFinger() const          { return _freeFinger; }
  void set_freeFinger(HeapWord* v)      { _freeFinger = v; }
  bool inFreeRange()    const           { return _inFreeRange; }
  void set_inFreeRange(bool v)          { _inFreeRange = v; }
  bool lastFreeRangeCoalesced() const   { return _lastFreeRangeCoalesced; }
  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
  bool freeRangeInFreeLists() const     { return _freeRangeInFreeLists; }
  void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }

  // Initialize a free range.
  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
  // Return this chunk to the free lists.
  void flush_cur_free_chunk(HeapWord* chunk, size_t size);

  // Check if we should yield and do so when necessary.
  inline void do_yield_check(HeapWord* addr);

  // Yield
  void do_yield_work(HeapWord* addr);

  // Debugging/Printing
  void print_free_block_coalesced(FreeChunk* fc) const;

 public:
  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
               CMSBitMap* bitMap, bool should_yield);
  ~SweepClosure() PRODUCT_RETURN;

  size_t       do_blk_careful(HeapWord* addr);
  void         print() const { print_on(tty); }
  void         print_on(outputStream *st) const;
};
duke@435 | 1829 | |
// Closures related to weak references processing

// During CMS' weak reference processing, this is a
// work-routine/closure used to complete transitive
// marking of objects as live after a certain point
// in which an initial set has been completely accumulated.
// This closure is currently used both during the final
// remark stop-world phase, as well as during the concurrent
// precleaning of the discovered reference lists.
class CMSDrainMarkingStackClosure: public VoidClosure {
  CMSCollector*        _collector;   // collector doing the work
  MemRegion            _span;        // region being collected
  CMSMarkStack*        _mark_stack;  // stack of objects still to be processed
  CMSBitMap*           _bit_map;     // marking bit map
  CMSKeepAliveClosure* _keep_alive;  // closure used to keep referents alive
  bool                 _concurrent_precleaning; // true during concurrent precleaning,
                                                // false during the final remark
 public:
  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                      CMSKeepAliveClosure* keep_alive,
                      bool cpc):
    _collector(collector),
    _span(span),
    _bit_map(bit_map),
    _mark_stack(mark_stack),
    _keep_alive(keep_alive),
    _concurrent_precleaning(cpc) {
    // The phase flag must agree with the keep-alive closure's view of it.
    assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
           "Mismatch");
  }

  void do_void();
};
duke@435 | 1863 | |
duke@435 | 1864 | // A parallel version of CMSDrainMarkingStackClosure above. |
duke@435 | 1865 | class CMSParDrainMarkingStackClosure: public VoidClosure { |
duke@435 | 1866 | CMSCollector* _collector; |
duke@435 | 1867 | MemRegion _span; |
duke@435 | 1868 | OopTaskQueue* _work_queue; |
duke@435 | 1869 | CMSBitMap* _bit_map; |
duke@435 | 1870 | CMSInnerParMarkAndPushClosure _mark_and_push; |
duke@435 | 1871 | |
duke@435 | 1872 | public: |
duke@435 | 1873 | CMSParDrainMarkingStackClosure(CMSCollector* collector, |
duke@435 | 1874 | MemRegion span, CMSBitMap* bit_map, |
duke@435 | 1875 | OopTaskQueue* work_queue): |
duke@435 | 1876 | _collector(collector), |
duke@435 | 1877 | _span(span), |
duke@435 | 1878 | _bit_map(bit_map), |
duke@435 | 1879 | _work_queue(work_queue), |
coleenp@4037 | 1880 | _mark_and_push(collector, span, bit_map, work_queue) { } |
duke@435 | 1881 | |
duke@435 | 1882 | public: |
duke@435 | 1883 | void trim_queue(uint max); |
duke@435 | 1884 | void do_void(); |
duke@435 | 1885 | }; |
duke@435 | 1886 | |
duke@435 | 1887 | // Allow yielding or short-circuiting of reference list |
duke@435 | 1888 | // prelceaning work. |
duke@435 | 1889 | class CMSPrecleanRefsYieldClosure: public YieldClosure { |
duke@435 | 1890 | CMSCollector* _collector; |
duke@435 | 1891 | void do_yield_work(); |
duke@435 | 1892 | public: |
duke@435 | 1893 | CMSPrecleanRefsYieldClosure(CMSCollector* collector): |
duke@435 | 1894 | _collector(collector) {} |
duke@435 | 1895 | virtual bool should_return(); |
duke@435 | 1896 | }; |
duke@435 | 1897 | |
duke@435 | 1898 | |
duke@435 | 1899 | // Convenience class that locks free list locks for given CMS collector |
duke@435 | 1900 | class FreelistLocker: public StackObj { |
duke@435 | 1901 | private: |
duke@435 | 1902 | CMSCollector* _collector; |
duke@435 | 1903 | public: |
duke@435 | 1904 | FreelistLocker(CMSCollector* collector): |
duke@435 | 1905 | _collector(collector) { |
duke@435 | 1906 | _collector->getFreelistLocks(); |
duke@435 | 1907 | } |
duke@435 | 1908 | |
duke@435 | 1909 | ~FreelistLocker() { |
duke@435 | 1910 | _collector->releaseFreelistLocks(); |
duke@435 | 1911 | } |
duke@435 | 1912 | }; |
duke@435 | 1913 | |
duke@435 | 1914 | // Mark all dead objects in a given space. |
duke@435 | 1915 | class MarkDeadObjectsClosure: public BlkClosure { |
duke@435 | 1916 | const CMSCollector* _collector; |
duke@435 | 1917 | const CompactibleFreeListSpace* _sp; |
duke@435 | 1918 | CMSBitMap* _live_bit_map; |
duke@435 | 1919 | CMSBitMap* _dead_bit_map; |
duke@435 | 1920 | public: |
duke@435 | 1921 | MarkDeadObjectsClosure(const CMSCollector* collector, |
duke@435 | 1922 | const CompactibleFreeListSpace* sp, |
duke@435 | 1923 | CMSBitMap *live_bit_map, |
duke@435 | 1924 | CMSBitMap *dead_bit_map) : |
duke@435 | 1925 | _collector(collector), |
duke@435 | 1926 | _sp(sp), |
duke@435 | 1927 | _live_bit_map(live_bit_map), |
duke@435 | 1928 | _dead_bit_map(dead_bit_map) {} |
duke@435 | 1929 | size_t do_blk(HeapWord* addr); |
duke@435 | 1930 | }; |
kevinw@2058 | 1931 | |
kevinw@2058 | 1932 | class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats { |
kevinw@2058 | 1933 | |
kevinw@2058 | 1934 | public: |
fparain@2888 | 1935 | TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause); |
kevinw@2058 | 1936 | }; |
kevinw@2058 | 1937 | |
stefank@2314 | 1938 | |
stefank@2314 | 1939 | #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP |