Wed, 02 Jul 2008 12:55:16 -0700
6719955: Update copyright year
Summary: Update copyright year for files that have been modified in 2008
Reviewed-by: ohair, tbell
/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Classes in support of keeping track of promotions into a non-contiguous
// space, in this case a CompactibleFreeListSpace.

#define CFLS_LAB_REFILL_STATS 0

// Forward declarations
class CompactibleFreeListSpace;
class BlkClosure;
class BlkClosureCareful;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;

class PromotedObject VALUE_OBJ_CLASS_SPEC {
 private:
  enum {
    promoted_mask  = right_n_bits(2),   // i.e. 0x3
    displaced_mark = nth_bit(2),        // i.e. 0x4
    next_mask      = ~(right_n_bits(3)) // i.e. ~(0x7)
  };
  intptr_t _next;
 public:
  inline PromotedObject* next() const {
    return (PromotedObject*)(_next & next_mask);
  }
  inline void setNext(PromotedObject* x) {
    assert(((intptr_t)x & ~next_mask) == 0,
           "Conflict in bit usage, "
           " or insufficient alignment of objects");
    _next |= (intptr_t)x;
  }
  inline void setPromotedMark() {
    _next |= promoted_mask;
  }
  inline bool hasPromotedMark() const {
    return (_next & promoted_mask) == promoted_mask;
  }
  inline void setDisplacedMark() {
    _next |= displaced_mark;
  }
  inline bool hasDisplacedMark() const {
    return (_next & displaced_mark) != 0;
  }
  inline void clearNext() { _next = 0; }
  debug_only(void *next_addr() { return (void *) &_next; })
};
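
// Illustrative sketch (added; not part of the original header): how the
// low bits of _next are multiplexed. Objects here are at least 8-byte
// aligned, so the low three bits of a PromotedObject* are always zero and
// can carry the two flag fields while the upper bits store the link. The
// function name and the literal constants below are hypothetical.
inline intptr_t example_encode_next(PromotedObject* link,
                                    bool promoted, bool displaced) {
  intptr_t bits = (intptr_t)link;  // low 3 bits zero by alignment
  if (promoted)  bits |= 0x3;      // promoted_mask: both low bits set, [11]
  if (displaced) bits |= 0x4;      // displaced_mark: bit 2
  return bits;                     // recover the link with (bits & ~0x7)
}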

class SpoolBlock: public FreeChunk {
  friend class PromotionInfo;
 protected:
  SpoolBlock*  nextSpoolBlock;
  size_t       bufferSize;        // number of usable words in this block
  markOop*     displacedHdr;      // the displaced headers start here

  // Note about bufferSize: it denotes the number of entries available plus 1;
  // legal indices range from 1 through bufferSize - 1. See the verification
  // code verify() that counts the number of displaced headers spooled.
  size_t computeBufferSize() {
    return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markOop);
  }

 public:
  void init() {
    bufferSize = computeBufferSize();
    displacedHdr = (markOop*)&displacedHdr;
    nextSpoolBlock = NULL;
  }
};
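
// (Added note: init() points displacedHdr at its own slot, so the spooled
// headers are stored at displacedHdr[1] .. displacedHdr[bufferSize - 1];
// index 0 is the field itself, which is why legal indices start at 1 in
// the bufferSize note above.)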

class PromotionInfo VALUE_OBJ_CLASS_SPEC {
  bool            _tracking;      // set if tracking
  CompactibleFreeListSpace* _space; // the space to which this belongs
  PromotedObject* _promoHead;     // head of list of promoted objects
  PromotedObject* _promoTail;     // tail of list of promoted objects
  SpoolBlock*     _spoolHead;     // first spooling block
  SpoolBlock*     _spoolTail;     // last non-full spooling block or null
  SpoolBlock*     _splice_point;  // when _spoolTail is null, holds list tail
  SpoolBlock*     _spareSpool;    // free spool buffer
  size_t          _firstIndex;    // first active index in
                                  // first spooling block (_spoolHead)
  size_t          _nextIndex;     // last active index + 1 in last
                                  // spooling block (_spoolTail)
 private:
  // ensure that spooling space exists; return true if there is spooling space
  bool ensure_spooling_space_work();

 public:
  PromotionInfo() :
    _tracking(0), _space(NULL),
    _promoHead(NULL), _promoTail(NULL),
    _spoolHead(NULL), _spoolTail(NULL),
    _spareSpool(NULL), _firstIndex(1),
    _nextIndex(1) {}

  bool noPromotions() const {
    assert(_promoHead != NULL || _promoTail == NULL, "list inconsistency");
    return _promoHead == NULL;
  }
  void startTrackingPromotions();
  void stopTrackingPromotions();
  bool tracking() const          { return _tracking; }
  void track(PromotedObject* trackOop);      // keep track of a promoted oop
  // The following variant must be used when trackOop is not fully
  // initialized and has a NULL klass:
  void track(PromotedObject* trackOop, klassOop klassOfOop); // keep track of a promoted oop
  void setSpace(CompactibleFreeListSpace* sp) { _space = sp; }
  CompactibleFreeListSpace* space() const     { return _space; }
  markOop nextDisplacedHeader(); // get next header & forward spool pointer
  void    saveDisplacedHeader(markOop hdr);
                                 // save header and forward spool

  inline size_t refillSize() const;

  SpoolBlock* getSpoolBlock();   // return a free spooling block
  inline bool has_spooling_space() {
    return _spoolTail != NULL && _spoolTail->bufferSize > _nextIndex;
  }
  // ensure that spooling space exists
  bool ensure_spooling_space() {
    return has_spooling_space() || ensure_spooling_space_work();
  }
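  // (Added note: the short-circuit above keeps the common case -- spooling
  // space already available -- inline and cheap; only the slow path calls
  // the out-of-line ensure_spooling_space_work().)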
#define PROMOTED_OOPS_ITERATE_DECL(OopClosureType, nv_suffix)  \
  void promoted_oops_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(PROMOTED_OOPS_ITERATE_DECL)
#undef PROMOTED_OOPS_ITERATE_DECL
  void promoted_oops_iterate(OopsInGenClosure* cl) {
    promoted_oops_iterate_v(cl);
  }
  void verify() const;
  void reset() {
    _promoHead = NULL;
    _promoTail = NULL;
    _spoolHead = NULL;
    _spoolTail = NULL;
    _spareSpool = NULL;
    _firstIndex = 0;
    _nextIndex = 0;
  }
};
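
// (Added illustration: the spooled displaced headers logically form the
// sequence
//   _spoolHead->displacedHdr[_firstIndex .. bufferSize-1],
//   each full intermediate block's displacedHdr[1 .. bufferSize-1],
//   _spoolTail->displacedHdr[1 .. _nextIndex-1],
// which is the order in which nextDisplacedHeader() consumes them.)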

class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
 public:
  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
    _allocation_size_limit(0) {}
  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
    size_t allocation_size_limit) {
    _ptr = ptr;
    _word_size = word_size;
    _refillSize = refill_size;
    _allocation_size_limit = allocation_size_limit;
  }
  HeapWord* _ptr;
  size_t    _word_size;
  size_t    _refillSize;
  size_t    _allocation_size_limit;  // largest size that will be allocated
};
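
// Illustrative sketch (added; not part of the original header): the
// bump-pointer fast path that a LinearAllocBlock supports. Refill and
// repair are done by CompactibleFreeListSpace below; the function name
// here is hypothetical.
inline HeapWord* example_linab_alloc(LinearAllocBlock* blk, size_t word_sz) {
  if (word_sz > blk->_allocation_size_limit) return NULL; // too large for a LinAB
  if (word_sz > blk->_word_size)             return NULL; // caller must refill first
  HeapWord* res = blk->_ptr;   // current cursor
  blk->_ptr       += word_sz;  // bump the cursor
  blk->_word_size -= word_sz;  // shrink the space remaining
  return res;
}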

// Concrete subclass of CompactibleSpace that implements
// a free list space, such as used in the concurrent mark sweep
// generation.

class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class ASConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class CMSPermGenGen;
  // Local alloc buffer for promotion into this space.
  friend class CFLS_LAB;

  // "Size" of chunks of work (executed during parallel remark phases
  // of CMS collection); this probably belongs in CMSCollector, although
  // it's cached here because it's used in
  // initialize_sequential_subtasks_for_rescan() which modifies
  // par_seq_tasks which also lives in Space. XXX
  const size_t _rescan_task_size;
  const size_t _marking_task_size;

  // Yet another sequential tasks done structure. This supports
  // CMS GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _conc_par_seq_tasks;

  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _gen;

  // Data structures for free blocks (used during allocation/sweeping)

  // Allocation is done linearly from two different blocks depending on
  // whether the request is small or large, in an effort to reduce
  // fragmentation. We assume that any locking for allocation is done
  // by the containing generation. Thus, none of the methods in this
  // space are re-entrant.
  enum SomeConstants {
    SmallForLinearAlloc = 16,        // size < this then use _sLAB
    SmallForDictionary  = 257,       // size < this then use _indexedFreeList
    IndexSetSize        = SmallForDictionary,  // keep this odd-sized
    IndexSetStart       = MinObjAlignment,
    IndexSetStride      = MinObjAlignment
  };
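  // (Added note: a request of word size sz is steered, roughly, as
  //    sz <  SmallForLinearAlloc -> the small linear allocation block
  //    sz <  SmallForDictionary  -> _indexedFreeList[sz]
  //    otherwise                 -> _dictionary
  //  -- see the allocate_*_freelists() helpers declared below.)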

 private:
  enum FitStrategyOptions {
    FreeBlockStrategyNone = 0,
    FreeBlockBestFitFirst
  };

  PromotionInfo _promoInfo;

  // helps to impose a global total order on freelistLock ranks;
  // assumes that CFLSpace's are allocated in global total order
  static int   _lockRank;

  // a lock protecting the free lists and free blocks;
  // mutable because of ubiquity of locking even for otherwise const methods
  mutable Mutex _freelistLock;
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // Linear allocation blocks
  LinearAllocBlock _smallLinearAllocBlock;

  FreeBlockDictionary::DictionaryChoice _dictionaryChoice;
  FreeBlockDictionary* _dictionary;    // ptr to dictionary for large size blocks

  FreeList _indexedFreeList[IndexSetSize];
                                       // indexed array for small size blocks
  // allocation strategy
  bool       _fitStrategy;        // Use best fit strategy.
  bool       _adaptive_freelists; // Use adaptive freelists

  // This is an address close to the largest free chunk in the heap.
  // It is currently assumed to be at the end of the heap. Free
  // chunks with addresses greater than nearLargestChunk are coalesced
  // in an effort to maintain a large chunk at the end of the heap.
  HeapWord* _nearLargestChunk;

  // Used to keep track of limit of sweep for the space
  HeapWord* _sweep_limit;

  // Support for compacting cms
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);

  // Initialization helpers.
  void initializeIndexedFreeListArray();

  // Extra stuff to manage promotion parallelism.

  // a lock protecting the dictionary during par promotion allocation.
  mutable Mutex _parDictionaryAllocLock;
  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }

  // Locks protecting the exact lists during par promotion allocation.
  Mutex* _indexedFreeListParLocks[IndexSetSize];

#if CFLS_LAB_REFILL_STATS
  // Some statistics.
  jint _par_get_chunk_from_small;
  jint _par_get_chunk_from_large;
#endif

  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
  // required to be smaller than "IndexSetSize".) If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates the
  // number of free chunks that had been previously "borrowed" from the
  // global list of size "word_sz", and that must now be decremented.
  void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl);

  // Allocation helper functions
  // Allocate using a strategy that takes from the indexed free lists
  // first. This allocation strategy assumes a companion sweeping
  // strategy that attempts to keep the needed number of chunks in each
  // indexed free list.
  HeapWord* allocate_adaptive_freelists(size_t size);
  // Allocate from the linear allocation buffers first. This allocation
  // strategy assumes maximal coalescing can maintain chunks large enough
  // to be used as linear allocation buffers.
  HeapWord* allocate_non_adaptive_freelists(size_t size);

  // Gets a chunk from the linear allocation block (LinAB). If there
  // is not enough space in the LinAB, refills it.
  HeapWord*  getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
  HeapWord*  getChunkFromSmallLinearAllocBlock(size_t size);
  // Get a chunk from the space remaining in the linear allocation block. Do
  // not attempt to refill if the space is not available; return NULL instead.
  // Do the repairs on the linear allocation block as appropriate.
  HeapWord*  getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
  inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size);

  // Helper function for getChunkFromIndexedFreeList.
  // Replenish the indexed free list for this "size". Do not take from an
  // underpopulated size.
  FreeChunk* getChunkFromIndexedFreeListHelper(size_t size);

  // Get a chunk from the indexed free list. If the indexed free list
  // does not have a free chunk, try to replenish the indexed free list
  // then get the free chunk from the replenished indexed free list.
  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);

  // The returned chunk may be larger than requested (or null).
  FreeChunk* getChunkFromDictionary(size_t size);
  // The returned chunk is the exact size requested (or null).
  FreeChunk* getChunkFromDictionaryExact(size_t size);

  // Find a chunk in the indexed free list that is the best
  // fit for size "numWords".
  FreeChunk* bestFitSmall(size_t numWords);
  // For free list "fl" of chunks of size > numWords,
  // remove a chunk, split off a chunk of size numWords
  // and return it. The split off remainder is returned to
  // the free lists. The old name for getFromListGreater
  // was lookInListGreater.
  FreeChunk* getFromListGreater(FreeList* fl, size_t numWords);
  // Get a chunk in the indexed free list or dictionary,
  // by considering a larger chunk and splitting it.
  FreeChunk* getChunkFromGreater(size_t numWords);
  // Verify that the given chunk is in the indexed free lists.
  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
  // Remove the specified chunk from the indexed free lists.
  void       removeChunkFromIndexedFreeList(FreeChunk* fc);
  // Remove the specified chunk from the dictionary.
  void       removeChunkFromDictionary(FreeChunk* fc);
  // Split a free chunk into a smaller free chunk of size "new_size".
  // Return the smaller free chunk and return the remainder to the
  // free lists.
  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
  // Add a chunk to the free lists.
  void       addChunkToFreeLists(HeapWord* chunk, size_t size);
  // Add a chunk to the free lists, preferring to suffix it
  // to the last free chunk at end of space if possible, and
  // updating the block census stats as well as block offset table.
  // Take any locks as appropriate if we are multithreaded.
  void       addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
  // Add a free chunk to the indexed free lists.
  void       returnChunkToFreeList(FreeChunk* chunk);
  // Add a free chunk to the dictionary.
  void       returnChunkToDictionary(FreeChunk* chunk);

  // Functions for maintaining the linear allocation buffers (LinAB).
  // Repairing a linear allocation block refers to operations
  // performed on the remainder of a LinAB after an allocation
  // has been made from it.
  void       repairLinearAllocationBlocks();
  void       repairLinearAllocBlock(LinearAllocBlock* blk);
  void       refillLinearAllocBlock(LinearAllocBlock* blk);
  void       refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
  void       refillLinearAllocBlocksIfNeeded();

  void       verify_objects_initialized() const;

  // Statistics reporting helper functions
  void       reportFreeListStatistics() const;
  void       reportIndexedFreeListStatistics() const;
  size_t     maxChunkSizeInIndexedFreeLists() const;
  size_t     numFreeBlocksInIndexedFreeLists() const;
  // Accessor
  HeapWord* unallocated_block() const {
    HeapWord* ub = _bt.unallocated_block();
    assert(ub >= bottom() &&
           ub <= end(), "space invariant");
    return ub;
  }
  void freed(HeapWord* start, size_t size) {
    _bt.freed(start, size);
  }

 protected:
  // reset the indexed free list to its initial empty condition.
  void resetIndexedFreeListArray();
  // reset to an initial state with a single free block described
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t totalSizeInIndexedFreeLists() const;

 public:
  // Constructor...
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                           bool use_adaptive_freelists,
                           FreeBlockDictionary::DictionaryChoice);
  // accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  FreeBlockDictionary* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  // Return the free chunk at the end of the space. If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  bool adaptive_freelists() const { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking
  const size_t rescan_task_size()  const { return _rescan_task_size;  }
  const size_t marking_task_size() const { return _marking_task_size; }
  SequentialSubTasksDone* conc_par_seq_tasks() { return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
         HeapWord* low = NULL);

#if CFLS_LAB_REFILL_STATS
  void print_par_alloc_stats();
#endif

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of
  // Space; we could consider keeping track of highest allocated
  // address and correcting that at each sweep, as the sweeper
  // goes through the entire allocated part of the generation. We
  // could also use that information to keep the sweeper from
  // sweeping more than is necessary. The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region()
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }

  // This is needed because the default implementation uses block_start()
  // which can't be used at certain times (for example phase 3 of mark-sweep).
  // A better fix is to change the assertions in phase 3 of mark-sweep to
  // use is_in_reserved(), but that is deferred since the is_in() assertions
  // are buried through several layers of callers and are used elsewhere
  // as well.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value);  // override

  // mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }

  // Iteration support
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void oop_iterate(OopClosure* cl);

  void object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done. Return of "NULL" indicates that the
  // iteration completed.
  virtual HeapWord*
       object_iterate_careful_m(MemRegion mr,
                                ObjectClosureCareful* cl);
  virtual HeapWord*
       object_iterate_careful(ObjectClosureCareful* cl);

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);

  // iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration. Thus, when the iteration completes
  // there should be no further such objects remaining.
#define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
#undef CFLS_OOP_SINCE_SAVE_MARKS_DECL

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop       promote(oop obj, size_t obj_size);
  void      gc_prologue();
  void      gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related house-keeping functions.
  void      sweep_completed();

  // For an object in this space, the mark-word's two
  // LSB's having the value [11] indicates that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }
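  // (Added note: the [11] pattern is exactly PromotedObject::promoted_mask,
  // i.e. right_n_bits(2) == 0x3, the bits that setPromotedMark() ORs into
  // the word overlaying the object's header.)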

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // returns true if either the small or large linear allocation buffer is empty.
  bool linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size. This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
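  // (Added example: a request below MinChunkSize is first padded up to
  // MinChunkSize -- so the block can hold FreeChunk bookkeeping if it is
  // later freed -- and then rounded by align_object_size(); larger
  // requests get only the alignment rounding.)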
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void      removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void      addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
              bool coalesced);

  // Support for decisions regarding concurrent collection policy
  bool should_concurrent_collect() const;

  // Support for compaction
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  // reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support
  void print()                            const;
  void prepare_for_verify();
  void verify(bool allow_dirty)           const;
  void verifyFreeLists()                  const PRODUCT_RETURN;
  void verifyIndexedFreeLists()           const;
  void verifyIndexedFreeList(size_t size) const;
  // verify that the given chunk is in the free lists.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void checkFreeListConsistency()         const PRODUCT_RETURN;

  NOT_PRODUCT (
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )

  // The census consists of counts of quantities such as
  // the current count of the free chunks, the number of chunks
  // created as a result of the split of a larger chunk or the
  // coalescing of smaller chunks, etc. The counts in the
  // census are used to make decisions on splitting and
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float sweep_current,
                          float sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

  // Record (for each size):
  //
  //   split-births = #chunks added due to splits in (prev-sweep-end,
  //      this-sweep-start)
  //   split-deaths = #chunks removed for splits in (prev-sweep-end,
  //      this-sweep-start)
  //   num-curr     = #chunks at start of this sweep
  //   num-prev     = #chunks at end of previous sweep
  //
  // The above are quantities that are measured. Now define:
  //
  //   num-desired := num-prev + split-births - split-deaths - num-curr
  //
  // Roughly, num-prev + split-births is the supply,
  // split-deaths is demand due to other sizes
  // and num-curr is what we have left.
  //
  // Thus, num-desired is roughly speaking the "legitimate demand"
  // for blocks of this size and what we are striving to reach at the
  // end of the current sweep.
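  //
  // (Added worked example: with num-prev = 100, split-births = 40,
  //  split-deaths = 10 and num-curr = 60, num-desired = 100 + 40 - 10 - 60
  //  = 70, i.e. we aim to end the sweep with about 70 chunks of this size.)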
duke@435 | 666 | // |
duke@435 | 667 | // For a given list, let num-len be its current population. |
duke@435 | 668 | // Define, for a free list of a given size: |
duke@435 | 669 | // |
duke@435 | 670 | // coal-overpopulated := num-len >= num-desired * coal-surplus |
duke@435 | 671 | // (coal-surplus is set to 1.05, i.e. we allow a little slop when |
duke@435 | 672 | // coalescing -- we do not coalesce unless we think that the current |
duke@435 | 673 | // supply has exceeded the estimated demand by more than 5%). |
duke@435 | 674 | // |
duke@435 | 675 | // For the set of sizes in the binary tree, which is neither dense nor |
duke@435 | 676 | // closed, it may be the case that for a particular size we have never |
duke@435 | 677 | // had, or do not now have, or did not have at the previous sweep, |
duke@435 | 678 | // chunks of that size. We need to extend the definition of |
duke@435 | 679 | // coal-overpopulated to such sizes as well: |
duke@435 | 680 | // |
duke@435 | 681 | // For a chunk in/not in the binary tree, extend coal-overpopulated |
duke@435 | 682 | // defined above to include all sizes as follows: |
duke@435 | 683 | // |
duke@435 | 684 | // . a size that is non-existent is coal-overpopulated |
duke@435 | 685 | // . a size that has a num-desired <= 0 as defined above is |
duke@435 | 686 | // coal-overpopulated. |
duke@435 | 687 | // |
duke@435 | 688 | // Also define, for a chunk heap-offset C and mountain heap-offset M: |
duke@435 | 689 | // |
duke@435 | 690 | // close-to-mountain := C >= 0.99 * M |
duke@435 | 691 | // |
duke@435 | 692 | // Now, the coalescing strategy is: |
duke@435 | 693 | // |
duke@435 | 694 | // Coalesce left-hand chunk with right-hand chunk if and |
duke@435 | 695 | // only if: |
duke@435 | 696 | // |
duke@435 | 697 | // EITHER |
duke@435 | 698 | // . left-hand chunk is of a size that is coal-overpopulated |
duke@435 | 699 | // OR |
duke@435 | 700 | // . right-hand chunk is close-to-mountain |
duke@435 | 701 | void smallCoalBirth(size_t size); |
duke@435 | 702 | void smallCoalDeath(size_t size); |
duke@435 | 703 | void coalBirth(size_t size); |
duke@435 | 704 | void coalDeath(size_t size); |
duke@435 | 705 | void smallSplitBirth(size_t size); |
duke@435 | 706 | void smallSplitDeath(size_t size); |
duke@435 | 707 | void splitBirth(size_t size); |
duke@435 | 708 | void splitDeath(size_t size); |
duke@435 | 709 | void split(size_t from, size_t to1); |
duke@435 | 710 | |
duke@435 | 711 | double flsFrag() const; |
duke@435 | 712 | }; |

// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CFLS_LAB : public CHeapObj {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  FreeList _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.
  size_t _blocks_to_claim;

#if CFLS_LAB_REFILL_STATS
  // Some statistics.
  int _refills;
  int _blocksTaken;
  static int _tot_refills;
  static int _tot_blocksTaken;
  static int _next_threshold;
#endif

 public:
  CFLS_LAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire();
};
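
// Illustrative sketch (added; not part of the original header): the
// intended per-GC-worker lifetime of a CFLS_LAB. The function name and
// the copy step are hypothetical; only alloc() and retire() come from
// the class above.
inline void example_promote_with_lab(CompactibleFreeListSpace* cfls,
                                     size_t obj_words) {
  CFLS_LAB lab(cfls);                     // one buffer per worker thread
  HeapWord* dest = lab.alloc(obj_words);  // fast, mostly lock-free path
  if (dest != NULL) {
    // ... copy the promoted object into dest ...
  }
  lab.retire();  // return unused chunks to the shared free lists
}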

size_t PromotionInfo::refillSize() const {
  const size_t CMSSpoolBlockSize = 256;
  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
                                   * CMSSpoolBlockSize);
  return CompactibleFreeListSpace::adjustObjectSize(sz);
}
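
// (Added note: refillSize() sizes a spooling block to hold CMSSpoolBlockSize
// displaced headers plus the SpoolBlock header itself, then rounds the
// result with adjustObjectSize() so a retired block is a legal free chunk.)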