--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Sat Dec 01 00:00:00 2007 +0000
@@ -0,0 +1,748 @@
/*
 * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Classes in support of keeping track of promotions into a non-contiguous
// space, in this case a CompactibleFreeListSpace.

#define CFLS_LAB_REFILL_STATS 0

// Forward declarations
class CompactibleFreeListSpace;
class BlkClosure;
class BlkClosureCareful;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;

class PromotedObject VALUE_OBJ_CLASS_SPEC {
 private:
  enum {
    promoted_mask  = right_n_bits(2),    // i.e. 0x3
    displaced_mark = nth_bit(2),         // i.e. 0x4
    next_mask      = ~(right_n_bits(3))  // i.e. ~(0x7)
  };
  intptr_t _next;
 public:
  inline PromotedObject* next() const {
    return (PromotedObject*)(_next & next_mask);
  }
  inline void setNext(PromotedObject* x) {
    assert(((intptr_t)x & ~next_mask) == 0,
           "Conflict in bit usage, "
           "or insufficient alignment of objects");
    _next |= (intptr_t)x;
  }
  inline void setPromotedMark() {
    _next |= promoted_mask;
  }
  inline bool hasPromotedMark() const {
    return (_next & promoted_mask) == promoted_mask;
  }
  inline void setDisplacedMark() {
    _next |= displaced_mark;
  }
  inline bool hasDisplacedMark() const {
    return (_next & displaced_mark) != 0;
  }
  inline void clearNext() { _next = 0; }
  debug_only(void *next_addr() { return (void *) &_next; })
};

class SpoolBlock: public FreeChunk {
  friend class PromotionInfo;
 protected:
  SpoolBlock*  nextSpoolBlock;
  size_t       bufferSize;        // number of usable words in this block
  markOop*     displacedHdr;      // the displaced headers start here

  // Note about bufferSize: it denotes the number of entries available plus 1;
  // legal indices range from 1 through bufferSize - 1.  See the verification
  // code verify() that counts the number of displaced headers spooled.
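  // Editorial worked example (illustrative; assumes an LP64 VM where
  // HeapWord and markOop are both 8 bytes and sizeof(SpoolBlock) is
  // 40 bytes -- the true size depends on FreeChunk's layout): a spool
  // block occupying size() == 256 heap words yields
  //   computeBufferSize() == (256 * 8 - 40) / 8 == 251,
  // i.e. 250 usable slots addressed by indices 1 through 250.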
  size_t computeBufferSize() {
    return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markOop);
  }

 public:
  void init() {
    bufferSize = computeBufferSize();
    displacedHdr = (markOop*)&displacedHdr;
    nextSpoolBlock = NULL;
  }
};

class PromotionInfo VALUE_OBJ_CLASS_SPEC {
  bool            _tracking;      // set if tracking
  CompactibleFreeListSpace* _space; // the space to which this belongs
  PromotedObject* _promoHead;     // head of list of promoted objects
  PromotedObject* _promoTail;     // tail of list of promoted objects
  SpoolBlock*     _spoolHead;     // first spooling block
  SpoolBlock*     _spoolTail;     // last non-full spooling block or null
  SpoolBlock*     _splice_point;  // when _spoolTail is null, holds list tail
  SpoolBlock*     _spareSpool;    // free spool buffer
  size_t          _firstIndex;    // first active index in
                                  // first spooling block (_spoolHead)
  size_t          _nextIndex;     // last active index + 1 in last
                                  // spooling block (_spoolTail)
 private:
  // ensure that spooling space exists; return true if there is spooling space
  bool ensure_spooling_space_work();

 public:
  PromotionInfo() :
    _tracking(false), _space(NULL),
    _promoHead(NULL), _promoTail(NULL),
    _spoolHead(NULL), _spoolTail(NULL),
    _spareSpool(NULL), _firstIndex(1),
    _nextIndex(1) {}

  bool noPromotions() const {
    assert(_promoHead != NULL || _promoTail == NULL, "list inconsistency");
    return _promoHead == NULL;
  }
  void startTrackingPromotions();
  void stopTrackingPromotions();
  bool tracking() const { return _tracking; }
  void track(PromotedObject* trackOop);  // keep track of a promoted oop
  // The following variant must be used when trackOop is not fully
  // initialized and has a NULL klass:
  void track(PromotedObject* trackOop, klassOop klassOfOop); // keep track of a promoted oop
  void setSpace(CompactibleFreeListSpace* sp) { _space = sp; }
  CompactibleFreeListSpace* space() const { return _space; }
  markOop nextDisplacedHeader(); // get next header & forward spool pointer
  void    saveDisplacedHeader(markOop hdr);
                                 // save header and forward spool

  inline size_t refillSize() const;

  SpoolBlock* getSpoolBlock();   // return a free spooling block
  inline bool has_spooling_space() {
    return _spoolTail != NULL && _spoolTail->bufferSize > _nextIndex;
  }
  // ensure that spooling space exists
  bool ensure_spooling_space() {
    return has_spooling_space() || ensure_spooling_space_work();
  }
  #define PROMOTED_OOPS_ITERATE_DECL(OopClosureType, nv_suffix)  \
    void promoted_oops_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(PROMOTED_OOPS_ITERATE_DECL)
  #undef PROMOTED_OOPS_ITERATE_DECL
  void promoted_oops_iterate(OopsInGenClosure* cl) {
    promoted_oops_iterate_v(cl);
  }
  void verify() const;
  void reset() {
    _promoHead = NULL;
    _promoTail = NULL;
    _spoolHead = NULL;
    _spoolTail = NULL;
    _spareSpool = NULL;
    _firstIndex = 0;
    _nextIndex  = 0;
  }
};
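// Editorial illustration (not part of the original file): PromotedObject
// above reuses the low three bits of an 8-byte-aligned next-pointer as
// mark bits, a standard tagged-pointer technique.  A minimal,
// self-contained sketch of the same encoding, with hypothetical names,
// is kept out of the build below:
#if 0
#include <assert.h>
#include <stdint.h>

struct TaggedNext {
  static const intptr_t kPromotedMask  = 0x3;           // low two bits
  static const intptr_t kDisplacedMark = 0x4;           // third bit
  static const intptr_t kNextMask      = ~(intptr_t)0x7;
  intptr_t _bits;

  TaggedNext* next() const { return (TaggedNext*)(_bits & kNextMask); }
  void setNext(TaggedNext* x) {
    // Requires at least 8-byte alignment so the low bits are free for tags.
    assert(((intptr_t)x & ~kNextMask) == 0);
    _bits |= (intptr_t)x;
  }
  void setPromotedMark()        { _bits |= kPromotedMask; }
  bool hasPromotedMark()  const { return (_bits & kPromotedMask) == kPromotedMask; }
  void setDisplacedMark()       { _bits |= kDisplacedMark; }
  bool hasDisplacedMark() const { return (_bits & kDisplacedMark) != 0; }
  void clearNext()              { _bits = 0; }
};
// Note that next() stays correct after setPromotedMark()/setDisplacedMark(),
// since the tag bits are masked back out.
#endif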
class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
 public:
  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
                       _allocation_size_limit(0) {}
  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
           size_t allocation_size_limit) {
    _ptr = ptr;
    _word_size = word_size;
    _refillSize = refill_size;
    _allocation_size_limit = allocation_size_limit;
  }
  HeapWord* _ptr;
  size_t    _word_size;
  size_t    _refillSize;
  size_t    _allocation_size_limit;  // largest size that will be allocated
};

// Concrete subclass of CompactibleSpace that implements
// a free list space, such as used in the concurrent mark sweep
// generation.

class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class ASConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class CMSPermGenGen;
  // Local alloc buffer for promotion into this space.
  friend class CFLS_LAB;

  // "Size" of chunks of work (executed during parallel remark phases
  // of CMS collection); this probably belongs in CMSCollector, although
  // it's cached here because it's used in
  // initialize_sequential_subtasks_for_rescan(), which modifies
  // par_seq_tasks, which also lives in Space. XXX
  const size_t _rescan_task_size;
  const size_t _marking_task_size;

  // Yet another sequential-tasks-done structure.  This supports
  // CMS GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _conc_par_seq_tasks;

  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _gen;

  // Data structures for free blocks (used during allocation/sweeping)

  // Allocation is done linearly from two different blocks depending on
  // whether the request is small or large, in an effort to reduce
  // fragmentation.  We assume that any locking for allocation is done
  // by the containing generation.  Thus, none of the methods in this
  // space are re-entrant.
  enum SomeConstants {
    SmallForLinearAlloc = 16,        // size < this then use _sLAB
    SmallForDictionary  = 257,       // size < this then use _indexedFreeList
    IndexSetSize        = SmallForDictionary,  // keep this odd-sized
    IndexSetStart       = MinObjAlignment,
    IndexSetStride      = MinObjAlignment
  };

 private:
  enum FitStrategyOptions {
    FreeBlockStrategyNone = 0,
    FreeBlockBestFitFirst
  };

  PromotionInfo _promoInfo;

  // Helps to impose a global total order on freelistLock ranks;
  // assumes that CFLSpaces are allocated in global total order.
  static int _lockRank;

  // A lock protecting the free lists and free blocks;
  // mutable because of ubiquity of locking even for otherwise const methods.
  mutable Mutex _freelistLock;
  // Locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // Linear allocation blocks
  LinearAllocBlock _smallLinearAllocBlock;

  FreeBlockDictionary::DictionaryChoice _dictionaryChoice;
  FreeBlockDictionary* _dictionary;  // ptr to dictionary for large size blocks

  FreeList _indexedFreeList[IndexSetSize];
                                     // indexed array for small size blocks
  // allocation strategy
  bool _fitStrategy;                 // Use best fit strategy.
  bool _adaptive_freelists;          // Use adaptive freelists.

  // This is an address close to the largest free chunk in the heap.
  // It is currently assumed to be at the end of the heap.  Free
  // chunks with addresses greater than nearLargestChunk are coalesced
  // in an effort to maintain a large chunk at the end of the heap.
  HeapWord* _nearLargestChunk;

  // Used to keep track of limit of sweep for the space
  HeapWord* _sweep_limit;

  // Support for compacting cms
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);

  // Initialization helpers.
  void initializeIndexedFreeListArray();

  // Extra stuff to manage promotion parallelism.

  // A lock protecting the dictionary during par promotion allocation.
  mutable Mutex _parDictionaryAllocLock;
  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }

  // Locks protecting the exact lists during par promotion allocation.
  Mutex* _indexedFreeListParLocks[IndexSetSize];

#if CFLS_LAB_REFILL_STATS
  // Some statistics.
  jint _par_get_chunk_from_small;
  jint _par_get_chunk_from_large;
#endif

  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
  // required to be smaller than "IndexSetSize").  If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates a
  // number of free chunks that had been previously "borrowed" from the
  // global list of size "word_sz", and must now be decremented.
  void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl);
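  // Editorial example of the "borrowed" convention above (illustrative
  // figures only): a caller that earlier took three chunks of size
  // word_sz on loan leaves fl->count() == -3, so a subsequent refill
  // that obtains n fresh chunks nets n - 3 available chunks, the other
  // three repaying the loan recorded against the global list.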
  // Allocation helper functions
  // Allocate using a strategy that takes from the indexed free lists
  // first.  This allocation strategy assumes a companion sweeping
  // strategy that attempts to keep the needed number of chunks in each
  // indexed free list.
  HeapWord* allocate_adaptive_freelists(size_t size);
  // Allocate from the linear allocation buffers first.  This allocation
  // strategy assumes maximal coalescing can maintain chunks large enough
  // to be used as linear allocation buffers.
  HeapWord* allocate_non_adaptive_freelists(size_t size);

  // Gets a chunk from the linear allocation block (LinAB).  If there
  // is not enough space in the LinAB, refills it.
  HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
  HeapWord* getChunkFromSmallLinearAllocBlock(size_t size);
  // Get a chunk from the space remaining in the linear allocation block.  Do
  // not attempt to refill if the space is not available; return NULL.  Do the
  // repairs on the linear allocation block as appropriate.
  HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
  inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size);

  // Helper function for getChunkFromIndexedFreeList.
  // Replenish the indexed free list for this "size".  Do not take from an
  // underpopulated size.
  FreeChunk* getChunkFromIndexedFreeListHelper(size_t size);

  // Get a chunk from the indexed free list.  If the indexed free list
  // does not have a free chunk, try to replenish the indexed free list,
  // then get the free chunk from the replenished indexed free list.
  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);

  // The returned chunk may be larger than requested (or null).
  FreeChunk* getChunkFromDictionary(size_t size);
  // The returned chunk is the exact size requested (or null).
  FreeChunk* getChunkFromDictionaryExact(size_t size);

  // Find a chunk in the indexed free list that is the best
  // fit for size "numWords".
  FreeChunk* bestFitSmall(size_t numWords);
  // For free list "fl" of chunks of size > numWords,
  // remove a chunk, split off a chunk of size numWords
  // and return it.  The split-off remainder is returned to
  // the free lists.  The old name for getFromListGreater
  // was lookInListGreater.
  FreeChunk* getFromListGreater(FreeList* fl, size_t numWords);
  // Get a chunk in the indexed free list or dictionary
  // by considering a larger chunk and splitting it.
  FreeChunk* getChunkFromGreater(size_t numWords);
  // Verify that the given chunk is in the indexed free lists.
  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
  // Remove the specified chunk from the indexed free lists.
  void removeChunkFromIndexedFreeList(FreeChunk* fc);
  // Remove the specified chunk from the dictionary.
  void removeChunkFromDictionary(FreeChunk* fc);
  // Split a free chunk into a smaller free chunk of size "new_size".
  // Return the smaller free chunk and return the remainder to the
  // free lists.
  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
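  // Editorial example (illustrative sizes): satisfying a 100-word request
  // from a 300-word free chunk via splitChunkAndReturnRemainder() carves
  // off a 100-word chunk for the caller and returns the 200-word
  // remainder to the free lists; the census machinery further below
  // records such events as split births and deaths per size.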
  // Add a chunk to the free lists.
  void addChunkToFreeLists(HeapWord* chunk, size_t size);
  // Add a chunk to the free lists, preferring to suffix it
  // to the last free chunk at the end of the space, if possible, and
  // updating the block census stats as well as the block offset table.
  // Take any locks as appropriate if we are multithreaded.
  void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
  // Add a free chunk to the indexed free lists.
  void returnChunkToFreeList(FreeChunk* chunk);
  // Add a free chunk to the dictionary.
  void returnChunkToDictionary(FreeChunk* chunk);

  // Functions for maintaining the linear allocation buffers (LinAB).
  // Repairing a linear allocation block refers to operations
  // performed on the remainder of a LinAB after an allocation
  // has been made from it.
  void repairLinearAllocationBlocks();
  void repairLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
  void refillLinearAllocBlocksIfNeeded();

  void verify_objects_initialized() const;

  // Statistics reporting helper functions
  void reportFreeListStatistics() const;
  void reportIndexedFreeListStatistics() const;
  size_t maxChunkSizeInIndexedFreeLists() const;
  size_t numFreeBlocksInIndexedFreeLists() const;
  // Accessor
  HeapWord* unallocated_block() const {
    HeapWord* ub = _bt.unallocated_block();
    assert(ub >= bottom() &&
           ub <= end(), "space invariant");
    return ub;
  }
  void freed(HeapWord* start, size_t size) {
    _bt.freed(start, size);
  }

 protected:
  // Reset the indexed free list to its initial empty condition.
  void resetIndexedFreeListArray();
  // Reset to an initial state with a single free block described
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t totalSizeInIndexedFreeLists() const;

 public:
  // Constructor
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                           bool use_adaptive_freelists,
                           FreeBlockDictionary::DictionaryChoice);
  // Accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  FreeBlockDictionary* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  // Return the free chunk at the end of the space.  If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  bool adaptive_freelists() { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking
  const size_t rescan_task_size()  const { return _rescan_task_size;  }
  const size_t marking_task_size() const { return _marking_task_size; }
  SequentialSubTasksDone* conc_par_seq_tasks() { return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
                                                  HeapWord* low = NULL);

#if CFLS_LAB_REFILL_STATS
  void print_par_alloc_stats();
#endif

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of
  // Space; we could consider keeping track of the highest allocated
  // address and correcting that at each sweep, as the sweeper
  // goes through the entire allocated part of the generation.  We
  // could also use that information to keep the sweeper from
  // sweeping more than is necessary.  The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region()
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }

  // This is needed because the default implementation uses block_start()
  // which can't be used at certain times (for example phase 3 of mark-sweep).
  // A better fix is to change the assertions in phase 3 of mark-sweep to
  // use is_in_reserved(), but that is deferred since the is_in() assertions
  // are buried through several layers of callers and are used elsewhere
  // as well.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value);  // override

  // Mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }

  // Iteration support
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void oop_iterate(OopClosure* cl);

  void object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
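  // Editorial sketch of the "careful" iteration protocol declared just
  // below (hypothetical caller, illustrative only): iteration stops at
  // an unparseable portion and resumes from the returned address, e.g.
  //
  //   MemRegion rest = mr;
  //   while (!rest.is_empty()) {
  //     HeapWord* stop = sp->object_iterate_careful_m(rest, cl);
  //     if (stop == NULL) break;  // the whole region has been done
  //     // ... wait until [stop, rest.end()) is parseable, then ...
  //     rest = MemRegion(stop, rest.end());
  //   }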
  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done.  Return of "NULL" indicates that the
  // iteration completed.
  virtual HeapWord*
         object_iterate_careful_m(MemRegion mr,
                                  ObjectClosureCareful* cl);
  virtual HeapWord*
         object_iterate_careful(ObjectClosureCareful* cl);

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // Iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);

  // Iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.  Thus, when the iteration completes
  // there should be no further such objects remaining.
  #define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
  #undef CFLS_OOP_SINCE_SAVE_MARKS_DECL

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop  promote(oop obj, size_t obj_size, oop* ref);
  void gc_prologue();
  void gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related housekeeping functions.
  void sweep_completed();

  // For an object in this space, the mark-word's two
  // LSBs having the value [11] indicate that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);
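  // Editorial note: the [11] pattern tested by
  // obj_allocated_since_save_marks() above is exactly
  // PromotedObject::promoted_mask (0x3): while an object sits on the
  // promoted-object list its header word doubles as the
  // PromotedObject::_next field, and setPromotedMark() sets both low bits.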
  // Returns true if either the small or large linear allocation buffer is empty.
  bool linearAllocationWouldFail();

  // Adjust the chunk for the minimum size.  This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
                                    bool coalesced);

  // Support for compaction
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  // Reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support
  void print()                            const;
  void prepare_for_verify();
  void verify(bool allow_dirty)           const;
  void verifyFreeLists()                  const PRODUCT_RETURN;
  void verifyIndexedFreeLists()           const;
  void verifyIndexedFreeList(size_t size) const;
  // Verify that the given chunk is in the free lists.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void checkFreeListConsistency()         const PRODUCT_RETURN;

  NOT_PRODUCT(
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )

  // The census consists of counts of quantities such as
  // the current count of the free chunks, the number of chunks
  // created as a result of the split of a larger chunk or
  // coalescing of smaller chunks, etc.  The counts in the
  // census are used to make decisions on splitting and
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(int sweepCt) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float sweep_current,
                          float sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(int sweepCt);
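  // Editorial summary (restating the comments above): the census brackets
  // each sweep -- beginSweepFLCensus() initializes the per-list statistics,
  // the sweep records chunk births and deaths as it splits and coalesces
  // (see the coal*/split* methods below), and endSweepFLCensus() closes
  // the books from which the surpluses and hints are then derived.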
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

  // Record (for each size):
  //
  //   split-births = #chunks added due to splits in (prev-sweep-end,
  //      this-sweep-start)
  //   split-deaths = #chunks removed for splits in (prev-sweep-end,
  //      this-sweep-start)
  //   num-curr     = #chunks at start of this sweep
  //   num-prev     = #chunks at end of previous sweep
  //
  // The above are quantities that are measured.  Now define:
  //
  //   num-desired := num-prev + split-births - split-deaths - num-curr
  //
  // Roughly, num-prev + split-births is the supply,
  // split-deaths is demand due to other sizes
  // and num-curr is what we have left.
  //
  // Thus, num-desired is roughly speaking the "legitimate demand"
  // for blocks of this size and what we are striving to reach at the
  // end of the current sweep.
  //
  // For a given list, let num-len be its current population.
  // Define, for a free list of a given size:
  //
  //   coal-overpopulated := num-len >= num-desired * coal-surplus
  // (coal-surplus is set to 1.05, i.e. we allow a little slop when
  // coalescing -- we do not coalesce unless we think that the current
  // supply has exceeded the estimated demand by more than 5%).
  //
  // For the set of sizes in the binary tree, which is neither dense nor
  // closed, it may be the case that for a particular size we have never
  // had, or do not now have, or did not have at the previous sweep,
  // chunks of that size.  We need to extend the definition of
  // coal-overpopulated to such sizes as well:
  //
  //   For a chunk in/not in the binary tree, extend coal-overpopulated
  //   defined above to include all sizes as follows:
  //
  //   . a size that is non-existent is coal-overpopulated
  //   . a size that has a num-desired <= 0 as defined above is
  //     coal-overpopulated.
  //
  // Also define, for a chunk heap-offset C and mountain heap-offset M:
  //
  //   close-to-mountain := C >= 0.99 * M
  //
  // Now, the coalescing strategy is:
  //
  //   Coalesce left-hand chunk with right-hand chunk if and
  //   only if:
  //
  //   EITHER
  //     . left-hand chunk is of a size that is coal-overpopulated
  //   OR
  //     . right-hand chunk is close-to-mountain
  void smallCoalBirth(size_t size);
  void smallCoalDeath(size_t size);
  void coalBirth(size_t size);
  void coalDeath(size_t size);
  void smallSplitBirth(size_t size);
  void smallSplitDeath(size_t size);
  void splitBirth(size_t size);
  void splitDeath(size_t size);
  void split(size_t from, size_t to1);

  double flsFrag() const;
};
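// Editorial illustration (not part of the original file): the coalescing
// policy documented above reduces to a little arithmetic.  A minimal,
// self-contained sketch with hypothetical names, using the coal-surplus
// (1.05) and close-to-mountain (0.99) factors quoted above, kept out of
// the build:
#if 0
// num-desired := num-prev + split-births - split-deaths - num-curr
inline long numDesired(long numPrev, long splitBirths,
                       long splitDeaths, long numCurr) {
  return numPrev + splitBirths - splitDeaths - numCurr;
}

// coal-overpopulated := num-len >= num-desired * coal-surplus;
// a non-positive num-desired counts as overpopulated by definition.
inline bool isCoalOverpopulated(long numLen, long desired) {
  const double kCoalSurplus = 1.05;
  return desired <= 0 || (double)numLen >= (double)desired * kCoalSurplus;
}

// Coalesce the left-hand chunk with the right-hand chunk iff the left
// chunk's size is coal-overpopulated or the right chunk is
// close-to-mountain (chunk heap-offset C >= 0.99 * mountain heap-offset M).
inline bool shouldCoalesce(bool leftOverpopulated, double c, double m) {
  return leftOverpopulated || c >= 0.99 * m;
}
#endif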
// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CFLS_LAB : public CHeapObj {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  FreeList _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.
  size_t _blocks_to_claim;

#if CFLS_LAB_REFILL_STATS
  // Some statistics.
  int _refills;
  int _blocksTaken;
  static int _tot_refills;
  static int _tot_blocksTaken;
  static int _next_threshold;
#endif

public:
  CFLS_LAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire();
};

size_t PromotionInfo::refillSize() const {
  const size_t CMSSpoolBlockSize = 256;
  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
                                   * CMSSpoolBlockSize);
  return CompactibleFreeListSpace::adjustObjectSize(sz);
}
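// Editorial worked example for refillSize() (illustrative; same LP64
// assumptions as the SpoolBlock note above, i.e. 8-byte markOops and a
// 40-byte SpoolBlock): sizeof(SpoolBlock) + 256 * 8 == 2088 bytes, or
// 261 heap words, which adjustObjectSize() then pads and aligns to a
// legal chunk size.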