/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Classes in support of keeping track of promotions into a non-Contiguous
// space, in this case a CompactibleFreeListSpace.

// Forward declarations
class CompactibleFreeListSpace;
class BlkClosure;
class BlkClosureCareful;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;

class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
 public:
  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
    _allocation_size_limit(0) {}
  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
    size_t allocation_size_limit) {
    _ptr = ptr;
    _word_size = word_size;
    _refillSize = refill_size;
    _allocation_size_limit = allocation_size_limit;
  }
  HeapWord* _ptr;
  size_t    _word_size;
  size_t    _refillSize;
  size_t    _allocation_size_limit;  // largest size that will be allocated

  void print_on(outputStream* st) const;
};

// Concrete subclass of CompactibleSpace that implements
// a free list space, such as used in the concurrent mark sweep
// generation.

class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class ASConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class CMSPermGenGen;
  // Local alloc buffer for promotion into this space.
  friend class CFLS_LAB;

  // "Size" of chunks of work (executed during parallel remark phases
  // of CMS collection); this probably belongs in CMSCollector, although
  // it's cached here because it's used in
  // initialize_sequential_subtasks_for_rescan() which modifies
  // par_seq_tasks which also lives in Space. XXX
  const size_t _rescan_task_size;
  const size_t _marking_task_size;

  // Yet another sequential tasks done structure.
  // This supports CMS GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _conc_par_seq_tasks;

  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _gen;

  // Data structures for free blocks (used during allocation/sweeping)

  // Allocation is done linearly from two different blocks depending on
  // whether the request is small or large, in an effort to reduce
  // fragmentation. We assume that any locking for allocation is done
  // by the containing generation. Thus, none of the methods in this
  // space are re-entrant.
  enum SomeConstants {
    SmallForLinearAlloc = 16,                // size < this then use _sLAB
    SmallForDictionary  = 257,               // size < this then use _indexedFreeList
    IndexSetSize        = SmallForDictionary // keep this odd-sized
  };
  static int IndexSetStart;
  static int IndexSetStride;

 private:
  enum FitStrategyOptions {
    FreeBlockStrategyNone = 0,
    FreeBlockBestFitFirst
  };

  PromotionInfo _promoInfo;

  // Helps to impose a global total order on freelistLock ranks;
  // assumes that CFLSpaces are allocated in global total order.
  static int _lockRank;

  // A lock protecting the free lists and free blocks;
  // mutable because of the ubiquity of locking even for otherwise const methods.
  mutable Mutex _freelistLock;
  // Locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;
  void assert_locked(const Mutex* lock) const PRODUCT_RETURN;

  // Linear allocation blocks
  LinearAllocBlock _smallLinearAllocBlock;

  FreeBlockDictionary::DictionaryChoice _dictionaryChoice;
  FreeBlockDictionary* _dictionary;  // pointer to dictionary for large size blocks

  FreeList _indexedFreeList[IndexSetSize];  // indexed array for small size blocks
  // Allocation strategy
  bool _fitStrategy;          // Use best fit strategy.
  bool _adaptive_freelists;   // Use adaptive freelists

  // This is an address close to the largest free chunk in the heap.
  // It is currently assumed to be at the end of the heap.  Free
  // chunks with addresses greater than nearLargestChunk are coalesced
  // in an effort to maintain a large chunk at the end of the heap.
  HeapWord* _nearLargestChunk;

  // Used to keep track of the limit of sweep for the space
  HeapWord* _sweep_limit;

  // Support for compacting cms
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);

  // Initialization helpers.
  void initializeIndexedFreeListArray();

  // Extra stuff to manage promotion parallelism.

  // A lock protecting the dictionary during par promotion allocation.
  mutable Mutex _parDictionaryAllocLock;
  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }

  // Locks protecting the exact lists during par promotion allocation.
  Mutex* _indexedFreeListParLocks[IndexSetSize];

  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
  // required to be smaller than "IndexSetSize").  If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates the
  // number of free chunks that had been previously "borrowed" from the
  // global list of size "word_sz", and must now be decremented.
  void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl);

  // Allocation helper functions
  // Allocate using a strategy that takes from the indexed free lists
  // first.  This allocation strategy assumes a companion sweeping
  // strategy that attempts to keep the needed number of chunks in each
  // indexed free list.
  HeapWord* allocate_adaptive_freelists(size_t size);
  // Allocate from the linear allocation buffers first.  This allocation
  // strategy assumes maximal coalescing can maintain chunks large enough
  // to be used as linear allocation buffers.
  HeapWord* allocate_non_adaptive_freelists(size_t size);

  // Gets a chunk from the linear allocation block (LinAB).  If there
  // is not enough space in the LinAB, refills it.
  HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
  HeapWord* getChunkFromSmallLinearAllocBlock(size_t size);
  // Get a chunk from the space remaining in the linear allocation block.  Do
  // not attempt to refill if the space is not available; return NULL.  Do the
  // repairs on the linear allocation block as appropriate.
  HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
  inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size);

  // Helper function for getChunkFromIndexedFreeList.
  // Replenish the indexed free list for this "size".  Do not take from an
  // underpopulated size.
  FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);

  // Get a chunk from the indexed free list.  If the indexed free list
  // does not have a free chunk, try to replenish the indexed free list,
  // then get the free chunk from the replenished indexed free list.
  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);

  // The returned chunk may be larger than requested (or null).
  FreeChunk* getChunkFromDictionary(size_t size);
  // The returned chunk is the exact size requested (or null).
  FreeChunk* getChunkFromDictionaryExact(size_t size);

  // Find a chunk in the indexed free list that is the best
  // fit for size "numWords".
  FreeChunk* bestFitSmall(size_t numWords);
  // For free list "fl" of chunks of size > numWords,
  // remove a chunk, split off a chunk of size numWords
  // and return it.  The split-off remainder is returned to
  // the free lists.  The old name for getFromListGreater
  // was lookInListGreater.
  FreeChunk* getFromListGreater(FreeList* fl, size_t numWords);
  // Get a chunk in the indexed free list or dictionary,
  // by considering a larger chunk and splitting it.
  FreeChunk* getChunkFromGreater(size_t numWords);
  // Verify that the given chunk is in the indexed free lists.
  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
  // Remove the specified chunk from the indexed free lists.
  void removeChunkFromIndexedFreeList(FreeChunk* fc);
  // Remove the specified chunk from the dictionary.
  void removeChunkFromDictionary(FreeChunk* fc);
  // Split a free chunk into a smaller free chunk of size "new_size".
  // Return the smaller free chunk and return the remainder to the
  // free lists.
  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
  // Add a chunk to the free lists.
  void addChunkToFreeLists(HeapWord* chunk, size_t size);
  // Add a chunk to the free lists, preferring to suffix it
  // to the last free chunk at end of space if possible, and
  // updating the block census stats as well as block offset table.
  // Take any locks as appropriate if we are multithreaded.
  void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
  // Add a free chunk to the indexed free lists.
  void returnChunkToFreeList(FreeChunk* chunk);
  // Add a free chunk to the dictionary.
  void returnChunkToDictionary(FreeChunk* chunk);

  // Functions for maintaining the linear allocation buffers (LinAB).
  // Repairing a linear allocation block refers to operations
  // performed on the remainder of a LinAB after an allocation
  // has been made from it.
  void repairLinearAllocationBlocks();
  void repairLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
  void refillLinearAllocBlocksIfNeeded();

  void verify_objects_initialized() const;

  // Statistics reporting helper functions
  void reportFreeListStatistics() const;
  void reportIndexedFreeListStatistics() const;
  size_t maxChunkSizeInIndexedFreeLists() const;
  size_t numFreeBlocksInIndexedFreeLists() const;
  // Accessor
  HeapWord* unallocated_block() const {
    if (BlockOffsetArrayUseUnallocatedBlock) {
      HeapWord* ub = _bt.unallocated_block();
      assert(ub >= bottom() &&
             ub <= end(), "space invariant");
      return ub;
    } else {
      return end();
    }
  }
  void freed(HeapWord* start, size_t size) {
    _bt.freed(start, size);
  }

 protected:
  // reset the indexed free list to its initial empty condition.
  void resetIndexedFreeListArray();
  // reset to an initial state with a single free block described
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t totalSizeInIndexedFreeLists() const;

 public:
  // Constructor...
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                           bool use_adaptive_freelists,
                           FreeBlockDictionary::DictionaryChoice);
  // Accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  FreeBlockDictionary* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  // Set CMS global values
  static void set_cms_values();

  // Return the free chunk at the end of the space.  If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  bool adaptive_freelists() const { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking
  const size_t rescan_task_size()  const { return _rescan_task_size;  }
  const size_t marking_task_size() const { return _marking_task_size; }
  SequentialSubTasksDone* conc_par_seq_tasks() { return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
                                                  HeapWord* low = NULL);

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of
  // Space; we could consider keeping track of the highest allocated
  // address and correcting that at each sweep, as the sweeper
  // goes through the entire allocated part of the generation.  We
  // could also use that information to keep the sweeper from
  // sweeping more than is necessary.  The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region()
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }

  // This is needed because the default implementation uses block_start()
  // which can't be used at certain times (for example phase 3 of mark-sweep).
  // A better fix is to change the assertions in phase 3 of mark-sweep to
  // use is_in_reserved(), but that is deferred since the is_in() assertions
  // are buried through several layers of callers and are used elsewhere
  // as well.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value);  // override

  // Mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }

  // Iteration support
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void oop_iterate(OopClosure* cl);

  void object_iterate(ObjectClosure* blk);
  // Apply the closure to each object in the space whose references
  // point to objects in the heap.  The usage of CompactibleFreeListSpace
  // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
  // objects in the space with references to objects that are no longer
  // valid.  For example, an object may reference another object
  // that has already been swept up (collected).  This method uses
  // obj_is_alive() to determine whether it is safe to iterate over
  // an object.
  void safe_object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done.  Return of "NULL" indicates that the
  // iteration completed.
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start_const(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // Iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);

  // Iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.  Thus, when the iteration completes
  // there should be no further such objects remaining.
#define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
#undef CFLS_OOP_SINCE_SAVE_MARKS_DECL

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop  promote(oop obj, size_t obj_size);
  void gc_prologue();
  void gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related housekeeping functions.
  void sweep_completed();

  // For an object in this space, the mark-word's two
  // LSB's having the value [11] indicates that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  // This property holds only for single-threaded collections,
  // and is typically used for Cheney scans; for MT scavenges,
  // the property holds for all objects promoted during that
  // scavenge for the duration of the scavenge and is used
  // by card-scanning to avoid scanning objects (being) promoted
  // during that scavenge.
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // Returns true if either the small or large linear allocation buffer is empty.
  bool linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size.  This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
                                    bool coalesced);

  // Support for decisions regarding concurrent collection policy
  bool should_concurrent_collect() const;

  // Support for compaction
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  // Reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support
  void print()                            const;
  void print_on(outputStream* st)         const;
  void prepare_for_verify();
  void verify(bool allow_dirty)           const;
  void verifyFreeLists()                  const PRODUCT_RETURN;
  void verifyIndexedFreeLists()           const;
  void verifyIndexedFreeList(size_t size) const;
  // Verify that the given chunk is in the free lists.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void checkFreeListConsistency()         const PRODUCT_RETURN;

  // Printing support
  void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
  void print_indexed_free_lists(outputStream* st) const;
  void print_dictionary_free_lists(outputStream* st) const;
  void print_promo_info_blocks(outputStream* st) const;

  NOT_PRODUCT(
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )

  // The census consists of counts of quantities such as
  // the current count of the free chunks, the number of chunks
  // created as a result of the split of a larger chunk or
  // coalescing of smaller chunks, etc.  The counts in the
  // census are used to make decisions on splitting and
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float inter_sweep_current,
                          float inter_sweep_estimate,
                          float intra_sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

  // Record (for each size):
  //
  //   split-births = #chunks added due to splits in (prev-sweep-end,
  //      this-sweep-start)
  //   split-deaths = #chunks removed for splits in (prev-sweep-end,
  //      this-sweep-start)
  //   num-curr     = #chunks at start of this sweep
  //   num-prev     = #chunks at end of previous sweep
  //
  // The above are quantities that are measured. Now define:
  //
  //   num-desired := num-prev + split-births - split-deaths - num-curr
  //
  // Roughly, num-prev + split-births is the supply,
  // split-deaths is demand due to other sizes
  // and num-curr is what we have left.
  //
  // Thus, num-desired is roughly speaking the "legitimate demand"
  // for blocks of this size and what we are striving to reach at the
  // end of the current sweep.
  //
  // For a given list, let num-len be its current population.
  // Define, for a free list of a given size:
  //
  //   coal-overpopulated := num-len >= num-desired * coal-surplus
  //   (coal-surplus is set to 1.05, i.e. we allow a little slop when
  //   coalescing -- we do not coalesce unless we think that the current
  //   supply has exceeded the estimated demand by more than 5%).
  //
  // For the set of sizes in the binary tree, which is neither dense nor
  // closed, it may be the case that for a particular size we have never
  // had, or do not now have, or did not have at the previous sweep,
  // chunks of that size.  We need to extend the definition of
  // coal-overpopulated to such sizes as well:
  //
  //   For a chunk in/not in the binary tree, extend coal-overpopulated
  //   defined above to include all sizes as follows:
  //
  //   . a size that is non-existent is coal-overpopulated
  //   . a size that has a num-desired <= 0 as defined above is
  //     coal-overpopulated.
  //
  // Also define, for a chunk heap-offset C and mountain heap-offset M:
  //
  //   close-to-mountain := C >= 0.99 * M
  //
  // Now, the coalescing strategy is:
  //
  //   Coalesce left-hand chunk with right-hand chunk if and
  //   only if:
  //
  //     EITHER
  //       . left-hand chunk is of a size that is coal-overpopulated
  //     OR
  //       . right-hand chunk is close-to-mountain
  void smallCoalBirth(size_t size);
  void smallCoalDeath(size_t size);
  void coalBirth(size_t size);
  void coalDeath(size_t size);
  void smallSplitBirth(size_t size);
  void smallSplitDeath(size_t size);
  void splitBirth(size_t size);
  void splitDeath(size_t size);
  void split(size_t from, size_t to1);

  double flsFrag() const;
};

// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CFLS_LAB : public CHeapObj {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  FreeList _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.
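
  // Usage sketch (illustrative only; names other than the CFLS_LAB members
  // are hypothetical): a parallel GC worker typically owns one CFLS_LAB
  // wrapped around the old-generation CompactibleFreeListSpace, calls
  // alloc() for each promotion, and calls retire() when its share of the
  // parallel phase is done:
  //
  //   CFLS_LAB lab(cfls);                  // cfls: the CompactibleFreeListSpace
  //   HeapWord* buf = lab.alloc(word_sz);  // NULL => caller must allocate elsewhere
  //   ...
  //   lab.retire(worker_id);               // return unused blocks to the global pool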

  // Allocation statistics in support of dynamic adjustment of
  // #blocks to claim per get_from_global_pool() call below.
  static AdaptiveWeightedAverage
                _blocks_to_claim   [CompactibleFreeListSpace::IndexSetSize];
  static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
  static int    _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
  size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];

  // Internal work method
  void get_from_global_pool(size_t word_sz, FreeList* fl);

 public:
  CFLS_LAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire(int tid);

  // Dynamic OldPLABSize sizing
  static void compute_desired_plab_size();
  // When the settings are modified from default static initialization
  static void modify_initialization(size_t n, unsigned wt);
};

size_t PromotionInfo::refillSize() const {
  const size_t CMSSpoolBlockSize = 256;
  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
                                   * CMSSpoolBlockSize);
  return CompactibleFreeListSpace::adjustObjectSize(sz);
}
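
// Worked example (illustrative only; not used by the declarations above) of
// the sweep census arithmetic documented ahead of coalOverPopulated():
// suppose that for chunks of a given size the previous sweep ended with
// num-prev = 100, that since then split-births = 40 and split-deaths = 70,
// and that the current sweep starts with num-curr = 60.  Then
//
//   num-desired = num-prev + split-births - split-deaths - num-curr
//               = 100 + 40 - 70 - 60
//               = 10
//
// With coal-surplus = 1.05, the free list for this size is coal-overpopulated
// when its current length num-len >= 10 * 1.05 = 10.5, i.e. at 11 or more
// chunks, in which case a left-hand chunk of this size is eligible for
// coalescing with its right-hand neighbor during the sweep.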