--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Wed Apr 27 01:25:04 2016 +0800

/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP

#include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
#include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/freeList.hpp"
#include "memory/space.hpp"

// Classes in support of keeping track of promotions into a non-contiguous
// space, in this case a CompactibleFreeListSpace.

// Forward declarations
class CompactibleFreeListSpace;
class BlkClosure;
class BlkClosureCareful;
class FreeChunk;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;

class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
 public:
  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
    _allocation_size_limit(0) {}
  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
    size_t allocation_size_limit) {
    _ptr = ptr;
    _word_size = word_size;
    _refillSize = refill_size;
    _allocation_size_limit = allocation_size_limit;
  }
  HeapWord* _ptr;
  size_t    _word_size;
  size_t    _refillSize;
  size_t    _allocation_size_limit;  // largest size that will be allocated

  void print_on(outputStream* st) const;
};

// Concrete subclass of CompactibleSpace that implements
// a free list space, such as used in the concurrent mark sweep
// generation.

class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class ASConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  // Local alloc buffer for promotion into this space.
  friend class CFLS_LAB;

  // "Size" of chunks of work (executed during parallel remark phases
  // of CMS collection); this probably belongs in CMSCollector, although
  // it's cached here because it's used in
  // initialize_sequential_subtasks_for_rescan(), which modifies
  // par_seq_tasks, which also lives in Space. XXX
  const size_t _rescan_task_size;
  const size_t _marking_task_size;

  // Yet another sequential-tasks-done structure. This supports
  // CMS GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _conc_par_seq_tasks;

  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _gen;

  // Data structures for free blocks (used during allocation/sweeping)

  // Allocation is done linearly from two different blocks depending on
  // whether the request is small or large, in an effort to reduce
  // fragmentation. We assume that any locking for allocation is done
  // by the containing generation. Thus, none of the methods in this
  // space are re-entrant.
  enum SomeConstants {
    SmallForLinearAlloc = 16,                 // size < this, then use _sLAB
    SmallForDictionary  = 257,                // size < this, then use _indexedFreeList
    IndexSetSize        = SmallForDictionary  // keep this odd-sized
  };
  static size_t IndexSetStart;
  static size_t IndexSetStride;
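
  // For illustration of the constants above (a reading aid, not code from
  // this file): a 10-word request (< SmallForLinearAlloc) is served from
  // the small linear allocation block, a 100-word request
  // (< SmallForDictionary) from the indexed free list for that exact
  // size, and a 300-word request from the dictionary of large blocks.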
 private:
  enum FitStrategyOptions {
    FreeBlockStrategyNone = 0,
    FreeBlockBestFitFirst
  };

  PromotionInfo _promoInfo;

  // Helps to impose a global total order on freelistLock ranks;
  // assumes that CFLSpaces are allocated in global total order.
  static int _lockRank;

  // A lock protecting the free lists and free blocks;
  // mutable because of the ubiquity of locking even for otherwise const methods.
  mutable Mutex _freelistLock;
  // Locking verifier convenience functions
  void assert_locked() const PRODUCT_RETURN;
  void assert_locked(const Mutex* lock) const PRODUCT_RETURN;

  // Linear allocation blocks
  LinearAllocBlock _smallLinearAllocBlock;

  FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
  AFLBinaryTreeDictionary* _dictionary;  // ptr to dictionary for large size blocks

  // Indexed array for small size blocks
  AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];

  // Allocation strategy
  bool _fitStrategy;          // Use best fit strategy.
  bool _adaptive_freelists;   // Use adaptive freelists.

  // This is an address close to the largest free chunk in the heap.
  // It is currently assumed to be at the end of the heap. Free
  // chunks with addresses greater than nearLargestChunk are coalesced
  // in an effort to maintain a large chunk at the end of the heap.
  HeapWord* _nearLargestChunk;

  // Used to keep track of the limit of sweep for the space.
  HeapWord* _sweep_limit;

  // Support for compacting CMS.
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);

  // Initialization helpers.
  void initializeIndexedFreeListArray();

  // Extra stuff to manage promotion parallelism.

  // A lock protecting the dictionary during par promotion allocation.
  mutable Mutex _parDictionaryAllocLock;
  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }

  // Locks protecting the exact lists during par promotion allocation.
  Mutex* _indexedFreeListParLocks[IndexSetSize];

  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
  // required to be smaller than "IndexSetSize"). If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates a
  // number of free chunks that had been previously "borrowed" from the
  // global list of size "word_sz", and must now be decremented.
  void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
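
  // A hypothetical walk-through of the negative-count convention above
  // (illustrative numbers, not from this file): if "fl" arrives with a
  // count of -3, then 3 chunks of size "word_sz" were previously borrowed
  // from the global list; after this call adds n chunks, the list's count
  // reads n - 3, i.e. the borrow is repaid before any surplus becomes
  // visible to the caller.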
  // Allocation helper functions
  // Allocate using a strategy that takes from the indexed free lists
  // first. This allocation strategy assumes a companion sweeping
  // strategy that attempts to keep the needed number of chunks in each
  // indexed free list.
  HeapWord* allocate_adaptive_freelists(size_t size);
  // Allocate from the linear allocation buffers first. This allocation
  // strategy assumes maximal coalescing can maintain chunks large enough
  // to be used as linear allocation buffers.
  HeapWord* allocate_non_adaptive_freelists(size_t size);

  // Gets a chunk from the linear allocation block (LinAB). If there
  // is not enough space in the LinAB, refills it.
  HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
  HeapWord* getChunkFromSmallLinearAllocBlock(size_t size);
  // Get a chunk from the space remaining in the linear allocation block. Do
  // not attempt to refill if the space is not available; return NULL. Do the
  // repairs on the linear allocation block as appropriate.
  HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
  inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size);

  // Helper function for getChunkFromIndexedFreeList.
  // Replenish the indexed free list for this "size". Do not take from an
  // underpopulated size.
  FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);

  // Get a chunk from the indexed free list. If the indexed free list
  // does not have a free chunk, try to replenish the indexed free list,
  // then get the free chunk from the replenished indexed free list.
  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);

  // The returned chunk may be larger than requested (or null).
  FreeChunk* getChunkFromDictionary(size_t size);
  // The returned chunk is the exact size requested (or null).
  FreeChunk* getChunkFromDictionaryExact(size_t size);

  // Find a chunk in the indexed free lists that is the best
  // fit for size "numWords".
  FreeChunk* bestFitSmall(size_t numWords);
  // For a free list "fl" of chunks of size > numWords,
  // remove a chunk, split off a chunk of size numWords
  // and return it. The split-off remainder is returned to
  // the free lists. The old name for getFromListGreater
  // was lookInListGreater.
  FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords);
  // Get a chunk in the indexed free list or dictionary
  // by considering a larger chunk and splitting it.
  FreeChunk* getChunkFromGreater(size_t numWords);
  // Verify that the given chunk is in the indexed free lists.
  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
  // Remove the specified chunk from the indexed free lists.
  void removeChunkFromIndexedFreeList(FreeChunk* fc);
  // Remove the specified chunk from the dictionary.
  void removeChunkFromDictionary(FreeChunk* fc);
  // Split a free chunk into a smaller free chunk of size "new_size".
  // Return the smaller free chunk and return the remainder to the
  // free lists.
  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
  // Add a chunk to the free lists.
  void addChunkToFreeLists(HeapWord* chunk, size_t size);
  // Add a chunk to the free lists, preferring to suffix it
  // to the last free chunk at the end of the space if possible, and
  // updating the block census stats as well as the block offset table.
  // Take any locks as appropriate if we are multithreaded.
  void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
  // Add a free chunk to the indexed free lists.
  void returnChunkToFreeList(FreeChunk* chunk);
  // Add a free chunk to the dictionary.
  void returnChunkToDictionary(FreeChunk* chunk);

  // Functions for maintaining the linear allocation buffers (LinAB).
  // Repairing a linear allocation block refers to operations
  // performed on the remainder of a LinAB after an allocation
  // has been made from it.
  void repairLinearAllocationBlocks();
  void repairLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
  void refillLinearAllocBlocksIfNeeded();

  void verify_objects_initialized() const;

  // Statistics reporting helper functions
  void reportFreeListStatistics() const;
  void reportIndexedFreeListStatistics() const;
  size_t maxChunkSizeInIndexedFreeLists() const;
  size_t numFreeBlocksInIndexedFreeLists() const;
  // Accessor
  HeapWord* unallocated_block() const {
    if (BlockOffsetArrayUseUnallocatedBlock) {
      HeapWord* ub = _bt.unallocated_block();
      assert(ub >= bottom() &&
             ub <= end(), "space invariant");
      return ub;
    } else {
      return end();
    }
  }
  void freed(HeapWord* start, size_t size) {
    _bt.freed(start, size);
  }

 protected:
  // Reset the indexed free list to its initial empty condition.
  void resetIndexedFreeListArray();
  // Reset to an initial state with a single free block described
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t totalSizeInIndexedFreeLists() const;

 public:
  // Constructor
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                           bool use_adaptive_freelists,
                           FreeBlockDictionary<FreeChunk>::DictionaryChoice);
  // Accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  // Set CMS global values.
  static void set_cms_values();

  // Return the free chunk at the end of the space. If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  bool adaptive_freelists() const { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking.
  const size_t rescan_task_size()  const { return _rescan_task_size;  }
  const size_t marking_task_size() const { return _marking_task_size; }
  SequentialSubTasksDone* conc_par_seq_tasks() { return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
                                                  HeapWord* low = NULL);

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of
  // Space; we could consider keeping track of the highest allocated
  // address and correcting that at each sweep, as the sweeper
  // goes through the entire allocated part of the generation. We
  // could also use that information to keep the sweeper from
  // sweeping more than is necessary. The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region(),
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }

  bool is_in(const void* p) const {
    return used_region().contains(p);
  }

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value);  // override

  // Mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }

  // Iteration support
  void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
  void oop_iterate(ExtendedOopClosure* cl);

  void object_iterate(ObjectClosure* blk);
  // Apply the closure to each object in the space whose references
  // point to objects in the heap. The usage of CompactibleFreeListSpace
  // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
  // objects in the space with references to objects that are no longer
  // valid. For example, an object may reference another object
  // that has already been swept up (collected). This method uses
  // obj_is_alive() to determine whether it is safe to iterate over
  // an object.
  void safe_object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done. A return value of NULL indicates that the
  // iteration completed.
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start_const(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // Iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();

  // Iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
    if (CMSTraceSweeper) {
      gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT
                             " for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
                             p2i(_sweep_limit), p2i(bottom()), p2i(end()));
    }
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration. Thus, when the iteration completes
  // there should be no further such objects remaining.
#define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
#undef CFLS_OOP_SINCE_SAVE_MARKS_DECL

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop promote(oop obj, size_t obj_size);
  void gc_prologue();
  void gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related housekeeping functions.
  void sweep_completed();

  // For an object in this space, the mark-word's two
  // LSBs having the value [11] indicates that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  // This property holds only for single-threaded collections,
  // and is typically used for Cheney scans; for MT scavenges,
  // the property holds for all objects promoted during that
  // scavenge for the duration of the scavenge, and is used
  // by card-scanning to avoid scanning objects (being) promoted
  // during that scavenge.
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // Returns true if either the small or large linear allocation buffer is empty.
  bool linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size. This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
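  // A worked example under assumed values (not constants from this file):
  // if MinChunkSize were 4 words, adjustObjectSize(2) would yield 4 (then
  // object-aligned) -- requests are padded up so that any block, once
  // freed, is large enough to hold the free-chunk metadata.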
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
                                    bool coalesced);

  // Support for decisions regarding concurrent collection policy.
  bool should_concurrent_collect() const;

  // Support for compaction
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  // Reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support
  void print()                            const;
  void print_on(outputStream* st)         const;
  void prepare_for_verify();
  void verify()                           const;
  void verifyFreeLists()                  const PRODUCT_RETURN;
  void verifyIndexedFreeLists()           const;
  void verifyIndexedFreeList(size_t size) const;
  // Verify that the given chunk is in the free lists:
  // i.e. either the binary tree dictionary, the indexed free lists
  // or the linear allocation block.
  bool verify_chunk_in_free_list(FreeChunk* fc) const;
  // Verify that the given chunk is the linear allocation block.
  bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void check_free_list_consistency() const PRODUCT_RETURN;

  // Printing support
  void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
  void print_indexed_free_lists(outputStream* st) const;
  void print_dictionary_free_lists(outputStream* st) const;
  void print_promo_info_blocks(outputStream* st) const;

  NOT_PRODUCT (
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )

  // The census consists of counts of quantities such as
  // the current count of the free chunks, the number of chunks
  // created as a result of the split of a larger chunk or the
  // coalescing of smaller chunks, etc. The counts in the
  // census are used to make decisions on the splitting and
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float inter_sweep_current,
                          float inter_sweep_estimate,
                          float intra_sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

// Record (for each size):
//
//   split-births = #chunks added due to splits in (prev-sweep-end,
//      this-sweep-start)
//   split-deaths = #chunks removed for splits in (prev-sweep-end,
//      this-sweep-start)
//   num-curr     = #chunks at start of this sweep
//   num-prev     = #chunks at end of previous sweep
//
// The above are quantities that are measured. Now define:
//
//   num-desired := num-prev + split-births - split-deaths - num-curr
//
// Roughly, num-prev + split-births is the supply,
// split-deaths is demand due to other sizes
// and num-curr is what we have left.
//
// Thus, num-desired is roughly speaking the "legitimate demand"
// for blocks of this size and what we are striving to reach at the
// end of the current sweep.
//
// For a given list, let num-len be its current population.
// Define, for a free list of a given size:
//
//   coal-overpopulated := num-len >= num-desired * coal-surplus
// (coal-surplus is set to 1.05, i.e. we allow a little slop when
// coalescing -- we do not coalesce unless we think that the current
// supply has exceeded the estimated demand by more than 5%).
//
// For the set of sizes in the binary tree, which is neither dense nor
// closed, it may be the case that for a particular size we have never
// had, or do not now have, or did not have at the previous sweep,
// chunks of that size. We need to extend the definition of
// coal-overpopulated to such sizes as well:
//
//   For a chunk in/not in the binary tree, extend coal-overpopulated
//   defined above to include all sizes as follows:
//
//   . a size that is non-existent is coal-overpopulated
//   . a size that has a num-desired <= 0 as defined above is
//     coal-overpopulated.
//
// Also define, for a chunk heap-offset C and mountain heap-offset M:
//
//   close-to-mountain := C >= 0.99 * M
//
// Now, the coalescing strategy is:
//
//   Coalesce left-hand chunk with right-hand chunk if and
//   only if:
//
//   EITHER
//     . left-hand chunk is of a size that is coal-overpopulated
//   OR
//     . right-hand chunk is close-to-mountain
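//
// A sketch of the predicate the strategy above implies (illustrative
// only; the real logic lives in the .cpp file and the dictionary's
// census code). Here desired(size) and count(size) stand in for the
// census accessors, and 1.05 / 0.99 are the coal-surplus and
// close-to-mountain factors quoted above:
//
//   bool should_coalesce(size_t left_size,
//                        size_t right_offset, size_t mountain_offset) {
//     double num_desired = desired(left_size);
//     bool coal_overpopulated = (num_desired <= 0) ||      // covers non-existent
//       ((double)count(left_size) >= num_desired * 1.05);  // sizes, per above
//     bool close_to_mountain =
//       (double)right_offset >= 0.99 * (double)mountain_offset;
//     return coal_overpopulated || close_to_mountain;
//   }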
  void smallCoalBirth(size_t size);
  void smallCoalDeath(size_t size);
  void coalBirth(size_t size);
  void coalDeath(size_t size);
  void smallSplitBirth(size_t size);
  void smallSplitDeath(size_t size);
  void split_birth(size_t size);
  void splitDeath(size_t size);
  void split(size_t from, size_t to1);

  double flsFrag() const;
};

// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CFLS_LAB : public CHeapObj<mtGC> {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.

  // Allocation statistics in support of dynamic adjustment of
  // #blocks to claim per get_from_global_pool() call below.
  static AdaptiveWeightedAverage
                _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
  static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
  static uint   _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
  size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];

  // Internal work method
  void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);

public:
  CFLS_LAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire(int tid);

  // Dynamic OldPLABSize sizing
  static void compute_desired_plab_size();
  // When the settings are modified from default static initialization.
  static void modify_initialization(size_t n, unsigned wt);
};

size_t PromotionInfo::refillSize() const {
  const size_t CMSSpoolBlockSize = 256;
  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
                                   * CMSSpoolBlockSize);
  return CompactibleFreeListSpace::adjustObjectSize(sz);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP