src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp

author      ysr
date        Mon, 16 Aug 2010 15:58:42 -0700
changeset   2071:be3f9c242c9d
parent      1934:e9ff18c4ace7
child       2314:f95d63e2154a
permissions -rw-r--r--

6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
Summary: GC workers now recognize an intermediate transient state of blocks that are allocated but have not yet completed initialization. blk_start() calls do not attempt to determine the size of a block in the transient state; instead they wait for the block to become initialized so that it is safe to query its size. Audited and ensured that the order of initialization of object fields (klass, free bit and size) respects the block state transition protocol. Also included some new assertion-checking code, enabled in debug mode.
Reviewed-by: chrisphi, johnc, poonam
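To make the intent of the fix concrete, here is a minimal, self-contained sketch (not HotSpot source; the Block type, field names, and the use of std::atomic are illustrative assumptions) of the publish/wait protocol the summary describes: the allocator initializes a block's fields first and publishes its size last, while a careful walker treats a size of zero as the transient "allocated but not yet initialized" state and waits rather than guessing.

// Illustrative sketch only -- not HotSpot code. A hypothetical Block whose
// word_size is 0 while the block is in the transient, uninitialized state.
#include <atomic>
#include <chrono>
#include <cstddef>
#include <iostream>
#include <thread>

struct Block {
  // The allocator stores the real size last, with release semantics, after
  // the other fields (klass, free bit) are set up; 0 marks the transient state.
  std::atomic<std::size_t> word_size{0};
};

// Walker side: never try to compute the size of a transient block; wait until
// the allocator has published it, after which it is safe to query.
std::size_t careful_block_size(const Block& b) {
  std::size_t sz;
  while ((sz = b.word_size.load(std::memory_order_acquire)) == 0) {
    std::this_thread::yield();   // the VM would spin/yield here as well
  }
  return sz;
}

int main() {
  Block b;
  std::thread allocator([&b] {
    // Simulate "initialize fields first, publish size last".
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    b.word_size.store(16, std::memory_order_release);
  });
  std::cout << "block size = " << careful_block_size(b) << " words\n";
  allocator.join();
  return 0;
}

The real change is of course expressed in terms of blk_start()/block_size() and the block offset table, whose declarations appear in the header below.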

duke@435 1 /*
ysr@2071 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 // Classes in support of keeping track of promotions into a non-Contiguous
duke@435 26 // space, in this case a CompactibleFreeListSpace.
duke@435 27
duke@435 28 // Forward declarations
duke@435 29 class CompactibleFreeListSpace;
duke@435 30 class BlkClosure;
duke@435 31 class BlkClosureCareful;
duke@435 32 class UpwardsObjectClosure;
duke@435 33 class ObjectClosureCareful;
duke@435 34 class Klass;
duke@435 35
duke@435 36 class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
duke@435 37 public:
duke@435 38 LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
duke@435 39 _allocation_size_limit(0) {}
duke@435 40 void set(HeapWord* ptr, size_t word_size, size_t refill_size,
duke@435 41 size_t allocation_size_limit) {
duke@435 42 _ptr = ptr;
duke@435 43 _word_size = word_size;
duke@435 44 _refillSize = refill_size;
duke@435 45 _allocation_size_limit = allocation_size_limit;
duke@435 46 }
duke@435 47 HeapWord* _ptr;
duke@435 48 size_t _word_size;
duke@435 49 size_t _refillSize;
duke@435 50 size_t _allocation_size_limit; // largest size that will be allocated
ysr@2071 51
ysr@2071 52 void print_on(outputStream* st) const;
duke@435 53 };
duke@435 54
duke@435 55 // Concrete subclass of CompactibleSpace that implements
duke@435 56 // a free list space, such as used in the concurrent mark sweep
duke@435 57 // generation.
duke@435 58
duke@435 59 class CompactibleFreeListSpace: public CompactibleSpace {
duke@435 60 friend class VMStructs;
duke@435 61 friend class ConcurrentMarkSweepGeneration;
duke@435 62 friend class ASConcurrentMarkSweepGeneration;
duke@435 63 friend class CMSCollector;
duke@435 64 friend class CMSPermGenGen;
duke@435 65 // Local alloc buffer for promotion into this space.
duke@435 66 friend class CFLS_LAB;
duke@435 67
duke@435 68 // "Size" of chunks of work (executed during parallel remark phases
duke@435 69 // of CMS collection); this probably belongs in CMSCollector, although
duke@435 70 // it's cached here because it's used in
duke@435 71 // initialize_sequential_subtasks_for_rescan() which modifies
duke@435 72 // par_seq_tasks which also lives in Space. XXX
duke@435 73 const size_t _rescan_task_size;
duke@435 74 const size_t _marking_task_size;
duke@435 75
duke@435 76 // Yet another sequential tasks done structure. This supports
duke@435 77 // CMS GC, where we have threads dynamically
duke@435 78 // claiming sub-tasks from a larger parallel task.
duke@435 79 SequentialSubTasksDone _conc_par_seq_tasks;
duke@435 80
duke@435 81 BlockOffsetArrayNonContigSpace _bt;
duke@435 82
duke@435 83 CMSCollector* _collector;
duke@435 84 ConcurrentMarkSweepGeneration* _gen;
duke@435 85
duke@435 86 // Data structures for free blocks (used during allocation/sweeping)
duke@435 87
duke@435 88 // Allocation is done linearly from two different blocks depending on
duke@435 89 // whether the request is small or large, in an effort to reduce
duke@435 90 // fragmentation. We assume that any locking for allocation is done
duke@435 91 // by the containing generation. Thus, none of the methods in this
duke@435 92 // space are re-entrant.
duke@435 93 enum SomeConstants {
duke@435 94 SmallForLinearAlloc = 16, // size < this then use _sLAB
duke@435 95 SmallForDictionary = 257, // size < this then use _indexedFreeList
kvn@1926 96 IndexSetSize = SmallForDictionary // keep this odd-sized
duke@435 97 };
kvn@1926 98 static int IndexSetStart;
kvn@1926 99 static int IndexSetStride;
duke@435 100
duke@435 101 private:
duke@435 102 enum FitStrategyOptions {
duke@435 103 FreeBlockStrategyNone = 0,
duke@435 104 FreeBlockBestFitFirst
duke@435 105 };
duke@435 106
duke@435 107 PromotionInfo _promoInfo;
duke@435 108
duke@435 109 // helps to impose a global total order on freelistLock ranks;
duke@435 110 // assumes that CFLSpace's are allocated in global total order
duke@435 111 static int _lockRank;
duke@435 112
duke@435 113 // a lock protecting the free lists and free blocks;
duke@435 114 // mutable because of ubiquity of locking even for otherwise const methods
duke@435 115 mutable Mutex _freelistLock;
duke@435 116 // locking verifier convenience function
duke@435 117 void assert_locked() const PRODUCT_RETURN;
ysr@1580 118 void assert_locked(const Mutex* lock) const PRODUCT_RETURN;
duke@435 119
duke@435 120 // Linear allocation blocks
duke@435 121 LinearAllocBlock _smallLinearAllocBlock;
duke@435 122
duke@435 123 FreeBlockDictionary::DictionaryChoice _dictionaryChoice;
duke@435 124 FreeBlockDictionary* _dictionary; // ptr to dictionary for large size blocks
duke@435 125
duke@435 126 FreeList _indexedFreeList[IndexSetSize];
duke@435 127 // indexed array for small size blocks
duke@435 128 // allocation strategy
duke@435 129 bool _fitStrategy; // Use best fit strategy.
duke@435 130 bool _adaptive_freelists; // Use adaptive freelists
duke@435 131
duke@435 132 // This is an address close to the largest free chunk in the heap.
duke@435 133 // It is currently assumed to be at the end of the heap. Free
duke@435 134 // chunks with addresses greater than nearLargestChunk are coalesced
duke@435 135 // in an effort to maintain a large chunk at the end of the heap.
duke@435 136 HeapWord* _nearLargestChunk;
duke@435 137
duke@435 138 // Used to keep track of limit of sweep for the space
duke@435 139 HeapWord* _sweep_limit;
duke@435 140
duke@435 141 // Support for compacting cms
duke@435 142 HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
duke@435 143 HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
duke@435 144
duke@435 145 // Initialization helpers.
duke@435 146 void initializeIndexedFreeListArray();
duke@435 147
duke@435 148 // Extra stuff to manage promotion parallelism.
duke@435 149
duke@435 150 // a lock protecting the dictionary during par promotion allocation.
duke@435 151 mutable Mutex _parDictionaryAllocLock;
duke@435 152 Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }
duke@435 153
duke@435 154 // Locks protecting the exact lists during par promotion allocation.
duke@435 155 Mutex* _indexedFreeListParLocks[IndexSetSize];
duke@435 156
duke@435 157 // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
duke@435 158 // required to be smaller than "IndexSetSize".) If successful,
duke@435 159 // adds them to "fl", which is required to be an empty free list.
duke@435 160 // If the count of "fl" is negative, its absolute value indicates the
duke@435 161 // number of free chunks that had previously been "borrowed" from the
duke@435 162 // global list of size "word_sz", and must now be decremented.
duke@435 163 void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl);
duke@435 164
duke@435 165 // Allocation helper functions
duke@435 166 // Allocate using a strategy that takes from the indexed free lists
duke@435 167 // first. This allocation strategy assumes a companion sweeping
duke@435 168 // strategy that attempts to keep the needed number of chunks in each
duke@435 169 // indexed free list.
duke@435 170 HeapWord* allocate_adaptive_freelists(size_t size);
duke@435 171 // Allocate from the linear allocation buffers first. This allocation
duke@435 172 // strategy assumes maximal coalescing can maintain chunks large enough
duke@435 173 // to be used as linear allocation buffers.
duke@435 174 HeapWord* allocate_non_adaptive_freelists(size_t size);
duke@435 175
duke@435 176 // Gets a chunk from the linear allocation block (LinAB). If there
duke@435 177 // is not enough space in the LinAB, refills it.
duke@435 178 HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
duke@435 179 HeapWord* getChunkFromSmallLinearAllocBlock(size_t size);
duke@435 180 // Get a chunk from the space remaining in the linear allocation block.
duke@435 181 // If that space is insufficient, return NULL rather than attempting a
duke@435 182 // refill, and do any needed repairs on the linear allocation block.
duke@435 183 HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
duke@435 184 inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size);
duke@435 185
duke@435 186 // Helper function for getChunkFromIndexedFreeList.
duke@435 187 // Replenish the indexed free list for this "size". Do not take from an
duke@435 188 // underpopulated size.
ysr@1580 189 FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);
duke@435 190
duke@435 191 // Get a chunk from the indexed free list. If the indexed free list
duke@435 192 // does not have a free chunk, try to replenish the indexed free list
duke@435 193 // then get the free chunk from the replenished indexed free list.
duke@435 194 inline FreeChunk* getChunkFromIndexedFreeList(size_t size);
duke@435 195
duke@435 196 // The returned chunk may be larger than requested (or null).
duke@435 197 FreeChunk* getChunkFromDictionary(size_t size);
duke@435 198 // The returned chunk is the exact size requested (or null).
duke@435 199 FreeChunk* getChunkFromDictionaryExact(size_t size);
duke@435 200
duke@435 201 // Find a chunk in the indexed free list that is the best
duke@435 202 // fit for size "numWords".
duke@435 203 FreeChunk* bestFitSmall(size_t numWords);
duke@435 204 // For free list "fl" of chunks of size > numWords,
duke@435 205 // remove a chunk, split off a chunk of size numWords
duke@435 206 // and return it. The split off remainder is returned to
duke@435 207 // the free lists. The old name for getFromListGreater
duke@435 208 // was lookInListGreater.
duke@435 209 FreeChunk* getFromListGreater(FreeList* fl, size_t numWords);
duke@435 210 // Get a chunk in the indexed free list or dictionary,
duke@435 211 // by considering a larger chunk and splitting it.
duke@435 212 FreeChunk* getChunkFromGreater(size_t numWords);
duke@435 213 // Verify that the given chunk is in the indexed free lists.
duke@435 214 bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
duke@435 215 // Remove the specified chunk from the indexed free lists.
duke@435 216 void removeChunkFromIndexedFreeList(FreeChunk* fc);
duke@435 217 // Remove the specified chunk from the dictionary.
duke@435 218 void removeChunkFromDictionary(FreeChunk* fc);
duke@435 219 // Split a free chunk into a smaller free chunk of size "new_size".
duke@435 220 // Return the smaller free chunk and return the remainder to the
duke@435 221 // free lists.
duke@435 222 FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
duke@435 223 // Add a chunk to the free lists.
duke@435 224 void addChunkToFreeLists(HeapWord* chunk, size_t size);
duke@435 225 // Add a chunk to the free lists, preferring to suffix it
duke@435 226 // to the last free chunk at end of space if possible, and
duke@435 227 // updating the block census stats as well as the block offset table.
duke@435 228 // Take any locks as appropriate if we are multithreaded.
duke@435 229 void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
duke@435 230 // Add a free chunk to the indexed free lists.
duke@435 231 void returnChunkToFreeList(FreeChunk* chunk);
duke@435 232 // Add a free chunk to the dictionary.
duke@435 233 void returnChunkToDictionary(FreeChunk* chunk);
duke@435 234
duke@435 235 // Functions for maintaining the linear allocation buffers (LinAB).
duke@435 236 // Repairing a linear allocation block refers to operations
duke@435 237 // performed on the remainder of a LinAB after an allocation
duke@435 238 // has been made from it.
duke@435 239 void repairLinearAllocationBlocks();
duke@435 240 void repairLinearAllocBlock(LinearAllocBlock* blk);
duke@435 241 void refillLinearAllocBlock(LinearAllocBlock* blk);
duke@435 242 void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
duke@435 243 void refillLinearAllocBlocksIfNeeded();
duke@435 244
duke@435 245 void verify_objects_initialized() const;
duke@435 246
duke@435 247 // Statistics reporting helper functions
duke@435 248 void reportFreeListStatistics() const;
duke@435 249 void reportIndexedFreeListStatistics() const;
duke@435 250 size_t maxChunkSizeInIndexedFreeLists() const;
duke@435 251 size_t numFreeBlocksInIndexedFreeLists() const;
duke@435 252 // Accessor
duke@435 253 HeapWord* unallocated_block() const {
ysr@2071 254 if (BlockOffsetArrayUseUnallocatedBlock) {
ysr@2071 255 HeapWord* ub = _bt.unallocated_block();
ysr@2071 256 assert(ub >= bottom() &&
ysr@2071 257 ub <= end(), "space invariant");
ysr@2071 258 return ub;
ysr@2071 259 } else {
ysr@2071 260 return end();
ysr@2071 261 }
duke@435 262 }
duke@435 263 void freed(HeapWord* start, size_t size) {
duke@435 264 _bt.freed(start, size);
duke@435 265 }
duke@435 266
duke@435 267 protected:
duke@435 268 // reset the indexed free lists to their initial empty condition.
duke@435 269 void resetIndexedFreeListArray();
duke@435 270 // reset to an initial state with a single free block described
duke@435 271 // by the MemRegion parameter.
duke@435 272 void reset(MemRegion mr);
duke@435 273 // Return the total number of words in the indexed free lists.
duke@435 274 size_t totalSizeInIndexedFreeLists() const;
duke@435 275
duke@435 276 public:
duke@435 277 // Constructor...
duke@435 278 CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
duke@435 279 bool use_adaptive_freelists,
duke@435 280 FreeBlockDictionary::DictionaryChoice);
duke@435 281 // accessors
duke@435 282 bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
duke@435 283 FreeBlockDictionary* dictionary() const { return _dictionary; }
duke@435 284 HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
duke@435 285 void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
duke@435 286
kvn@1926 287 // Set CMS global values
kvn@1926 288 static void set_cms_values();
kvn@1926 289
duke@435 290 // Return the free chunk at the end of the space. If no such
duke@435 291 // chunk exists, return NULL.
duke@435 292 FreeChunk* find_chunk_at_end();
duke@435 293
ysr@447 294 bool adaptive_freelists() const { return _adaptive_freelists; }
duke@435 295
duke@435 296 void set_collector(CMSCollector* collector) { _collector = collector; }
duke@435 297
duke@435 298 // Support for parallelization of rescan and marking
duke@435 299 const size_t rescan_task_size() const { return _rescan_task_size; }
duke@435 300 const size_t marking_task_size() const { return _marking_task_size; }
duke@435 301 SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; }
duke@435 302 void initialize_sequential_subtasks_for_rescan(int n_threads);
duke@435 303 void initialize_sequential_subtasks_for_marking(int n_threads,
duke@435 304 HeapWord* low = NULL);
duke@435 305
duke@435 306 // Space enquiries
duke@435 307 size_t used() const;
duke@435 308 size_t free() const;
duke@435 309 size_t max_alloc_in_words() const;
duke@435 310 // XXX: should have a less conservative used_region() than that of
duke@435 311 // Space; we could consider keeping track of highest allocated
duke@435 312 // address and correcting that at each sweep, as the sweeper
duke@435 313 // goes through the entire allocated part of the generation. We
duke@435 314 // could also use that information to keep the sweeper from
duke@435 315 // sweeping more than is necessary. The allocator and sweeper will
duke@435 316 // of course need to synchronize on this, since the sweeper will
duke@435 317 // try to bump down the address and the allocator will try to bump it up.
duke@435 318 // For now, however, we'll just use the default used_region()
duke@435 319 // which overestimates the region by returning the entire
duke@435 320 // committed region (this is safe, but inefficient).
duke@435 321
duke@435 322 // Returns a subregion of the space containing all the objects in
duke@435 323 // the space.
duke@435 324 MemRegion used_region() const {
duke@435 325 return MemRegion(bottom(),
duke@435 326 BlockOffsetArrayUseUnallocatedBlock ?
duke@435 327 unallocated_block() : end());
duke@435 328 }
duke@435 329
duke@435 330 // This is needed because the default implementation uses block_start()
duke@435 331 // which can't be used at certain times (for example phase 3 of mark-sweep).
duke@435 332 // A better fix is to change the assertions in phase 3 of mark-sweep to
duke@435 333 // use is_in_reserved(), but that is deferred since the is_in() assertions
duke@435 334 // are buried through several layers of callers and are used elsewhere
duke@435 335 // as well.
duke@435 336 bool is_in(const void* p) const {
duke@435 337 return used_region().contains(p);
duke@435 338 }
duke@435 339
duke@435 340 virtual bool is_free_block(const HeapWord* p) const;
duke@435 341
duke@435 342 // Resizing support
duke@435 343 void set_end(HeapWord* value); // override
duke@435 344
duke@435 345 // mutual exclusion support
duke@435 346 Mutex* freelistLock() const { return &_freelistLock; }
duke@435 347
duke@435 348 // Iteration support
duke@435 349 void oop_iterate(MemRegion mr, OopClosure* cl);
duke@435 350 void oop_iterate(OopClosure* cl);
duke@435 351
duke@435 352 void object_iterate(ObjectClosure* blk);
jmasa@952 353 // Apply the closure to each object in the space whose references
jmasa@952 354 // point to objects in the heap. The usage of CompactibleFreeListSpace
jmasa@952 355 // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
jmasa@952 356 // objects in the space to hold references to objects that are no
jmasa@952 357 // longer valid. For example, an object may reference another object
jmasa@952 358 // that has already been swept up (collected). This method uses
jmasa@952 359 // obj_is_alive() to determine whether it is safe to iterate over
jmasa@952 360 // an object.
jmasa@952 361 void safe_object_iterate(ObjectClosure* blk);
duke@435 362 void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
duke@435 363
duke@435 364 // Requires that "mr" be entirely within the space.
duke@435 365 // Apply "cl->do_object" to all objects that intersect with "mr".
duke@435 366 // If the iteration encounters an unparseable portion of the region,
duke@435 367 // terminate the iteration and return the address of the start of the
duke@435 368 // subregion that isn't done. A return value of "NULL" indicates that the
duke@435 369 // iteration completed.
duke@435 370 virtual HeapWord*
duke@435 371 object_iterate_careful_m(MemRegion mr,
duke@435 372 ObjectClosureCareful* cl);
duke@435 373 virtual HeapWord*
duke@435 374 object_iterate_careful(ObjectClosureCareful* cl);
duke@435 375
duke@435 376 // Override: provides a DCTO_CL specific to this kind of space.
duke@435 377 DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
duke@435 378 CardTableModRefBS::PrecisionStyle precision,
duke@435 379 HeapWord* boundary);
duke@435 380
duke@435 381 void blk_iterate(BlkClosure* cl);
duke@435 382 void blk_iterate_careful(BlkClosureCareful* cl);
ysr@777 383 HeapWord* block_start_const(const void* p) const;
duke@435 384 HeapWord* block_start_careful(const void* p) const;
duke@435 385 size_t block_size(const HeapWord* p) const;
duke@435 386 size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
duke@435 387 bool block_is_obj(const HeapWord* p) const;
duke@435 388 bool obj_is_alive(const HeapWord* p) const;
duke@435 389 size_t block_size_nopar(const HeapWord* p) const;
duke@435 390 bool block_is_obj_nopar(const HeapWord* p) const;
duke@435 391
duke@435 392 // iteration support for promotion
duke@435 393 void save_marks();
duke@435 394 bool no_allocs_since_save_marks();
duke@435 395 void object_iterate_since_last_GC(ObjectClosure* cl);
duke@435 396
duke@435 397 // iteration support for sweeping
duke@435 398 void save_sweep_limit() {
duke@435 399 _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
duke@435 400 unallocated_block() : end();
duke@435 401 }
duke@435 402 NOT_PRODUCT(
duke@435 403 void clear_sweep_limit() { _sweep_limit = NULL; }
duke@435 404 )
duke@435 405 HeapWord* sweep_limit() { return _sweep_limit; }
duke@435 406
duke@435 407 // Apply "blk->do_oop" to the addresses of all reference fields in objects
duke@435 408 // promoted into this generation since the most recent save_marks() call.
duke@435 409 // Fields in objects allocated by applications of the closure
duke@435 410 // *are* included in the iteration. Thus, when the iteration completes
duke@435 411 // there should be no further such objects remaining.
duke@435 412 #define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
duke@435 413 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
duke@435 414 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
duke@435 415 #undef CFLS_OOP_SINCE_SAVE_MARKS_DECL
duke@435 416
duke@435 417 // Allocation support
duke@435 418 HeapWord* allocate(size_t size);
duke@435 419 HeapWord* par_allocate(size_t size);
duke@435 420
coleenp@548 421 oop promote(oop obj, size_t obj_size);
duke@435 422 void gc_prologue();
duke@435 423 void gc_epilogue();
duke@435 424
duke@435 425 // This call is used by a containing CMS generation / collector
duke@435 426 // to inform the CFLS space that a sweep has been completed
duke@435 427 // and that the space can do any related house-keeping functions.
duke@435 428 void sweep_completed();
duke@435 429
duke@435 430 // For an object in this space, the mark-word's two
duke@435 431 // LSB's having the value [11] indicates that it has been
duke@435 432 // promoted since the most recent call to save_marks() on
duke@435 433 // this generation and has not subsequently been iterated
duke@435 434 // over (using oop_since_save_marks_iterate() above).
ysr@1876 435 // This property holds only for single-threaded collections,
ysr@1876 436 // and is typically used for Cheney scans; for MT scavenges,
ysr@1876 437 // the property holds for all objects promoted during that
ysr@1876 438 // scavenge for the duration of the scavenge and is used
ysr@1876 439 // by card-scanning to avoid scanning objects (being) promoted
ysr@1876 440 // during that scavenge.
duke@435 441 bool obj_allocated_since_save_marks(const oop obj) const {
duke@435 442 assert(is_in_reserved(obj), "Wrong space?");
duke@435 443 return ((PromotedObject*)obj)->hasPromotedMark();
duke@435 444 }
duke@435 445
duke@435 446 // A worst-case estimate of the space required (in HeapWords) to expand the
duke@435 447 // heap when promoting an obj of size obj_size.
duke@435 448 size_t expansionSpaceRequired(size_t obj_size) const;
duke@435 449
duke@435 450 FreeChunk* allocateScratch(size_t size);
duke@435 451
duke@435 452 // returns true if either the small or large linear allocation buffer is empty.
ysr@447 453 bool linearAllocationWouldFail() const;
duke@435 454
duke@435 455 // Adjust the chunk for the minimum size. This version is called in
duke@435 456 // most cases in CompactibleFreeListSpace methods.
duke@435 457 inline static size_t adjustObjectSize(size_t size) {
duke@435 458 return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
duke@435 459 }
duke@435 460 // This is a virtual version of adjustObjectSize() that is called
duke@435 461 // only occasionally when the compaction space changes and the type
duke@435 462 // of the new compaction space is only known to be CompactibleSpace.
duke@435 463 size_t adjust_object_size_v(size_t size) const {
duke@435 464 return adjustObjectSize(size);
duke@435 465 }
duke@435 466 // Minimum size of a free block.
duke@435 467 virtual size_t minimum_free_block_size() const { return MinChunkSize; }
duke@435 468 void removeFreeChunkFromFreeLists(FreeChunk* chunk);
duke@435 469 void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
duke@435 470 bool coalesced);
duke@435 471
ysr@447 472 // Support for decisions regarding concurrent collection policy
ysr@447 473 bool should_concurrent_collect() const;
ysr@447 474
duke@435 475 // Support for compaction
duke@435 476 void prepare_for_compaction(CompactPoint* cp);
duke@435 477 void adjust_pointers();
duke@435 478 void compact();
duke@435 479 // reset the space to reflect the fact that a compaction of the
duke@435 480 // space has been done.
duke@435 481 virtual void reset_after_compaction();
duke@435 482
duke@435 483 // Debugging support
duke@435 484 void print() const;
ysr@2071 485 void print_on(outputStream* st) const;
duke@435 486 void prepare_for_verify();
duke@435 487 void verify(bool allow_dirty) const;
duke@435 488 void verifyFreeLists() const PRODUCT_RETURN;
duke@435 489 void verifyIndexedFreeLists() const;
duke@435 490 void verifyIndexedFreeList(size_t size) const;
duke@435 491 // verify that the given chunk is in the free lists.
duke@435 492 bool verifyChunkInFreeLists(FreeChunk* fc) const;
duke@435 493 // Do some basic checks on the free lists.
duke@435 494 void checkFreeListConsistency() const PRODUCT_RETURN;
duke@435 495
ysr@1580 496 // Printing support
ysr@1580 497 void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
ysr@1580 498 void print_indexed_free_lists(outputStream* st) const;
ysr@1580 499 void print_dictionary_free_lists(outputStream* st) const;
ysr@1580 500 void print_promo_info_blocks(outputStream* st) const;
ysr@1580 501
duke@435 502 NOT_PRODUCT (
duke@435 503 void initializeIndexedFreeListArrayReturnedBytes();
duke@435 504 size_t sumIndexedFreeListArrayReturnedBytes();
duke@435 505 // Return the total number of chunks in the indexed free lists.
duke@435 506 size_t totalCountInIndexedFreeLists() const;
duke@435 507 // Return the total number of chunks in the space.
duke@435 508 size_t totalCount();
duke@435 509 )
duke@435 510
duke@435 511 // The census consists of counts of quantities such as
duke@435 512 // the current count of free chunks, the number of chunks
duke@435 513 // created as a result of splitting a larger chunk or
duke@435 514 // coalescing smaller chunks, etc. The counts in the
duke@435 515 // census are used to make decisions on splitting and
duke@435 516 // coalescing of chunks during the sweep of garbage.
duke@435 517
duke@435 518 // Print the statistics for the free lists.
ysr@447 519 void printFLCensus(size_t sweep_count) const;
duke@435 520
duke@435 521 // Statistics functions
duke@435 522 // Initialize census for lists before the sweep.
ysr@1580 523 void beginSweepFLCensus(float inter_sweep_current,
ysr@1580 524 float inter_sweep_estimate,
ysr@1580 525 float intra_sweep_estimate);
duke@435 526 // Set the surplus for each of the free lists.
duke@435 527 void setFLSurplus();
duke@435 528 // Set the hint for each of the free lists.
duke@435 529 void setFLHints();
duke@435 530 // Clear the census for each of the free lists.
duke@435 531 void clearFLCensus();
duke@435 532 // Perform functions for the census after the end of the sweep.
ysr@447 533 void endSweepFLCensus(size_t sweep_count);
duke@435 534 // Return true if the count of free chunks is greater
duke@435 535 // than the desired number of free chunks.
duke@435 536 bool coalOverPopulated(size_t size);
duke@435 537
duke@435 538 // Record (for each size):
duke@435 539 //
duke@435 540 // split-births = #chunks added due to splits in (prev-sweep-end,
duke@435 541 // this-sweep-start)
duke@435 542 // split-deaths = #chunks removed for splits in (prev-sweep-end,
duke@435 543 // this-sweep-start)
duke@435 544 // num-curr = #chunks at start of this sweep
duke@435 545 // num-prev = #chunks at end of previous sweep
duke@435 546 //
duke@435 547 // The above are quantities that are measured. Now define:
duke@435 548 //
duke@435 549 // num-desired := num-prev + split-births - split-deaths - num-curr
duke@435 550 //
duke@435 551 // Roughly, num-prev + split-births is the supply,
duke@435 552 // split-deaths is demand due to other sizes
duke@435 553 // and num-curr is what we have left.
duke@435 554 //
duke@435 555 // Thus, num-desired is roughly speaking the "legitimate demand"
duke@435 556 // for blocks of this size and what we are striving to reach at the
duke@435 557 // end of the current sweep.
duke@435 558 //
duke@435 559 // For a given list, let num-len be its current population.
duke@435 560 // Define, for a free list of a given size:
duke@435 561 //
duke@435 562 // coal-overpopulated := num-len >= num-desired * coal-surplus
duke@435 563 // (coal-surplus is set to 1.05, i.e. we allow a little slop when
duke@435 564 // coalescing -- we do not coalesce unless we think that the current
duke@435 565 // supply has exceeded the estimated demand by more than 5%).
duke@435 566 //
duke@435 567 // For the set of sizes in the binary tree, which is neither dense nor
duke@435 568 // closed, it may be the case that for a particular size we have never
duke@435 569 // had, or do not now have, or did not have at the previous sweep,
duke@435 570 // chunks of that size. We need to extend the definition of
duke@435 571 // coal-overpopulated to such sizes as well:
duke@435 572 //
duke@435 573 // For a chunk in/not in the binary tree, extend coal-overpopulated
duke@435 574 // defined above to include all sizes as follows:
duke@435 575 //
duke@435 576 // . a size that is non-existent is coal-overpopulated
duke@435 577 // . a size that has a num-desired <= 0 as defined above is
duke@435 578 // coal-overpopulated.
duke@435 579 //
duke@435 580 // Also define, for a chunk heap-offset C and mountain heap-offset M:
duke@435 581 //
duke@435 582 // close-to-mountain := C >= 0.99 * M
duke@435 583 //
duke@435 584 // Now, the coalescing strategy is:
duke@435 585 //
duke@435 586 // Coalesce left-hand chunk with right-hand chunk if and
duke@435 587 // only if:
duke@435 588 //
duke@435 589 // EITHER
duke@435 590 // . left-hand chunk is of a size that is coal-overpopulated
duke@435 591 // OR
duke@435 592 // . right-hand chunk is close-to-mountain
duke@435 593 void smallCoalBirth(size_t size);
duke@435 594 void smallCoalDeath(size_t size);
duke@435 595 void coalBirth(size_t size);
duke@435 596 void coalDeath(size_t size);
duke@435 597 void smallSplitBirth(size_t size);
duke@435 598 void smallSplitDeath(size_t size);
duke@435 599 void splitBirth(size_t size);
duke@435 600 void splitDeath(size_t size);
duke@435 601 void split(size_t from, size_t to1);
duke@435 602
duke@435 603 double flsFrag() const;
duke@435 604 };
duke@435 605
duke@435 606 // A parallel-GC-thread-local allocation buffer for allocation into a
duke@435 607 // CompactibleFreeListSpace.
duke@435 608 class CFLS_LAB : public CHeapObj {
duke@435 609 // The space that this buffer allocates into.
duke@435 610 CompactibleFreeListSpace* _cfls;
duke@435 611
duke@435 612 // Our local free lists.
duke@435 613 FreeList _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
duke@435 614
duke@435 615 // Initialized from a command-line arg.
duke@435 616
ysr@1580 617 // Allocation statistics in support of dynamic adjustment of
ysr@1580 618 // #blocks to claim per get_from_global_pool() call below.
ysr@1580 619 static AdaptiveWeightedAverage
ysr@1580 620 _blocks_to_claim [CompactibleFreeListSpace::IndexSetSize];
ysr@1580 621 static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
ysr@1580 622 static int _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
ysr@1580 623 size_t _num_blocks [CompactibleFreeListSpace::IndexSetSize];
ysr@1580 624
ysr@1580 625 // Internal work method
ysr@1580 626 void get_from_global_pool(size_t word_sz, FreeList* fl);
duke@435 627
duke@435 628 public:
duke@435 629 CFLS_LAB(CompactibleFreeListSpace* cfls);
duke@435 630
duke@435 631 // Allocate and return a block of the given size, or else return NULL.
duke@435 632 HeapWord* alloc(size_t word_sz);
duke@435 633
duke@435 634 // Return any unused portions of the buffer to the global pool.
ysr@1580 635 void retire(int tid);
ysr@1580 636
ysr@1580 637 // Dynamic OldPLABSize sizing
ysr@1580 638 static void compute_desired_plab_size();
ysr@1580 639 // When the settings are modified from default static initialization
ysr@1580 640 static void modify_initialization(size_t n, unsigned wt);
duke@435 641 };
duke@435 642
duke@435 643 size_t PromotionInfo::refillSize() const {
duke@435 644 const size_t CMSSpoolBlockSize = 256;
duke@435 645 const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
duke@435 646 * CMSSpoolBlockSize);
duke@435 647 return CompactibleFreeListSpace::adjustObjectSize(sz);
duke@435 648 }
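As an aside, the coalescing policy documented in the census comments above (num-desired := num-prev + split-births - split-deaths - num-curr, with a size treated as coal-overpopulated once its population reaches num-desired scaled by the 1.05 coalescing surplus) can be illustrated with a small standalone sketch. The struct and function names below are illustrative only and do not correspond to HotSpot code; only the arithmetic comes from the comments in the header.

#include <cstddef>
#include <iostream>

// Per-size census figures, as described in the header's comments.
struct SizeCensus {
  long num_prev;      // #chunks at end of previous sweep
  long num_curr;      // #chunks at start of this sweep
  long split_births;  // #chunks added by splits since the previous sweep ended
  long split_deaths;  // #chunks consumed by splits since the previous sweep ended
};

// num-desired := num-prev + split-births - split-deaths - num-curr
long num_desired(const SizeCensus& c) {
  return c.num_prev + c.split_births - c.split_deaths - c.num_curr;
}

// coal-overpopulated := num-len >= num-desired * coal-surplus (coal-surplus = 1.05),
// extended so that sizes with num-desired <= 0 always count as overpopulated.
bool coal_overpopulated(const SizeCensus& c, long num_len) {
  const double coal_surplus = 1.05;
  long desired = num_desired(c);
  if (desired <= 0) return true;
  return num_len >= desired * coal_surplus;
}

int main() {
  SizeCensus c{ /*num_prev=*/40, /*num_curr=*/10, /*split_births=*/8, /*split_deaths=*/5 };
  std::cout << "num-desired = " << num_desired(c)                    // 40 + 8 - 5 - 10 = 33
            << ", overpopulated at 36 chunks? " << std::boolalpha
            << coal_overpopulated(c, 36) << "\n";                    // 36 >= 33 * 1.05 -> true
  return 0;
}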
