Sat, 01 Sep 2012 13:25:18 -0400
6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP

#include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/freeList.hpp"
#include "memory/space.hpp"

// Classes in support of keeping track of promotions into a non-Contiguous
// space, in this case a CompactibleFreeListSpace.

// Forward declarations
class CompactibleFreeListSpace;
class BlkClosure;
class BlkClosureCareful;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;

class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
 public:
  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
    _allocation_size_limit(0) {}
  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
    size_t allocation_size_limit) {
    _ptr = ptr;
    _word_size = word_size;
    _refillSize = refill_size;
    _allocation_size_limit = allocation_size_limit;
  }
  HeapWord* _ptr;
  size_t    _word_size;
  size_t    _refillSize;
  size_t    _allocation_size_limit;  // largest size that will be allocated

  void print_on(outputStream* st) const;
};

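// Rough sketch of how a LinearAllocBlock is consumed (see the allocation
// helpers in CompactibleFreeListSpace below): requests no larger than
// _allocation_size_limit are carved from the front of the block at _ptr,
// shrinking _word_size; when the remainder is too small, the block is
// refilled with a fresh chunk of about _refillSize words.
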
// Concrete subclass of CompactibleSpace that implements
// a free list space, such as used in the concurrent mark sweep
// generation.

class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class ASConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  // Local alloc buffer for promotion into this space.
  friend class CFLS_LAB;

  // "Size" of chunks of work (executed during parallel remark phases
  // of CMS collection); this probably belongs in CMSCollector, although
  // it's cached here because it's used in
  // initialize_sequential_subtasks_for_rescan() which modifies
  // par_seq_tasks which also lives in Space. XXX
  const size_t _rescan_task_size;
  const size_t _marking_task_size;

  // Yet another sequential tasks done structure. This supports
  // CMS GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _conc_par_seq_tasks;

  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _gen;

  // Data structures for free blocks (used during allocation/sweeping)

  // Allocation is done linearly from two different blocks depending on
  // whether the request is small or large, in an effort to reduce
  // fragmentation. We assume that any locking for allocation is done
  // by the containing generation. Thus, none of the methods in this
  // space are re-entrant.
  enum SomeConstants {
    SmallForLinearAlloc = 16,        // size < this then use _sLAB
    SmallForDictionary  = 257,       // size < this then use _indexedFreeList
    IndexSetSize        = SmallForDictionary  // keep this odd-sized
  };
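  // In rough terms (see the allocation helpers further down): requests of
  // fewer than SmallForLinearAlloc words may be satisfied from the small
  // linear allocation block, requests of fewer than SmallForDictionary
  // words come from the indexed free lists, and larger requests go to the
  // dictionary of large blocks.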
  static size_t IndexSetStart;
  static size_t IndexSetStride;

 private:
  enum FitStrategyOptions {
    FreeBlockStrategyNone = 0,
    FreeBlockBestFitFirst
  };

  PromotionInfo _promoInfo;

  // helps to impose a global total order on freelistLock ranks;
  // assumes that CFLSpace's are allocated in global total order
  static int _lockRank;

  // a lock protecting the free lists and free blocks;
  // mutable because of ubiquity of locking even for otherwise const methods
  mutable Mutex _freelistLock;
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;
  void assert_locked(const Mutex* lock) const PRODUCT_RETURN;

  // Linear allocation blocks
  LinearAllocBlock _smallLinearAllocBlock;

  FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
  FreeBlockDictionary<FreeChunk>* _dictionary;  // ptr to dictionary for large size blocks

  FreeList<FreeChunk> _indexedFreeList[IndexSetSize];
                                       // indexed array for small size blocks
  // allocation strategy
  bool _fitStrategy;          // Use best fit strategy.
  bool _adaptive_freelists;   // Use adaptive freelists

  // This is an address close to the largest free chunk in the heap.
  // It is currently assumed to be at the end of the heap. Free
  // chunks with addresses greater than nearLargestChunk are coalesced
  // in an effort to maintain a large chunk at the end of the heap.
  HeapWord* _nearLargestChunk;

  // Used to keep track of limit of sweep for the space
  HeapWord* _sweep_limit;

  // Support for compacting cms
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);

  // Initialization helpers.
  void initializeIndexedFreeListArray();

  // Extra stuff to manage promotion parallelism.

  // a lock protecting the dictionary during par promotion allocation.
  mutable Mutex _parDictionaryAllocLock;
  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }

  // Locks protecting the exact lists during par promotion allocation.
  Mutex* _indexedFreeListParLocks[IndexSetSize];

  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
  // required to be smaller than "IndexSetSize".) If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates a
  // number of free chunks that had been previously "borrowed" from the
  // global list of size "word_sz", and must now be decremented.
  void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl);

  // Allocation helper functions
  // Allocate using a strategy that takes from the indexed free lists
  // first. This allocation strategy assumes a companion sweeping
  // strategy that attempts to keep the needed number of chunks in each
  // indexed free list.
  HeapWord* allocate_adaptive_freelists(size_t size);
  // Allocate from the linear allocation buffers first. This allocation
  // strategy assumes maximal coalescing can maintain chunks large enough
  // to be used as linear allocation buffers.
  HeapWord* allocate_non_adaptive_freelists(size_t size);

  // Gets a chunk from the linear allocation block (LinAB). If there
  // is not enough space in the LinAB, refills it.
  HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
  HeapWord* getChunkFromSmallLinearAllocBlock(size_t size);
  // Get a chunk from the space remaining in the linear allocation block. Do
  // not attempt to refill if the space is not available; return NULL. Do the
  // repairs on the linear allocation block as appropriate.
  HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
  inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size);

  // Helper function for getChunkFromIndexedFreeList.
  // Replenish the indexed free list for this "size". Do not take from an
  // underpopulated size.
  FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);

  // Get a chunk from the indexed free list. If the indexed free list
  // does not have a free chunk, try to replenish the indexed free list
  // then get the free chunk from the replenished indexed free list.
  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);

  // The returned chunk may be larger than requested (or null).
  FreeChunk* getChunkFromDictionary(size_t size);
  // The returned chunk is the exact size requested (or null).
  FreeChunk* getChunkFromDictionaryExact(size_t size);

  // Find a chunk in the indexed free list that is the best
  // fit for size "numWords".
  FreeChunk* bestFitSmall(size_t numWords);
  // For free list "fl" of chunks of size > numWords,
  // remove a chunk, split off a chunk of size numWords
  // and return it. The split off remainder is returned to
  // the free lists. The old name for getFromListGreater
  // was lookInListGreater.
  FreeChunk* getFromListGreater(FreeList<FreeChunk>* fl, size_t numWords);
  // Get a chunk in the indexed free list or dictionary,
  // by considering a larger chunk and splitting it.
  FreeChunk* getChunkFromGreater(size_t numWords);
  // Verify that the given chunk is in the indexed free lists.
  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
  // Remove the specified chunk from the indexed free lists.
  void removeChunkFromIndexedFreeList(FreeChunk* fc);
  // Remove the specified chunk from the dictionary.
  void removeChunkFromDictionary(FreeChunk* fc);
  // Split a free chunk into a smaller free chunk of size "new_size".
  // Return the smaller free chunk and return the remainder to the
  // free lists.
  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
  // Add a chunk to the free lists.
  void addChunkToFreeLists(HeapWord* chunk, size_t size);
  // Add a chunk to the free lists, preferring to suffix it
  // to the last free chunk at end of space if possible, and
  // updating the block census stats as well as block offset table.
  // Take any locks as appropriate if we are multithreaded.
  void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
  // Add a free chunk to the indexed free lists.
  void returnChunkToFreeList(FreeChunk* chunk);
  // Add a free chunk to the dictionary.
  void returnChunkToDictionary(FreeChunk* chunk);

  // Functions for maintaining the linear allocation buffers (LinAB).
  // Repairing a linear allocation block refers to operations
  // performed on the remainder of a LinAB after an allocation
  // has been made from it.
  void repairLinearAllocationBlocks();
  void repairLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
  void refillLinearAllocBlocksIfNeeded();

  void verify_objects_initialized() const;

  // Statistics reporting helper functions
  void reportFreeListStatistics() const;
  void reportIndexedFreeListStatistics() const;
  size_t maxChunkSizeInIndexedFreeLists() const;
  size_t numFreeBlocksInIndexedFreeLists() const;
  // Accessor
  HeapWord* unallocated_block() const {
    if (BlockOffsetArrayUseUnallocatedBlock) {
      HeapWord* ub = _bt.unallocated_block();
      assert(ub >= bottom() &&
             ub <= end(), "space invariant");
      return ub;
    } else {
      return end();
    }
  }
  void freed(HeapWord* start, size_t size) {
    _bt.freed(start, size);
  }

 protected:
  // reset the indexed free list to its initial empty condition.
  void resetIndexedFreeListArray();
  // reset to an initial state with a single free block described
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t totalSizeInIndexedFreeLists() const;

 public:
  // Constructor...
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                           bool use_adaptive_freelists,
                           FreeBlockDictionary<FreeChunk>::DictionaryChoice);
  // accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  // Set CMS global values
  static void set_cms_values();

  // Return the free chunk at the end of the space. If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  bool adaptive_freelists() const { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking
  const size_t rescan_task_size()  const { return _rescan_task_size;  }
  const size_t marking_task_size() const { return _marking_task_size; }
  SequentialSubTasksDone* conc_par_seq_tasks() { return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
                                                  HeapWord* low = NULL);

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of
  // Space; we could consider keeping track of highest allocated
  // address and correcting that at each sweep, as the sweeper
  // goes through the entire allocated part of the generation. We
  // could also use that information to keep the sweeper from
  // sweeping more than is necessary. The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region()
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }

  bool is_in(const void* p) const {
    return used_region().contains(p);
  }

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value);  // override

  // mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }

  // Iteration support
  void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
  void oop_iterate(ExtendedOopClosure* cl);

  void object_iterate(ObjectClosure* blk);
  // Apply the closure to each object in the space whose references
  // point to objects in the heap. The usage of CompactibleFreeListSpace
  // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
  // objects in the space with references to objects that are no longer
  // valid. For example, an object may reference another object
  // that has already been swept up (collected). This method uses
  // obj_is_alive() to determine whether it is safe to iterate over
  // an object.
  void safe_object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done. Return of "NULL" indicates that the
  // iteration completed.
  virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                             ObjectClosureCareful* cl);
  virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start_const(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);

  // iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
    if (CMSTraceSweeper) {
      gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT
                             " for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
                             _sweep_limit, bottom(), end());
    }
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration. Thus, when the iteration completes
  // there should be no further such objects remaining.
  #define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
  #undef CFLS_OOP_SINCE_SAVE_MARKS_DECL

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop  promote(oop obj, size_t obj_size);
  void gc_prologue();
  void gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related house-keeping functions.
  void sweep_completed();

  // For an object in this space, the mark-word's two
  // LSB's having the value [11] indicates that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  // This property holds only for single-threaded collections,
  // and is typically used for Cheney scans; for MT scavenges,
  // the property holds for all objects promoted during that
  // scavenge for the duration of the scavenge and is used
  // by card-scanning to avoid scanning objects (being) promoted
  // during that scavenge.
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // returns true if either the small or large linear allocation buffer is empty.
  bool linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size. This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
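  // For example, a request smaller than MinChunkSize words is first raised
  // to MinChunkSize and then rounded up by align_object_size(); a request
  // already at or above MinChunkSize is only alignment-padded.
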
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
                                    bool coalesced);

  // Support for decisions regarding concurrent collection policy
  bool should_concurrent_collect() const;

  // Support for compaction
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  // reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support
  void print()                            const;
  void print_on(outputStream* st)         const;
  void prepare_for_verify();
  void verify()                           const;
  void verifyFreeLists()                  const PRODUCT_RETURN;
  void verifyIndexedFreeLists()           const;
  void verifyIndexedFreeList(size_t size) const;
  // Verify that the given chunk is in the free lists:
  // i.e. either the binary tree dictionary, the indexed free lists
  // or the linear allocation block.
  bool verify_chunk_in_free_list(FreeChunk* fc) const;
  // Verify that the given chunk is the linear allocation block
  bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void check_free_list_consistency() const PRODUCT_RETURN;

  // Printing support
  void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
  void print_indexed_free_lists(outputStream* st) const;
  void print_dictionary_free_lists(outputStream* st) const;
  void print_promo_info_blocks(outputStream* st) const;

  NOT_PRODUCT (
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )

  // The census consists of counts of the quantities such as
  // the current count of the free chunks, number of chunks
  // created as a result of the split of a larger chunk or
  // coalescing of smaller chunks, etc. The counts in the
  // census are used to make decisions on splitting and
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float inter_sweep_current,
                          float inter_sweep_estimate,
                          float intra_sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

  // Record (for each size):
  //
  //   split-births = #chunks added due to splits in (prev-sweep-end,
  //      this-sweep-start)
  //   split-deaths = #chunks removed for splits in (prev-sweep-end,
  //      this-sweep-start)
  //   num-curr     = #chunks at start of this sweep
  //   num-prev     = #chunks at end of previous sweep
  //
  // The above are quantities that are measured. Now define:
  //
  //   num-desired := num-prev + split-births - split-deaths - num-curr
  //
  // Roughly, num-prev + split-births is the supply,
  // split-deaths is demand due to other sizes
  // and num-curr is what we have left.
  //
  // Thus, num-desired is roughly speaking the "legitimate demand"
  // for blocks of this size and what we are striving to reach at the
  // end of the current sweep.
  //
  // For a given list, let num-len be its current population.
  // Define, for a free list of a given size:
  //
  //   coal-overpopulated := num-len >= num-desired * coal-surplus
  //   (coal-surplus is set to 1.05, i.e. we allow a little slop when
  //   coalescing -- we do not coalesce unless we think that the current
  //   supply has exceeded the estimated demand by more than 5%).
  //
  // For the set of sizes in the binary tree, which is neither dense nor
  // closed, it may be the case that for a particular size we have never
  // had, or do not now have, or did not have at the previous sweep,
  // chunks of that size. We need to extend the definition of
  // coal-overpopulated to such sizes as well:
  //
  //   For a chunk in/not in the binary tree, extend coal-overpopulated
  //   defined above to include all sizes as follows:
  //
  //   . a size that is non-existent is coal-overpopulated
  //   . a size that has a num-desired <= 0 as defined above is
  //     coal-overpopulated.
  //
  // Also define, for a chunk heap-offset C and mountain heap-offset M:
  //
  //   close-to-mountain := C >= 0.99 * M
  //
  // Now, the coalescing strategy is:
  //
  //   Coalesce left-hand chunk with right-hand chunk if and
  //   only if:
  //
  //     EITHER
  //       . left-hand chunk is of a size that is coal-overpopulated
  //     OR
  //       . right-hand chunk is close-to-mountain
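  //
  // A worked example of the rule above (illustrative numbers only): suppose
  // for some size num-prev = 50, split-births = 20, split-deaths = 10 and
  // num-curr = 40. Then num-desired = 50 + 20 - 10 - 40 = 20, and with
  // coal-surplus = 1.05 the threshold is 21; a current population of
  // num-len = 40 exceeds it, so the size is coal-overpopulated and a
  // left-hand chunk of this size is a candidate for coalescing.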
  void smallCoalBirth(size_t size);
  void smallCoalDeath(size_t size);
  void coalBirth(size_t size);
  void coalDeath(size_t size);
  void smallSplitBirth(size_t size);
  void smallSplitDeath(size_t size);
  void split_birth(size_t size);
  void splitDeath(size_t size);
  void split(size_t from, size_t to1);

  double flsFrag() const;
};

// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CFLS_LAB : public CHeapObj<mtGC> {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  FreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.

  // Allocation statistics in support of dynamic adjustment of
  // #blocks to claim per get_from_global_pool() call below.
  static AdaptiveWeightedAverage
                _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
  static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
  static uint   _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
  size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];

  // Internal work method
  void get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl);

 public:
  CFLS_LAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire(int tid);

  // Dynamic OldPLABSize sizing
  static void compute_desired_plab_size();
  // When the settings are modified from default static initialization
  static void modify_initialization(size_t n, unsigned wt);
};

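// A rough usage sketch (hypothetical worker loop, not the actual CMS call
// sites): each parallel GC worker owns a CFLS_LAB bound to the old-gen
// CompactibleFreeListSpace, allocates promotion space from it, and retires
// it at the end of the parallel phase, e.g.:
//
//   CFLS_LAB lab(cfls);                  // cfls: the target space
//   HeapWord* buf = lab.alloc(word_sz);  // may return NULL on failure
//   ...                                  // copy the promoted object
//   lab.retire(worker_id);               // give unused chunks back
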
size_t PromotionInfo::refillSize() const {
  const size_t CMSSpoolBlockSize = 256;
  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
                                   * CMSSpoolBlockSize);
  return CompactibleFreeListSpace::adjustObjectSize(sz);
}
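// (That is, a refill is sized to hold CMSSpoolBlockSize spooled mark words
// plus the SpoolBlock header, rounded to a legal chunk size by
// adjustObjectSize() above.)
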
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_HPP