Wed, 02 Jun 2010 22:45:42 -0700
Merge
/*
 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Classes in support of keeping track of promotions into a non-Contiguous
// space, in this case a CompactibleFreeListSpace.

// Forward declarations
class CompactibleFreeListSpace;
class BlkClosure;
class BlkClosureCareful;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;

class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
 public:
  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
    _allocation_size_limit(0) {}
  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
           size_t allocation_size_limit) {
    _ptr = ptr;
    _word_size = word_size;
    _refillSize = refill_size;
    _allocation_size_limit = allocation_size_limit;
  }
  HeapWord* _ptr;
  size_t    _word_size;
  size_t    _refillSize;
  size_t    _allocation_size_limit;  // largest size that will be allocated
};
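
// (Illustrative note, not part of the original header: a linear allocation
// block hands out memory by bumping _ptr and shrinking _word_size.  A
// minimal sketch of carving "size" words from a block "blk", assuming the
// caller has already taken whatever lock the containing generation uses:
//
//   HeapWord* res = NULL;
//   if (blk->_word_size >= size) {
//     res = blk->_ptr;
//     blk->_ptr       += size;
//     blk->_word_size -= size;
//   }
//   // res == NULL means the block must be refilled before it can
//   // satisfy the request.
//
// The actual carving and refill policy lives in
// getChunkFromLinearAllocBlock() and refillLinearAllocBlock() below.)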
// Concrete subclass of CompactibleSpace that implements
// a free list space, such as used in the concurrent mark sweep
// generation.

class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class ASConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class CMSPermGenGen;
  // Local alloc buffer for promotion into this space.
  friend class CFLS_LAB;

  // "Size" of chunks of work (executed during parallel remark phases
  // of CMS collection); this probably belongs in CMSCollector, although
  // it's cached here because it's used in
  // initialize_sequential_subtasks_for_rescan() which modifies
  // par_seq_tasks which also lives in Space. XXX
  const size_t _rescan_task_size;
  const size_t _marking_task_size;

  // Yet another sequential tasks done structure. This supports
  // CMS GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _conc_par_seq_tasks;

  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _gen;

  // Data structures for free blocks (used during allocation/sweeping)

  // Allocation is done linearly from two different blocks depending on
  // whether the request is small or large, in an effort to reduce
  // fragmentation. We assume that any locking for allocation is done
  // by the containing generation. Thus, none of the methods in this
  // space are re-entrant.
  enum SomeConstants {
    SmallForLinearAlloc = 16,                 // size < this then use _sLAB
    SmallForDictionary  = 257,                // size < this then use _indexedFreeList
    IndexSetSize        = SmallForDictionary  // keep this odd-sized
  };
  static int IndexSetStart;
  static int IndexSetStride;
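
  // (Illustrative note, not in the original header: the constants above
  // describe a three-tier allocation scheme.  A rough sketch of the
  // dispatch, assuming the public allocate() entry point delegates along
  // these lines:
  //
  //   if (size < SmallForLinearAlloc)      // very small: bump-pointer LinAB
  //     res = getChunkFromSmallLinearAllocBlock(size);
  //   else if (size < SmallForDictionary)  // small: exact-size indexed lists
  //     res = (HeapWord*) getChunkFromIndexedFreeList(size);
  //   else                                 // large: search the dictionary
  //     res = (HeapWord*) getChunkFromDictionary(size);
  //
  // The real policy also depends on _adaptive_freelists; see
  // allocate_adaptive_freelists() and allocate_non_adaptive_freelists()
  // declared below.)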
 private:
  enum FitStrategyOptions {
    FreeBlockStrategyNone = 0,
    FreeBlockBestFitFirst
  };

  PromotionInfo _promoInfo;

  // helps to impose a global total order on freelistLock ranks;
  // assumes that CFLSpace's are allocated in global total order
  static int _lockRank;

  // a lock protecting the free lists and free blocks;
  // mutable because of ubiquity of locking even for otherwise const methods
  mutable Mutex _freelistLock;
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;
  void assert_locked(const Mutex* lock) const PRODUCT_RETURN;

  // Linear allocation blocks
  LinearAllocBlock _smallLinearAllocBlock;

  FreeBlockDictionary::DictionaryChoice _dictionaryChoice;
  FreeBlockDictionary* _dictionary;    // ptr to dictionary for large size blocks

  FreeList _indexedFreeList[IndexSetSize];
                                       // indexed array for small size blocks
  // allocation strategy
  bool _fitStrategy;                   // Use best fit strategy.
  bool _adaptive_freelists;            // Use adaptive freelists

  // This is an address close to the largest free chunk in the heap.
  // It is currently assumed to be at the end of the heap.  Free
  // chunks with addresses greater than nearLargestChunk are coalesced
  // in an effort to maintain a large chunk at the end of the heap.
  HeapWord* _nearLargestChunk;

  // Used to keep track of limit of sweep for the space
  HeapWord* _sweep_limit;

  // Support for compacting cms
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);

  // Initialization helpers.
  void initializeIndexedFreeListArray();

  // Extra stuff to manage promotion parallelism.

  // a lock protecting the dictionary during par promotion allocation.
  mutable Mutex _parDictionaryAllocLock;
  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }

  // Locks protecting the exact lists during par promotion allocation.
  Mutex* _indexedFreeListParLocks[IndexSetSize];

  // Attempt to obtain up to "n" blocks of size "word_sz" (which is
  // required to be smaller than "IndexSetSize").  If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates the
  // number of free chunks that had previously been "borrowed" from the
  // global list of size "word_sz", and that must now be decremented.
  void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl);
  // Allocation helper functions
  // Allocate using a strategy that takes from the indexed free lists
  // first. This allocation strategy assumes a companion sweeping
  // strategy that attempts to keep the needed number of chunks in each
  // indexed free list.
  HeapWord* allocate_adaptive_freelists(size_t size);
  // Allocate from the linear allocation buffers first. This allocation
  // strategy assumes maximal coalescing can maintain chunks large enough
  // to be used as linear allocation buffers.
  HeapWord* allocate_non_adaptive_freelists(size_t size);

  // Gets a chunk from the linear allocation block (LinAB). If there
  // is not enough space in the LinAB, refills it.
  HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
  HeapWord* getChunkFromSmallLinearAllocBlock(size_t size);
  // Get a chunk from the space remaining in the linear allocation block.
  // If that space is not available, return NULL without attempting to
  // refill the block. Do any repairs on the linear allocation block as
  // appropriate.
  HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
  inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size);

  // Helper function for getChunkFromIndexedFreeList.
  // Replenish the indexed free list for this "size". Do not take from an
  // underpopulated size.
  FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);

  // Get a chunk from the indexed free list. If the indexed free list
  // does not have a free chunk, try to replenish the indexed free list
  // then get the free chunk from the replenished indexed free list.
  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);

  // The returned chunk may be larger than requested (or null).
  FreeChunk* getChunkFromDictionary(size_t size);
  // The returned chunk is the exact size requested (or null).
  FreeChunk* getChunkFromDictionaryExact(size_t size);

  // Find a chunk in the indexed free list that is the best
  // fit for size "numWords".
  FreeChunk* bestFitSmall(size_t numWords);
  // For free list "fl" of chunks of size > numWords,
  // remove a chunk, split off a chunk of size numWords
  // and return it. The split-off remainder is returned to
  // the free lists. The old name for getFromListGreater
  // was lookInListGreater.
  FreeChunk* getFromListGreater(FreeList* fl, size_t numWords);
  // Get a chunk in the indexed free list or dictionary,
  // by considering a larger chunk and splitting it.
  FreeChunk* getChunkFromGreater(size_t numWords);
  // Verify that the given chunk is in the indexed free lists.
  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
  // Remove the specified chunk from the indexed free lists.
  void removeChunkFromIndexedFreeList(FreeChunk* fc);
  // Remove the specified chunk from the dictionary.
  void removeChunkFromDictionary(FreeChunk* fc);
  // Split a free chunk into a smaller free chunk of size "new_size".
  // Return the smaller free chunk and return the remainder to the
  // free lists.
  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
  // Add a chunk to the free lists.
  void addChunkToFreeLists(HeapWord* chunk, size_t size);
  // Add a chunk to the free lists, preferring to suffix it
  // to the last free chunk at the end of the space if possible, and
  // updating the block census stats as well as the block offset table.
  // Take any locks as appropriate if we are multithreaded.
  void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
  // Add a free chunk to the indexed free lists.
  void returnChunkToFreeList(FreeChunk* chunk);
  // Add a free chunk to the dictionary.
  void returnChunkToDictionary(FreeChunk* chunk);

  // Functions for maintaining the linear allocation buffers (LinAB).
  // Repairing a linear allocation block refers to operations
  // performed on the remainder of a LinAB after an allocation
  // has been made from it.
  void repairLinearAllocationBlocks();
  void repairLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlock(LinearAllocBlock* blk);
  void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
  void refillLinearAllocBlocksIfNeeded();

  void verify_objects_initialized() const;

  // Statistics reporting helper functions
  void reportFreeListStatistics() const;
  void reportIndexedFreeListStatistics() const;
  size_t maxChunkSizeInIndexedFreeLists() const;
  size_t numFreeBlocksInIndexedFreeLists() const;
  // Accessor
  HeapWord* unallocated_block() const {
    HeapWord* ub = _bt.unallocated_block();
    assert(ub >= bottom() &&
           ub <= end(), "space invariant");
    return ub;
  }
  void freed(HeapWord* start, size_t size) {
    _bt.freed(start, size);
  }
 protected:
  // reset the indexed free list to its initial empty condition.
  void resetIndexedFreeListArray();
  // reset to an initial state with a single free block described
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t totalSizeInIndexedFreeLists() const;

 public:
  // Constructor...
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                           bool use_adaptive_freelists,
                           FreeBlockDictionary::DictionaryChoice);
  // accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  FreeBlockDictionary* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  // Set CMS global values
  static void set_cms_values();

  // Return the free chunk at the end of the space. If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  bool adaptive_freelists() const { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking
  const size_t rescan_task_size()  const { return _rescan_task_size;  }
  const size_t marking_task_size() const { return _marking_task_size; }
  SequentialSubTasksDone* conc_par_seq_tasks() { return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
                                                  HeapWord* low = NULL);

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of
  // Space; we could consider keeping track of highest allocated
  // address and correcting that at each sweep, as the sweeper
  // goes through the entire allocated part of the generation. We
  // could also use that information to keep the sweeper from
  // sweeping more than is necessary. The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region()
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }
  // This is needed because the default implementation uses block_start()
  // which can't be used at certain times (for example phase 3 of mark-sweep).
  // A better fix is to change the assertions in phase 3 of mark-sweep to
  // use is_in_reserved(), but that is deferred since the is_in() assertions
  // are buried through several layers of callers and are used elsewhere
  // as well.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value);  // override

  // mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }

  // Iteration support
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void oop_iterate(OopClosure* cl);

  void object_iterate(ObjectClosure* blk);
  // Apply the closure to each object in the space whose references
  // point to objects in the heap. The usage of CompactibleFreeListSpace
  // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
  // objects in the space to have references to objects that are no longer
  // valid. For example, an object may reference another object
  // that has already been swept up (collected). This method uses
  // obj_is_alive() to determine whether it is safe to iterate over
  // an object.
  void safe_object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done. A return value of "NULL" indicates that
  // the iteration completed.
  virtual HeapWord*
         object_iterate_careful_m(MemRegion mr,
                                  ObjectClosureCareful* cl);
  virtual HeapWord*
         object_iterate_careful(ObjectClosureCareful* cl);

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start_const(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);

  // iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration. Thus, when the iteration completes
  // there should be no further such objects remaining.
#define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
#undef CFLS_OOP_SINCE_SAVE_MARKS_DECL

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop       promote(oop obj, size_t obj_size);
  void      gc_prologue();
  void      gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related house-keeping functions.
  void      sweep_completed();

  // For an object in this space, the mark-word's two
  // LSB's having the value [11] indicates that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  // This property holds only for single-threaded collections,
  // and is typically used for Cheney scans; for MT scavenges,
  // the property holds for all objects promoted during that
  // scavenge for the duration of the scavenge and is used
  // by card-scanning to avoid scanning objects (being) promoted
  // during that scavenge.
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // Returns true if either the small or large linear allocation buffer is empty.
  bool linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size. This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
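  // (Illustrative example, not in the original header: adjustObjectSize()
  // first raises the request to at least MinChunkSize and then rounds it
  // up to the object alignment. For instance, assuming MinChunkSize were
  // 4 words and the alignment 2 words, a request of 3 words would be
  // adjusted to 4, and a request of 5 words to 6.)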
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
                                    bool coalesced);

  // Support for decisions regarding concurrent collection policy
  bool should_concurrent_collect() const;

  // Support for compaction
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  // reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support
  void print()                            const;
  void prepare_for_verify();
  void verify(bool allow_dirty)           const;
  void verifyFreeLists()                  const PRODUCT_RETURN;
  void verifyIndexedFreeLists()           const;
  void verifyIndexedFreeList(size_t size) const;
  // verify that the given chunk is in the free lists.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void checkFreeListConsistency()         const PRODUCT_RETURN;

  // Printing support
  void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
  void print_indexed_free_lists(outputStream* st) const;
  void print_dictionary_free_lists(outputStream* st) const;
  void print_promo_info_blocks(outputStream* st) const;

  NOT_PRODUCT (
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )
  // The census consists of counts of quantities such as
  // the current count of the free chunks, the number of chunks
  // created as a result of the split of a larger chunk or
  // coalescing of smaller chunks, etc. The counts in the
  // census are used to make decisions on splitting and
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float inter_sweep_current,
                          float inter_sweep_estimate,
                          float intra_sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

  // Record (for each size):
  //
  //   split-births = #chunks added due to splits in (prev-sweep-end,
  //      this-sweep-start)
  //   split-deaths = #chunks removed for splits in (prev-sweep-end,
  //      this-sweep-start)
  //   num-curr     = #chunks at start of this sweep
  //   num-prev     = #chunks at end of previous sweep
  //
  // The above are quantities that are measured. Now define:
  //
  //   num-desired := num-prev + split-births - split-deaths - num-curr
  //
  // Roughly, num-prev + split-births is the supply,
  // split-deaths is demand due to other sizes
  // and num-curr is what we have left.
  //
  // Thus, num-desired is roughly speaking the "legitimate demand"
  // for blocks of this size and what we are striving to reach at the
  // end of the current sweep.
  //
  // For a given list, let num-len be its current population.
  // Define, for a free list of a given size:
  //
  //   coal-overpopulated := num-len >= num-desired * coal-surplus
  //   (coal-surplus is set to 1.05, i.e. we allow a little slop when
  //   coalescing -- we do not coalesce unless we think that the current
  //   supply has exceeded the estimated demand by more than 5%).
  //
  // For the set of sizes in the binary tree, which is neither dense nor
  // closed, it may be the case that for a particular size we have never
  // had, or do not now have, or did not have at the previous sweep,
  // chunks of that size. We need to extend the definition of
  // coal-overpopulated to such sizes as well:
  //
  //   For a chunk in/not in the binary tree, extend coal-overpopulated
  //   defined above to include all sizes as follows:
  //
  //   . a size that is non-existent is coal-overpopulated
  //   . a size that has a num-desired <= 0 as defined above is
  //     coal-overpopulated.
  //
  // Also define, for a chunk heap-offset C and mountain heap-offset M:
  //
  //   close-to-mountain := C >= 0.99 * M
  //
  // Now, the coalescing strategy is:
  //
  //   Coalesce left-hand chunk with right-hand chunk if and
  //   only if:
  //
  //   EITHER
  //     . left-hand chunk is of a size that is coal-overpopulated
  //   OR
  //     . right-hand chunk is close-to-mountain
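  //
  // (Illustrative worked example, not in the original header: suppose that
  // for some size num-prev = 40, split-births = 10, split-deaths = 5 and
  // num-curr = 30.  Then
  //
  //   num-desired = 40 + 10 - 5 - 30 = 15
  //
  // and, with coal-surplus = 1.05, a free list of that size is
  // coal-overpopulated once its current length num-len reaches
  // 15 * 1.05 = 15.75, i.e. 16 or more chunks, so its chunks become
  // candidates for coalescing during the sweep.)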
  void smallCoalBirth(size_t size);
  void smallCoalDeath(size_t size);
  void coalBirth(size_t size);
  void coalDeath(size_t size);
  void smallSplitBirth(size_t size);
  void smallSplitDeath(size_t size);
  void splitBirth(size_t size);
  void splitDeath(size_t size);
  void split(size_t from, size_t to1);

  double flsFrag() const;
};
// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CFLS_LAB : public CHeapObj {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  FreeList _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.

  // Allocation statistics in support of dynamic adjustment of
  // #blocks to claim per get_from_global_pool() call below.
  static AdaptiveWeightedAverage
                 _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
  static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
  static int    _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
  size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];

  // Internal work method
  void get_from_global_pool(size_t word_sz, FreeList* fl);

 public:
  CFLS_LAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire(int tid);

  // Dynamic OldPLABSize sizing
  static void compute_desired_plab_size();
  // When the settings are modified from default static initialization
  static void modify_initialization(size_t n, unsigned wt);
};
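
// (Illustrative usage sketch, not in the original header: each parallel GC
// worker owns one CFLS_LAB and promotes through it, roughly:
//
//   CFLS_LAB lab(cfls);               // cfls: the CompactibleFreeListSpace
//                                     // being promoted into
//   HeapWord* p = lab.alloc(word_sz); // NULL => fall back to a shared,
//                                     // locked allocation path
//   ...
//   lab.retire(tid);                  // at the end of the scavenge, return
//                                     // unused chunks to the global pool
//
// This only sketches the alloc()/retire() lifecycle declared above; the
// fallback policy belongs to the collector, not to this class.)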
size_t PromotionInfo::refillSize() const {
  const size_t CMSSpoolBlockSize = 256;
  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
                                   * CMSSpoolBlockSize);
  return CompactibleFreeListSpace::adjustObjectSize(sz);
}
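
// (Illustrative note, not in the original source: refillSize() asks for
// enough room to hold one SpoolBlock plus CMSSpoolBlockSize (256) spooled
// mark words, converts that byte count into heap words, and then adjusts
// the result the same way an ordinary allocation request would be
// adjusted, so the spooling area can be carved directly out of the
// CompactibleFreeListSpace.)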