src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp

author:      xdono
date:        Mon, 09 Mar 2009 13:28:46 -0700
changeset:   1014:0fbdb4381b99
parent:      952:e9be0e04635a
child:       1580:e018e6884bd8
permissions: -rw-r--r--

6814575: Update copyright year
Summary: Update copyright for files that have been modified in 2009, up to 03/09
Reviewed-by: katleman, tbell, ohair

/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Classes in support of keeping track of promotions into a non-Contiguous
// space, in this case a CompactibleFreeListSpace.

#define CFLS_LAB_REFILL_STATS 0

// Forward declarations
class CompactibleFreeListSpace;
class BlkClosure;
class BlkClosureCareful;
class UpwardsObjectClosure;
class ObjectClosureCareful;
class Klass;

class PromotedObject VALUE_OBJ_CLASS_SPEC {
 private:
  enum {
    promoted_mask  = right_n_bits(2),   // i.e. 0x3
    displaced_mark = nth_bit(2),        // i.e. 0x4
    next_mask      = ~(right_n_bits(3)) // i.e. ~(0x7)
  };
  intptr_t _next;
 public:
  inline PromotedObject* next() const {
    return (PromotedObject*)(_next & next_mask);
  }
  inline void setNext(PromotedObject* x) {
    assert(((intptr_t)x & ~next_mask) == 0,
           "Conflict in bit usage, "
           " or insufficient alignment of objects");
    _next |= (intptr_t)x;
  }
  inline void setPromotedMark() {
    _next |= promoted_mask;
  }
  inline bool hasPromotedMark() const {
    return (_next & promoted_mask) == promoted_mask;
  }
  inline void setDisplacedMark() {
    _next |= displaced_mark;
  }
  inline bool hasDisplacedMark() const {
    return (_next & displaced_mark) != 0;
  }
  inline void clearNext()        { _next = 0; }
  debug_only(void *next_addr() { return (void *) &_next; })
};
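
// Editor's note (illustrative, not part of the original source): the three
// low bits of _next are available for the flags above only because promoted
// objects are at least 8-byte aligned, which is what the assert in setNext()
// checks.  For example, a stored value of 0x7f3a2c001047 (hypothetical) would
// decode as next() == 0x7f3a2c001040, hasPromotedMark() == true (both bits of
// promoted_mask set) and hasDisplacedMark() == true (displaced_mark bit set).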

class SpoolBlock: public FreeChunk {
  friend class PromotionInfo;
 protected:
  SpoolBlock*  nextSpoolBlock;
  size_t       bufferSize;        // number of usable words in this block
  markOop*     displacedHdr;      // the displaced headers start here

  // Note about bufferSize: it denotes the number of entries available plus 1;
  // legal indices range from 1 through bufferSize - 1.  See the verification
  // code verify() that counts the number of displaced headers spooled.
  size_t computeBufferSize() {
    return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markOop);
  }

 public:
  void init() {
    bufferSize = computeBufferSize();
    displacedHdr = (markOop*)&displacedHdr;
    nextSpoolBlock = NULL;
  }
};
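
// Editor's worked example (illustrative; the concrete sizes are assumptions,
// not taken from this file): on a 64-bit VM with size() == 256 heap words,
// sizeof(HeapWord) == 8, sizeof(markOop) == 8 and sizeof(SpoolBlock) == 40,
// computeBufferSize() returns (256 * 8 - 40) / 8 == 251, i.e. bufferSize is
// 251 and indices 1 .. 250 are available for displaced headers, per the
// "plus 1" convention noted above.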

class PromotionInfo VALUE_OBJ_CLASS_SPEC {
  bool            _tracking;      // set if tracking
  CompactibleFreeListSpace* _space; // the space to which this belongs
  PromotedObject* _promoHead;     // head of list of promoted objects
  PromotedObject* _promoTail;     // tail of list of promoted objects
  SpoolBlock*     _spoolHead;     // first spooling block
  SpoolBlock*     _spoolTail;     // last  non-full spooling block or null
  SpoolBlock*     _splice_point;  // when _spoolTail is null, holds list tail
  SpoolBlock*     _spareSpool;    // free spool buffer
  size_t          _firstIndex;    // first active index in
                                  // first spooling block (_spoolHead)
  size_t          _nextIndex;     // last active index + 1 in last
                                  // spooling block (_spoolTail)
 private:
  // ensure that spooling space exists; return true if there is spooling space
  bool ensure_spooling_space_work();

 public:
  PromotionInfo() :
    _tracking(0), _space(NULL),
    _promoHead(NULL), _promoTail(NULL),
    _spoolHead(NULL), _spoolTail(NULL),
    _spareSpool(NULL), _firstIndex(1),
    _nextIndex(1) {}

  bool noPromotions() const {
    assert(_promoHead != NULL || _promoTail == NULL, "list inconsistency");
    return _promoHead == NULL;
  }
  void startTrackingPromotions();
  void stopTrackingPromotions();
  bool tracking() const          { return _tracking;  }
  void track(PromotedObject* trackOop);      // keep track of a promoted oop
  // The following variant must be used when trackOop is not fully
  // initialized and has a NULL klass:
  void track(PromotedObject* trackOop, klassOop klassOfOop); // keep track of a promoted oop
  void setSpace(CompactibleFreeListSpace* sp) { _space = sp; }
  CompactibleFreeListSpace* space() const     { return _space; }
  markOop nextDisplacedHeader(); // get next header & forward spool pointer
  void    saveDisplacedHeader(markOop hdr);
                                 // save header and forward spool

  inline size_t refillSize() const;

  SpoolBlock* getSpoolBlock();   // return a free spooling block
  inline bool has_spooling_space() {
    return _spoolTail != NULL && _spoolTail->bufferSize > _nextIndex;
  }
  // ensure that spooling space exists
  bool ensure_spooling_space() {
    return has_spooling_space() || ensure_spooling_space_work();
  }
  #define PROMOTED_OOPS_ITERATE_DECL(OopClosureType, nv_suffix)  \
    void promoted_oops_iterate##nv_suffix(OopClosureType* cl);
  ALL_SINCE_SAVE_MARKS_CLOSURES(PROMOTED_OOPS_ITERATE_DECL)
  #undef PROMOTED_OOPS_ITERATE_DECL
  void promoted_oops_iterate(OopsInGenClosure* cl) {
    promoted_oops_iterate_v(cl);
  }
  void verify()  const;
  void reset() {
    _promoHead = NULL;
    _promoTail = NULL;
    _spoolHead = NULL;
    _spoolTail = NULL;
    _spareSpool = NULL;
    _firstIndex = 0;
    _nextIndex = 0;
  }
};

class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
 public:
  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
    _allocation_size_limit(0) {}
  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
    size_t allocation_size_limit) {
    _ptr = ptr;
    _word_size = word_size;
    _refillSize = refill_size;
    _allocation_size_limit = allocation_size_limit;
  }
  HeapWord* _ptr;
  size_t    _word_size;
  size_t    _refillSize;
  size_t    _allocation_size_limit;  // largest size that will be allocated
};

// Concrete subclass of CompactibleSpace that implements
// a free list space, such as used in the concurrent mark sweep
// generation.

class CompactibleFreeListSpace: public CompactibleSpace {
  friend class VMStructs;
  friend class ConcurrentMarkSweepGeneration;
  friend class ASConcurrentMarkSweepGeneration;
  friend class CMSCollector;
  friend class CMSPermGenGen;
  // Local alloc buffer for promotion into this space.
  friend class CFLS_LAB;

  // "Size" of chunks of work (executed during parallel remark phases
  // of CMS collection); this probably belongs in CMSCollector, although
  // it's cached here because it's used in
  // initialize_sequential_subtasks_for_rescan() which modifies
  // par_seq_tasks which also lives in Space. XXX
  const size_t _rescan_task_size;
  const size_t _marking_task_size;

  // Yet another sequential tasks done structure. This supports
  // CMS GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _conc_par_seq_tasks;

  BlockOffsetArrayNonContigSpace _bt;

  CMSCollector* _collector;
  ConcurrentMarkSweepGeneration* _gen;

  // Data structures for free blocks (used during allocation/sweeping)

  // Allocation is done linearly from two different blocks depending on
  // whether the request is small or large, in an effort to reduce
  // fragmentation. We assume that any locking for allocation is done
  // by the containing generation. Thus, none of the methods in this
  // space are re-entrant.
  enum SomeConstants {
    SmallForLinearAlloc = 16,        // size < this then use _sLAB
    SmallForDictionary  = 257,       // size < this then use _indexedFreeList
    IndexSetSize        = SmallForDictionary,  // keep this odd-sized
    IndexSetStart       = MinObjAlignment,
    IndexSetStride      = MinObjAlignment
  };
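
  // Editor's illustration of the size classes above (a reading of the enum
  // comments; not text from the original file): a request of 10 words is
  // below SmallForLinearAlloc and so is a candidate for the small linear
  // allocation block, a 100-word request is below SmallForDictionary and is
  // served from _indexedFreeList, and a 300-word request goes to the
  // dictionary of large blocks.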

 private:
  enum FitStrategyOptions {
    FreeBlockStrategyNone = 0,
    FreeBlockBestFitFirst
  };

  PromotionInfo _promoInfo;

  // helps to impose a global total order on freelistLock ranks;
  // assumes that CFLSpace's are allocated in global total order
  static int   _lockRank;

  // a lock protecting the free lists and free blocks;
  // mutable because of ubiquity of locking even for otherwise const methods
  mutable Mutex _freelistLock;
  // locking verifier convenience function
  void assert_locked() const PRODUCT_RETURN;

  // Linear allocation blocks
  LinearAllocBlock _smallLinearAllocBlock;

  FreeBlockDictionary::DictionaryChoice _dictionaryChoice;
  FreeBlockDictionary* _dictionary;    // ptr to dictionary for large size blocks

  FreeList _indexedFreeList[IndexSetSize];
                                       // indexed array for small size blocks
  // allocation strategy
  bool       _fitStrategy;      // Use best fit strategy.
  bool       _adaptive_freelists; // Use adaptive freelists

  // This is an address close to the largest free chunk in the heap.
  // It is currently assumed to be at the end of the heap.  Free
  // chunks with addresses greater than nearLargestChunk are coalesced
  // in an effort to maintain a large chunk at the end of the heap.
  HeapWord*  _nearLargestChunk;

  // Used to keep track of limit of sweep for the space
  HeapWord* _sweep_limit;

  // Support for compacting cms
  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);

  // Initialization helpers.
  void initializeIndexedFreeListArray();

  // Extra stuff to manage promotion parallelism.

  // a lock protecting the dictionary during par promotion allocation.
  mutable Mutex _parDictionaryAllocLock;
  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }

  // Locks protecting the exact lists during par promotion allocation.
  Mutex* _indexedFreeListParLocks[IndexSetSize];

#if CFLS_LAB_REFILL_STATS
  // Some statistics.
  jint  _par_get_chunk_from_small;
  jint  _par_get_chunk_from_large;
#endif

  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
  // required to be smaller than "IndexSetSize".)  If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates a
  // number of free chunks that had been previously "borrowed" from global
  // list of size "word_sz", and must now be decremented.
  void par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl);

  // Allocation helper functions
  // Allocate using a strategy that takes from the indexed free lists
  // first.  This allocation strategy assumes a companion sweeping
  // strategy that attempts to keep the needed number of chunks in each
  // indexed free list.
  HeapWord* allocate_adaptive_freelists(size_t size);
  // Allocate from the linear allocation buffers first.  This allocation
  // strategy assumes maximal coalescing can maintain chunks large enough
  // to be used as linear allocation buffers.
  HeapWord* allocate_non_adaptive_freelists(size_t size);

  // Gets a chunk from the linear allocation block (LinAB).  If there
  // is not enough space in the LinAB, refills it.
  HeapWord*  getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
  HeapWord*  getChunkFromSmallLinearAllocBlock(size_t size);
  // Get a chunk from the space remaining in the linear allocation block.  Do
  // not attempt to refill if the space is not available, return NULL.  Do the
  // repairs on the linear allocation block as appropriate.
  HeapWord*  getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
  inline HeapWord*  getChunkFromSmallLinearAllocBlockRemainder(size_t size);

  // Helper function for getChunkFromIndexedFreeList.
  // Replenish the indexed free list for this "size".  Do not take from an
  // underpopulated size.
  FreeChunk*  getChunkFromIndexedFreeListHelper(size_t size);

  // Get a chunk from the indexed free list.  If the indexed free list
  // does not have a free chunk, try to replenish the indexed free list
  // then get the free chunk from the replenished indexed free list.
  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);

  // The returned chunk may be larger than requested (or null).
  FreeChunk* getChunkFromDictionary(size_t size);
  // The returned chunk is the exact size requested (or null).
  FreeChunk* getChunkFromDictionaryExact(size_t size);

  // Find a chunk in the indexed free list that is the best
  // fit for size "numWords".
  FreeChunk* bestFitSmall(size_t numWords);
  // For free list "fl" of chunks of size > numWords,
  // remove a chunk, split off a chunk of size numWords
  // and return it.  The split off remainder is returned to
  // the free lists.  The old name for getFromListGreater
  // was lookInListGreater.
  FreeChunk* getFromListGreater(FreeList* fl, size_t numWords);
  // Get a chunk in the indexed free list or dictionary,
  // by considering a larger chunk and splitting it.
  FreeChunk* getChunkFromGreater(size_t numWords);
  // Verify that the given chunk is in the indexed free lists.
  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
  // Remove the specified chunk from the indexed free lists.
  void       removeChunkFromIndexedFreeList(FreeChunk* fc);
  // Remove the specified chunk from the dictionary.
  void       removeChunkFromDictionary(FreeChunk* fc);
  // Split a free chunk into a smaller free chunk of size "new_size".
  // Return the smaller free chunk and return the remainder to the
  // free lists.
  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
  // Add a chunk to the free lists.
  void       addChunkToFreeLists(HeapWord* chunk, size_t size);
  // Add a chunk to the free lists, preferring to suffix it
  // to the last free chunk at end of space if possible, and
  // updating the block census stats as well as block offset table.
  // Take any locks as appropriate if we are multithreaded.
  void       addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
  // Add a free chunk to the indexed free lists.
  void       returnChunkToFreeList(FreeChunk* chunk);
  // Add a free chunk to the dictionary.
  void       returnChunkToDictionary(FreeChunk* chunk);

  // Functions for maintaining the linear allocation buffers (LinAB).
  // Repairing a linear allocation block refers to operations
  // performed on the remainder of a LinAB after an allocation
  // has been made from it.
  void       repairLinearAllocationBlocks();
  void       repairLinearAllocBlock(LinearAllocBlock* blk);
  void       refillLinearAllocBlock(LinearAllocBlock* blk);
  void       refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
  void       refillLinearAllocBlocksIfNeeded();

  void       verify_objects_initialized() const;

  // Statistics reporting helper functions
  void       reportFreeListStatistics() const;
  void       reportIndexedFreeListStatistics() const;
  size_t     maxChunkSizeInIndexedFreeLists() const;
  size_t     numFreeBlocksInIndexedFreeLists() const;
  // Accessor
  HeapWord* unallocated_block() const {
    HeapWord* ub = _bt.unallocated_block();
    assert(ub >= bottom() &&
           ub <= end(), "space invariant");
    return ub;
  }
  void freed(HeapWord* start, size_t size) {
    _bt.freed(start, size);
  }

 protected:
  // reset the indexed free list to its initial empty condition.
  void resetIndexedFreeListArray();
  // reset to an initial state with a single free block described
  // by the MemRegion parameter.
  void reset(MemRegion mr);
  // Return the total number of words in the indexed free lists.
  size_t     totalSizeInIndexedFreeLists() const;

 public:
  // Constructor...
  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                           bool use_adaptive_freelists,
                           FreeBlockDictionary::DictionaryChoice);
  // accessors
  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
  FreeBlockDictionary* dictionary() const { return _dictionary; }
  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }

  // Return the free chunk at the end of the space.  If no such
  // chunk exists, return NULL.
  FreeChunk* find_chunk_at_end();

  bool adaptive_freelists() const { return _adaptive_freelists; }

  void set_collector(CMSCollector* collector) { _collector = collector; }

  // Support for parallelization of rescan and marking
  const size_t rescan_task_size()  const { return _rescan_task_size;  }
  const size_t marking_task_size() const { return _marking_task_size; }
  SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; }
  void initialize_sequential_subtasks_for_rescan(int n_threads);
  void initialize_sequential_subtasks_for_marking(int n_threads,
         HeapWord* low = NULL);

#if CFLS_LAB_REFILL_STATS
  void print_par_alloc_stats();
#endif

  // Space enquiries
  size_t used() const;
  size_t free() const;
  size_t max_alloc_in_words() const;
  // XXX: should have a less conservative used_region() than that of
  // Space; we could consider keeping track of highest allocated
  // address and correcting that at each sweep, as the sweeper
  // goes through the entire allocated part of the generation. We
  // could also use that information to keep the sweeper from
  // sweeping more than is necessary. The allocator and sweeper will
  // of course need to synchronize on this, since the sweeper will
  // try to bump down the address and the allocator will try to bump it up.
  // For now, however, we'll just use the default used_region()
  // which overestimates the region by returning the entire
  // committed region (this is safe, but inefficient).

  // Returns a subregion of the space containing all the objects in
  // the space.
  MemRegion used_region() const {
    return MemRegion(bottom(),
                     BlockOffsetArrayUseUnallocatedBlock ?
                     unallocated_block() : end());
  }

  // This is needed because the default implementation uses block_start()
  // which can't be used at certain times (for example phase 3 of mark-sweep).
  // A better fix is to change the assertions in phase 3 of mark-sweep to
  // use is_in_reserved(), but that is deferred since the is_in() assertions
  // are buried through several layers of callers and are used elsewhere
  // as well.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }

  virtual bool is_free_block(const HeapWord* p) const;

  // Resizing support
  void set_end(HeapWord* value);  // override

  // mutual exclusion support
  Mutex* freelistLock() const { return &_freelistLock; }

  // Iteration support
  void oop_iterate(MemRegion mr, OopClosure* cl);
  void oop_iterate(OopClosure* cl);

  void object_iterate(ObjectClosure* blk);
  // Apply the closure to each object in the space whose references
  // point to objects in the heap.  The usage of CompactibleFreeListSpace
  // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
  // objects in the space with references to objects that are no longer
  // valid.  For example, an object may reference another object
  // that has already been swept up (collected).  This method uses
  // obj_is_alive() to determine whether it is safe to iterate over
  // an object.
  void safe_object_iterate(ObjectClosure* blk);
  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);

  // Requires that "mr" be entirely within the space.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done.  Return of "NULL" indicates that the
  // iteration completed.
  virtual HeapWord*
       object_iterate_careful_m(MemRegion mr,
                                ObjectClosureCareful* cl);
  virtual HeapWord*
       object_iterate_careful(ObjectClosureCareful* cl);

  // Override: provides a DCTO_CL specific to this kind of space.
  DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
                                     CardTableModRefBS::PrecisionStyle precision,
                                     HeapWord* boundary);

  void blk_iterate(BlkClosure* cl);
  void blk_iterate_careful(BlkClosureCareful* cl);
  HeapWord* block_start_const(const void* p) const;
  HeapWord* block_start_careful(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
  bool block_is_obj(const HeapWord* p) const;
  bool obj_is_alive(const HeapWord* p) const;
  size_t block_size_nopar(const HeapWord* p) const;
  bool block_is_obj_nopar(const HeapWord* p) const;

  // iteration support for promotion
  void save_marks();
  bool no_allocs_since_save_marks();
  void object_iterate_since_last_GC(ObjectClosure* cl);

  // iteration support for sweeping
  void save_sweep_limit() {
    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                   unallocated_block() : end();
  }
  NOT_PRODUCT(
    void clear_sweep_limit() { _sweep_limit = NULL; }
  )
  HeapWord* sweep_limit() { return _sweep_limit; }

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // promoted into this generation since the most recent save_marks() call.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration. Thus, when the iteration completes
  // there should be no further such objects remaining.
  #define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
    void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
  ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL)
  #undef CFLS_OOP_SINCE_SAVE_MARKS_DECL

  // Allocation support
  HeapWord* allocate(size_t size);
  HeapWord* par_allocate(size_t size);

  oop       promote(oop obj, size_t obj_size);
  void      gc_prologue();
  void      gc_epilogue();

  // This call is used by a containing CMS generation / collector
  // to inform the CFLS space that a sweep has been completed
  // and that the space can do any related house-keeping functions.
  void      sweep_completed();

  // For an object in this space, the mark-word's two
  // LSB's having the value [11] indicates that it has been
  // promoted since the most recent call to save_marks() on
  // this generation and has not subsequently been iterated
  // over (using oop_since_save_marks_iterate() above).
  bool obj_allocated_since_save_marks(const oop obj) const {
    assert(is_in_reserved(obj), "Wrong space?");
    return ((PromotedObject*)obj)->hasPromotedMark();
  }

  // A worst-case estimate of the space required (in HeapWords) to expand the
  // heap when promoting an obj of size obj_size.
  size_t expansionSpaceRequired(size_t obj_size) const;

  FreeChunk* allocateScratch(size_t size);

  // returns true if either the small or large linear allocation buffer is empty.
  bool       linearAllocationWouldFail() const;

  // Adjust the chunk for the minimum size.  This version is called in
  // most cases in CompactibleFreeListSpace methods.
  inline static size_t adjustObjectSize(size_t size) {
    return (size_t) align_object_size(MAX2(size, (size_t)MinChunkSize));
  }
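
  // Editor's illustration (hypothetical values; MinChunkSize and the object
  // alignment are defined elsewhere): if MinChunkSize were 4 words and objects
  // were aligned on 2-word boundaries, adjustObjectSize(1) and
  // adjustObjectSize(3) would both return 4, while adjustObjectSize(5) would
  // round up to 6.
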
  // This is a virtual version of adjustObjectSize() that is called
  // only occasionally when the compaction space changes and the type
  // of the new compaction space is only known to be CompactibleSpace.
  size_t adjust_object_size_v(size_t size) const {
    return adjustObjectSize(size);
  }
  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
  void      removeFreeChunkFromFreeLists(FreeChunk* chunk);
  void      addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
              bool coalesced);

  // Support for decisions regarding concurrent collection policy
  bool should_concurrent_collect() const;

  // Support for compaction
  void prepare_for_compaction(CompactPoint* cp);
  void adjust_pointers();
  void compact();
  // reset the space to reflect the fact that a compaction of the
  // space has been done.
  virtual void reset_after_compaction();

  // Debugging support
  void print()                            const;
  void prepare_for_verify();
  void verify(bool allow_dirty)           const;
  void verifyFreeLists()                  const PRODUCT_RETURN;
  void verifyIndexedFreeLists()           const;
  void verifyIndexedFreeList(size_t size) const;
  // verify that the given chunk is in the free lists.
  bool verifyChunkInFreeLists(FreeChunk* fc) const;
  // Do some basic checks on the free lists.
  void checkFreeListConsistency()         const PRODUCT_RETURN;

  NOT_PRODUCT (
    void initializeIndexedFreeListArrayReturnedBytes();
    size_t sumIndexedFreeListArrayReturnedBytes();
    // Return the total number of chunks in the indexed free lists.
    size_t totalCountInIndexedFreeLists() const;
    // Return the total number of chunks in the space.
    size_t totalCount();
  )

  // The census consists of counts of the quantities such as
  // the current count of the free chunks, number of chunks
  // created as a result of the split of a larger chunk or
  // coalescing of smaller chunks, etc.  The counts in the
  // census are used to make decisions on splitting and
  // coalescing of chunks during the sweep of garbage.

  // Print the statistics for the free lists.
  void printFLCensus(size_t sweep_count) const;

  // Statistics functions
  // Initialize census for lists before the sweep.
  void beginSweepFLCensus(float sweep_current,
                          float sweep_estimate);
  // Set the surplus for each of the free lists.
  void setFLSurplus();
  // Set the hint for each of the free lists.
  void setFLHints();
  // Clear the census for each of the free lists.
  void clearFLCensus();
  // Perform functions for the census after the end of the sweep.
  void endSweepFLCensus(size_t sweep_count);
  // Return true if the count of free chunks is greater
  // than the desired number of free chunks.
  bool coalOverPopulated(size_t size);

// Record (for each size):
//
//   split-births = #chunks added due to splits in (prev-sweep-end,
//      this-sweep-start)
//   split-deaths = #chunks removed for splits in (prev-sweep-end,
//      this-sweep-start)
//   num-curr     = #chunks at start of this sweep
//   num-prev     = #chunks at end of previous sweep
//
// The above are quantities that are measured. Now define:
//
//   num-desired := num-prev + split-births - split-deaths - num-curr
//
// Roughly, num-prev + split-births is the supply,
// split-deaths is demand due to other sizes
// and num-curr is what we have left.
//
// Thus, num-desired is roughly speaking the "legitimate demand"
// for blocks of this size and what we are striving to reach at the
// end of the current sweep.
//
// For a given list, let num-len be its current population.
// Define, for a free list of a given size:
//
//   coal-overpopulated := num-len >= num-desired * coal-surplus
// (coal-surplus is set to 1.05, i.e. we allow a little slop when
// coalescing -- we do not coalesce unless we think that the current
// supply has exceeded the estimated demand by more than 5%).
//
// For the set of sizes in the binary tree, which is neither dense nor
// closed, it may be the case that for a particular size we have never
// had, or do not now have, or did not have at the previous sweep,
// chunks of that size. We need to extend the definition of
// coal-overpopulated to such sizes as well:
//
//   For a chunk in/not in the binary tree, extend coal-overpopulated
//   defined above to include all sizes as follows:
//
//   . a size that is non-existent is coal-overpopulated
//   . a size that has a num-desired <= 0 as defined above is
//     coal-overpopulated.
//
// Also define, for a chunk heap-offset C and mountain heap-offset M:
//
//   close-to-mountain := C >= 0.99 * M
//
// Now, the coalescing strategy is:
//
//    Coalesce left-hand chunk with right-hand chunk if and
//    only if:
//
//      EITHER
//        . left-hand chunk is of a size that is coal-overpopulated
//      OR
//        . right-hand chunk is close-to-mountain
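//
// Editor's worked example of the arithmetic above (illustrative numbers only):
// suppose that for some size num-prev = 100, split-births = 40,
// split-deaths = 10 and num-curr = 90.  Then
//
//   num-desired = 100 + 40 - 10 - 90 = 40
//
// and, with coal-surplus = 1.05, a free list of that size is
// coal-overpopulated once its current length num-len reaches 42
// (42 >= 40 * 1.05), at which point a left-hand chunk of that size may be
// coalesced with its right-hand neighbor.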
  void smallCoalBirth(size_t size);
  void smallCoalDeath(size_t size);
  void coalBirth(size_t size);
  void coalDeath(size_t size);
  void smallSplitBirth(size_t size);
  void smallSplitDeath(size_t size);
  void splitBirth(size_t size);
  void splitDeath(size_t size);
  void split(size_t from, size_t to1);

  double flsFrag() const;
};

// A parallel-GC-thread-local allocation buffer for allocation into a
// CompactibleFreeListSpace.
class CFLS_LAB : public CHeapObj {
  // The space that this buffer allocates into.
  CompactibleFreeListSpace* _cfls;

  // Our local free lists.
  FreeList _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];

  // Initialized from a command-line arg.
  size_t _blocks_to_claim;

#if CFLS_LAB_REFILL_STATS
  // Some statistics.
  int _refills;
  int _blocksTaken;
  static int _tot_refills;
  static int _tot_blocksTaken;
  static int _next_threshold;
#endif

public:
  CFLS_LAB(CompactibleFreeListSpace* cfls);

  // Allocate and return a block of the given size, or else return NULL.
  HeapWord* alloc(size_t word_sz);

  // Return any unused portions of the buffer to the global pool.
  void retire();
};

size_t PromotionInfo::refillSize() const {
  const size_t CMSSpoolBlockSize = 256;
  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop)
                                   * CMSSpoolBlockSize);
  return CompactibleFreeListSpace::adjustObjectSize(sz);
}
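
// Editor's note (illustrative, with assumed sizes): on a 64-bit VM where
// sizeof(markOop) == 8, the requested payload is 256 * 8 == 2048 bytes plus
// the SpoolBlock header; heap_word_size() converts that byte count to heap
// words and adjustObjectSize() rounds the result up to a legal chunk size
// for this space.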
