src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author       jrose
date         Wed, 23 Sep 2009 23:57:44 -0700
changeset    1429:753cf9794df9
parent       1014:0fbdb4381b99
child        1580:e018e6884bd8
permissions  -rw-r--r--

6885169: merge of 4957990 and 6863023 causes conflict on do_nmethods
Summary: After mechanically merging changes, some by-hand adjustments are needed.
Reviewed-by: ysr

     1 /*
     2  * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_compactibleFreeListSpace.cpp.incl"
    28 /////////////////////////////////////////////////////////////////////////
    29 //// CompactibleFreeListSpace
    30 /////////////////////////////////////////////////////////////////////////
    32 // highest ranked  free list lock rank
    33 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
    35 // Constructor
    36 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
    37   MemRegion mr, bool use_adaptive_freelists,
    38   FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
    39   _dictionaryChoice(dictionaryChoice),
    40   _adaptive_freelists(use_adaptive_freelists),
    41   _bt(bs, mr),
    42   // free list locks are in the range of values taken by _lockRank
    43   // This range currently is [_leaf+2, _leaf+3]
    44   // Note: this requires that CFLspace c'tors
     45   // are called serially in the order in which the locks
     46   // are acquired in the program text. This is true today.
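          // (The post-decrement of _lockRank in the initializer below gives each
          //  successive CompactibleFreeListSpace instance the next lower lock rank,
          //  which keeps the free list locks within the range noted above.)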
    47   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
    48   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
    49                           "CompactibleFreeListSpace._dict_par_lock", true),
    50   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    51                     CMSRescanMultiple),
    52   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    53                     CMSConcMarkMultiple),
    54   _collector(NULL)
    55 {
    56   _bt.set_space(this);
    57   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    58   // We have all of "mr", all of which we place in the dictionary
    59   // as one big chunk. We'll need to decide here which of several
    60   // possible alternative dictionary implementations to use. For
    61   // now the choice is easy, since we have only one working
    62   // implementation, namely, the simple binary tree (splaying
    63   // temporarily disabled).
    64   switch (dictionaryChoice) {
    65     case FreeBlockDictionary::dictionaryBinaryTree:
    66       _dictionary = new BinaryTreeDictionary(mr);
    67       break;
    68     case FreeBlockDictionary::dictionarySplayTree:
    69     case FreeBlockDictionary::dictionarySkipList:
    70     default:
    71       warning("dictionaryChoice: selected option not understood; using"
    72               " default BinaryTreeDictionary implementation instead.");
    73       _dictionary = new BinaryTreeDictionary(mr);
    74       break;
    75   }
    76   splitBirth(mr.word_size());
    77   assert(_dictionary != NULL, "CMS dictionary initialization");
    78   // The indexed free lists are initially all empty and are lazily
    79   // filled in on demand. Initialize the array elements to NULL.
    80   initializeIndexedFreeListArray();
    82   // Not using adaptive free lists assumes that allocation is first
    83   // from the linAB's.  Also a cms perm gen which can be compacted
    84   // has to have the klass's klassKlass allocated at a lower
    85   // address in the heap than the klass so that the klassKlass is
    86   // moved to its new location before the klass is moved.
    87   // Set the _refillSize for the linear allocation blocks
    88   if (!use_adaptive_freelists) {
    89     FreeChunk* fc = _dictionary->getChunk(mr.word_size());
    90     // The small linAB initially has all the space and will allocate
    91     // a chunk of any size.
    92     HeapWord* addr = (HeapWord*) fc;
    93     _smallLinearAllocBlock.set(addr, fc->size() ,
    94       1024*SmallForLinearAlloc, fc->size());
    95     // Note that _unallocated_block is not updated here.
    96     // Allocations from the linear allocation block should
    97     // update it.
    98   } else {
    99     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
   100                                SmallForLinearAlloc);
   101   }
   102   // CMSIndexedFreeListReplenish should be at least 1
   103   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
   104   _promoInfo.setSpace(this);
   105   if (UseCMSBestFit) {
   106     _fitStrategy = FreeBlockBestFitFirst;
   107   } else {
   108     _fitStrategy = FreeBlockStrategyNone;
   109   }
   110   checkFreeListConsistency();
   112   // Initialize locks for parallel case.
   113   if (ParallelGCThreads > 0) {
   114     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   115       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
   116                                               "a freelist par lock",
   117                                               true);
   118       if (_indexedFreeListParLocks[i] == NULL)
   119         vm_exit_during_initialization("Could not allocate a par lock");
   120       DEBUG_ONLY(
   121         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
   122       )
   123     }
   124     _dictionary->set_par_lock(&_parDictionaryAllocLock);
   125   }
   126 }
   128 // Like CompactibleSpace forward() but always calls cross_threshold() to
   129 // update the block offset table.  Removed initialize_threshold call because
   130 // CFLS does not use a block offset array for contiguous spaces.
   131 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
   132                                     CompactPoint* cp, HeapWord* compact_top) {
   133   // q is alive
   134   // First check if we should switch compaction space
   135   assert(this == cp->space, "'this' should be current compaction space.");
   136   size_t compaction_max_size = pointer_delta(end(), compact_top);
   137   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
   138     "virtual adjustObjectSize_v() method is not correct");
   139   size_t adjusted_size = adjustObjectSize(size);
   140   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
   141          "no small fragments allowed");
   142   assert(minimum_free_block_size() == MinChunkSize,
   143          "for de-virtualized reference below");
    144   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
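          // (i.e., the object must either fill the remaining space exactly or
          //  leave at least MinChunkSize words free behind it)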
   145   if (adjusted_size + MinChunkSize > compaction_max_size &&
   146       adjusted_size != compaction_max_size) {
   147     do {
   148       // switch to next compaction space
   149       cp->space->set_compaction_top(compact_top);
   150       cp->space = cp->space->next_compaction_space();
   151       if (cp->space == NULL) {
   152         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
   153         assert(cp->gen != NULL, "compaction must succeed");
   154         cp->space = cp->gen->first_compaction_space();
   155         assert(cp->space != NULL, "generation must have a first compaction space");
   156       }
   157       compact_top = cp->space->bottom();
   158       cp->space->set_compaction_top(compact_top);
   159       // The correct adjusted_size may not be the same as that for this method
    160       // (i.e., cp->space may no longer be "this"), so adjust the size again.
   161       // Use the virtual method which is not used above to save the virtual
   162       // dispatch.
   163       adjusted_size = cp->space->adjust_object_size_v(size);
   164       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
   165       assert(cp->space->minimum_free_block_size() == 0, "just checking");
   166     } while (adjusted_size > compaction_max_size);
   167   }
   169   // store the forwarding pointer into the mark word
   170   if ((HeapWord*)q != compact_top) {
   171     q->forward_to(oop(compact_top));
   172     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
   173   } else {
   174     // if the object isn't moving we can just set the mark to the default
   175     // mark and handle it specially later on.
   176     q->init_mark();
   177     assert(q->forwardee() == NULL, "should be forwarded to NULL");
   178   }
   180   VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
   181   compact_top += adjusted_size;
   183   // we need to update the offset table so that the beginnings of objects can be
   184   // found during scavenge.  Note that we are updating the offset table based on
   185   // where the object will be once the compaction phase finishes.
   187   // Always call cross_threshold().  A contiguous space can only call it when
    188   // the compaction_top exceeds the current threshold but not for a
   189   // non-contiguous space.
   190   cp->threshold =
   191     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
   192   return compact_top;
   193 }
   195 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
   196 // and use of single_block instead of alloc_block.  The name here is not really
   197 // appropriate - maybe a more general name could be invented for both the
   198 // contiguous and noncontiguous spaces.
   200 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
   201   _bt.single_block(start, the_end);
   202   return end();
   203 }
   205 // Initialize them to NULL.
   206 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
   207   for (size_t i = 0; i < IndexSetSize; i++) {
   208     // Note that on platforms where objects are double word aligned,
   209     // the odd array elements are not used.  It is convenient, however,
   210     // to map directly from the object size to the array element.
   211     _indexedFreeList[i].reset(IndexSetSize);
   212     _indexedFreeList[i].set_size(i);
   213     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   214     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   215     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   216     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   217   }
   218 }
   220 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
   221   for (int i = 1; i < IndexSetSize; i++) {
   222     assert(_indexedFreeList[i].size() == (size_t) i,
   223       "Indexed free list sizes are incorrect");
   224     _indexedFreeList[i].reset(IndexSetSize);
   225     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   226     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   227     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   228     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   229   }
   230 }
   232 void CompactibleFreeListSpace::reset(MemRegion mr) {
   233   resetIndexedFreeListArray();
   234   dictionary()->reset();
   235   if (BlockOffsetArrayUseUnallocatedBlock) {
   236     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
   237     // Everything's allocated until proven otherwise.
   238     _bt.set_unallocated_block(end());
   239   }
   240   if (!mr.is_empty()) {
   241     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
   242     _bt.single_block(mr.start(), mr.word_size());
   243     FreeChunk* fc = (FreeChunk*) mr.start();
   244     fc->setSize(mr.word_size());
   245     if (mr.word_size() >= IndexSetSize ) {
   246       returnChunkToDictionary(fc);
   247     } else {
   248       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
   249       _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
   250     }
   251   }
   252   _promoInfo.reset();
   253   _smallLinearAllocBlock._ptr = NULL;
   254   _smallLinearAllocBlock._word_size = 0;
   255 }
   257 void CompactibleFreeListSpace::reset_after_compaction() {
   258   // Reset the space to the new reality - one free chunk.
   259   MemRegion mr(compaction_top(), end());
   260   reset(mr);
   261   // Now refill the linear allocation block(s) if possible.
   262   if (_adaptive_freelists) {
   263     refillLinearAllocBlocksIfNeeded();
   264   } else {
   265     // Place as much of mr in the linAB as we can get,
   266     // provided it was big enough to go into the dictionary.
   267     FreeChunk* fc = dictionary()->findLargestDict();
   268     if (fc != NULL) {
   269       assert(fc->size() == mr.word_size(),
   270              "Why was the chunk broken up?");
   271       removeChunkFromDictionary(fc);
   272       HeapWord* addr = (HeapWord*) fc;
   273       _smallLinearAllocBlock.set(addr, fc->size() ,
   274         1024*SmallForLinearAlloc, fc->size());
   275       // Note that _unallocated_block is not updated here.
   276     }
   277   }
   278 }
   280 // Walks the entire dictionary, returning a coterminal
   281 // chunk, if it exists. Use with caution since it involves
   282 // a potentially complete walk of a potentially large tree.
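        // ("Coterminal" here means a free chunk whose last word abuts end(),
        //  i.e. the chunk, if any, returned by find_chunk_ends_at(end()) below.)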
   283 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
   285   assert_lock_strong(&_freelistLock);
   287   return dictionary()->find_chunk_ends_at(end());
   288 }
   291 #ifndef PRODUCT
   292 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
   293   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   294     _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
   295   }
   296 }
   298 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
   299   size_t sum = 0;
   300   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   301     sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
   302   }
   303   return sum;
   304 }
   306 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
   307   size_t count = 0;
   308   for (int i = MinChunkSize; i < IndexSetSize; i++) {
   309     debug_only(
   310       ssize_t total_list_count = 0;
   311       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   312          fc = fc->next()) {
   313         total_list_count++;
   314       }
   315       assert(total_list_count ==  _indexedFreeList[i].count(),
   316         "Count in list is incorrect");
   317     )
   318     count += _indexedFreeList[i].count();
   319   }
   320   return count;
   321 }
   323 size_t CompactibleFreeListSpace::totalCount() {
   324   size_t num = totalCountInIndexedFreeLists();
   325   num +=  dictionary()->totalCount();
   326   if (_smallLinearAllocBlock._word_size != 0) {
   327     num++;
   328   }
   329   return num;
   330 }
   331 #endif
   333 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
   334   FreeChunk* fc = (FreeChunk*) p;
   335   return fc->isFree();
   336 }
   338 size_t CompactibleFreeListSpace::used() const {
   339   return capacity() - free();
   340 }
   342 size_t CompactibleFreeListSpace::free() const {
   343   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   344   // if you do this while the structures are in flux you
   345   // may get an approximate answer only; for instance
   346   // because there is concurrent allocation either
   347   // directly by mutators or for promotion during a GC.
   348   // It's "MT-safe", however, in the sense that you are guaranteed
   349   // not to crash and burn, for instance, because of walking
   350   // pointers that could disappear as you were walking them.
   351   // The approximation is because the various components
   352   // that are read below are not read atomically (and
   353   // further the computation of totalSizeInIndexedFreeLists()
    354   // is itself a non-atomic computation). The normal use of
   355   // this is during a resize operation at the end of GC
   356   // and at that time you are guaranteed to get the
   357   // correct actual value. However, for instance, this is
   358   // also read completely asynchronously by the "perf-sampler"
   359   // that supports jvmstat, and you are apt to see the values
   360   // flicker in such cases.
   361   assert(_dictionary != NULL, "No _dictionary?");
   362   return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
   363           totalSizeInIndexedFreeLists() +
   364           _smallLinearAllocBlock._word_size) * HeapWordSize;
   365 }
   367 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
   368   assert(_dictionary != NULL, "No _dictionary?");
   369   assert_locked();
   370   size_t res = _dictionary->maxChunkSize();
   371   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
   372                        (size_t) SmallForLinearAlloc - 1));
   373   // XXX the following could potentially be pretty slow;
    374   // should one, pessimally for the rare cases when res
    375   // calculated above is less than IndexSetSize,
   376   // just return res calculated above? My reasoning was that
   377   // those cases will be so rare that the extra time spent doesn't
   378   // really matter....
   379   // Note: do not change the loop test i >= res + IndexSetStride
   380   // to i > res below, because i is unsigned and res may be zero.
   381   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
   382        i -= IndexSetStride) {
   383     if (_indexedFreeList[i].head() != NULL) {
   384       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   385       return i;
   386     }
   387   }
   388   return res;
   389 }
   391 void CompactibleFreeListSpace::reportFreeListStatistics() const {
   392   assert_lock_strong(&_freelistLock);
   393   assert(PrintFLSStatistics != 0, "Reporting error");
   394   _dictionary->reportStatistics();
   395   if (PrintFLSStatistics > 1) {
   396     reportIndexedFreeListStatistics();
   397     size_t totalSize = totalSizeInIndexedFreeLists() +
   398                        _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
   399     gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
   400   }
   401 }
   403 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
   404   assert_lock_strong(&_freelistLock);
   405   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
   406                       "--------------------------------\n");
   407   size_t totalSize = totalSizeInIndexedFreeLists();
   408   size_t   freeBlocks = numFreeBlocksInIndexedFreeLists();
   409   gclog_or_tty->print("Total Free Space: %d\n", totalSize);
   410   gclog_or_tty->print("Max   Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
   411   gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
   412   if (freeBlocks != 0) {
   413     gclog_or_tty->print("Av.  Block  Size: %d\n", totalSize/freeBlocks);
   414   }
   415 }
   417 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
   418   size_t res = 0;
   419   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   420     debug_only(
   421       ssize_t recount = 0;
   422       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   423          fc = fc->next()) {
   424         recount += 1;
   425       }
   426       assert(recount == _indexedFreeList[i].count(),
   427         "Incorrect count in list");
   428     )
   429     res += _indexedFreeList[i].count();
   430   }
   431   return res;
   432 }
   434 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
   435   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
   436     if (_indexedFreeList[i].head() != NULL) {
   437       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   438       return (size_t)i;
   439     }
   440   }
   441   return 0;
   442 }
   444 void CompactibleFreeListSpace::set_end(HeapWord* value) {
   445   HeapWord* prevEnd = end();
   446   assert(prevEnd != value, "unnecessary set_end call");
   447   assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
   448   _end = value;
   449   if (prevEnd != NULL) {
   450     // Resize the underlying block offset table.
   451     _bt.resize(pointer_delta(value, bottom()));
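          // Note: the if/else below is still inside the (prevEnd != NULL) block
          // above, despite its indentation; it distinguishes shrinking the space
          // (value <= prevEnd) from growing it.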
   452   if (value <= prevEnd) {
   453     assert(value >= unallocated_block(), "New end is below unallocated block");
   454   } else {
   455     // Now, take this new chunk and add it to the free blocks.
   456     // Note that the BOT has not yet been updated for this block.
   457     size_t newFcSize = pointer_delta(value, prevEnd);
   458     // XXX This is REALLY UGLY and should be fixed up. XXX
   459     if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
   460       // Mark the boundary of the new block in BOT
   461       _bt.mark_block(prevEnd, value);
   462       // put it all in the linAB
   463       if (ParallelGCThreads == 0) {
   464         _smallLinearAllocBlock._ptr = prevEnd;
   465         _smallLinearAllocBlock._word_size = newFcSize;
   466         repairLinearAllocBlock(&_smallLinearAllocBlock);
   467       } else { // ParallelGCThreads > 0
   468         MutexLockerEx x(parDictionaryAllocLock(),
   469                         Mutex::_no_safepoint_check_flag);
   470         _smallLinearAllocBlock._ptr = prevEnd;
   471         _smallLinearAllocBlock._word_size = newFcSize;
   472         repairLinearAllocBlock(&_smallLinearAllocBlock);
   473       }
   474       // Births of chunks put into a LinAB are not recorded.  Births
   475       // of chunks as they are allocated out of a LinAB are.
   476     } else {
   477       // Add the block to the free lists, if possible coalescing it
   478       // with the last free block, and update the BOT and census data.
   479       addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
   480     }
   481   }
   482   }
   483 }
   485 class FreeListSpace_DCTOC : public Filtering_DCTOC {
   486   CompactibleFreeListSpace* _cfls;
   487   CMSCollector* _collector;
   488 protected:
   489   // Override.
   490 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
   491   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
   492                                        HeapWord* bottom, HeapWord* top, \
   493                                        ClosureType* cl);                \
   494       void walk_mem_region_with_cl_par(MemRegion mr,                    \
   495                                        HeapWord* bottom, HeapWord* top, \
   496                                        ClosureType* cl);                \
   497     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
   498                                        HeapWord* bottom, HeapWord* top, \
   499                                        ClosureType* cl)
   500   walk_mem_region_with_cl_DECL(OopClosure);
   501   walk_mem_region_with_cl_DECL(FilteringClosure);
   503 public:
   504   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
   505                       CMSCollector* collector,
   506                       OopClosure* cl,
   507                       CardTableModRefBS::PrecisionStyle precision,
   508                       HeapWord* boundary) :
   509     Filtering_DCTOC(sp, cl, precision, boundary),
   510     _cfls(sp), _collector(collector) {}
   511 };
   513 // We de-virtualize the block-related calls below, since we know that our
   514 // space is a CompactibleFreeListSpace.
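        // (The explicit "CompactibleFreeListSpace::" qualification in the calls
        //  below binds them statically, avoiding virtual dispatch on this hot
        //  card-scanning path.)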
   515 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
   516 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
   517                                                  HeapWord* bottom,              \
   518                                                  HeapWord* top,                 \
   519                                                  ClosureType* cl) {             \
   520    if (SharedHeap::heap()->n_par_threads() > 0) {                               \
   521      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   522    } else {                                                                     \
   523      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   524    }                                                                            \
   525 }                                                                               \
   526 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
   527                                                       HeapWord* bottom,         \
   528                                                       HeapWord* top,            \
   529                                                       ClosureType* cl) {        \
   530   /* Skip parts that are before "mr", in case "block_start" sent us             \
   531      back too far. */                                                           \
   532   HeapWord* mr_start = mr.start();                                              \
   533   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
   534   HeapWord* next = bottom + bot_size;                                           \
   535   while (next < mr_start) {                                                     \
   536     bottom = next;                                                              \
   537     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
   538     next = bottom + bot_size;                                                   \
   539   }                                                                             \
   540                                                                                 \
   541   while (bottom < top) {                                                        \
   542     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
   543         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   544                     oop(bottom)) &&                                             \
   545         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   546       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   547       bottom += _cfls->adjustObjectSize(word_sz);                               \
   548     } else {                                                                    \
   549       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
   550     }                                                                           \
   551   }                                                                             \
   552 }                                                                               \
   553 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
   554                                                         HeapWord* bottom,       \
   555                                                         HeapWord* top,          \
   556                                                         ClosureType* cl) {      \
   557   /* Skip parts that are before "mr", in case "block_start" sent us             \
   558      back too far. */                                                           \
   559   HeapWord* mr_start = mr.start();                                              \
   560   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
   561   HeapWord* next = bottom + bot_size;                                           \
   562   while (next < mr_start) {                                                     \
   563     bottom = next;                                                              \
   564     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
   565     next = bottom + bot_size;                                                   \
   566   }                                                                             \
   567                                                                                 \
   568   while (bottom < top) {                                                        \
   569     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
   570         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   571                     oop(bottom)) &&                                             \
   572         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   573       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   574       bottom += _cfls->adjustObjectSize(word_sz);                               \
   575     } else {                                                                    \
   576       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
   577     }                                                                           \
   578   }                                                                             \
   579 }
   581 // (There are only two of these, rather than N, because the split is due
   582 // only to the introduction of the FilteringClosure, a local part of the
   583 // impl of this abstraction.)
   584 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
   585 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
   587 DirtyCardToOopClosure*
   588 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
   589                                       CardTableModRefBS::PrecisionStyle precision,
   590                                       HeapWord* boundary) {
   591   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
   592 }
   595 // Note on locking for the space iteration functions:
   596 // since the collector's iteration activities are concurrent with
   597 // allocation activities by mutators, absent a suitable mutual exclusion
    598 // mechanism the iterators may go awry. For instance a block being iterated
   599 // may suddenly be allocated or divided up and part of it allocated and
   600 // so on.
   602 // Apply the given closure to each block in the space.
   603 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
   604   assert_lock_strong(freelistLock());
   605   HeapWord *cur, *limit;
   606   for (cur = bottom(), limit = end(); cur < limit;
   607        cur += cl->do_blk_careful(cur));
   608 }
   610 // Apply the given closure to each block in the space.
   611 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
   612   assert_lock_strong(freelistLock());
   613   HeapWord *cur, *limit;
   614   for (cur = bottom(), limit = end(); cur < limit;
   615        cur += cl->do_blk(cur));
   616 }
   618 // Apply the given closure to each oop in the space.
   619 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
   620   assert_lock_strong(freelistLock());
   621   HeapWord *cur, *limit;
   622   size_t curSize;
   623   for (cur = bottom(), limit = end(); cur < limit;
   624        cur += curSize) {
   625     curSize = block_size(cur);
   626     if (block_is_obj(cur)) {
   627       oop(cur)->oop_iterate(cl);
   628     }
   629   }
   630 }
   632 // Apply the given closure to each oop in the space \intersect memory region.
   633 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
   634   assert_lock_strong(freelistLock());
   635   if (is_empty()) {
   636     return;
   637   }
   638   MemRegion cur = MemRegion(bottom(), end());
   639   mr = mr.intersection(cur);
   640   if (mr.is_empty()) {
   641     return;
   642   }
   643   if (mr.equals(cur)) {
   644     oop_iterate(cl);
   645     return;
   646   }
   647   assert(mr.end() <= end(), "just took an intersection above");
   648   HeapWord* obj_addr = block_start(mr.start());
   649   HeapWord* t = mr.end();
   651   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
   652   if (block_is_obj(obj_addr)) {
   653     // Handle first object specially.
   654     oop obj = oop(obj_addr);
   655     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
   656   } else {
   657     FreeChunk* fc = (FreeChunk*)obj_addr;
   658     obj_addr += fc->size();
   659   }
   660   while (obj_addr < t) {
   661     HeapWord* obj = obj_addr;
   662     obj_addr += block_size(obj_addr);
   663     // If "obj_addr" is not greater than top, then the
   664     // entire object "obj" is within the region.
   665     if (obj_addr <= t) {
   666       if (block_is_obj(obj)) {
   667         oop(obj)->oop_iterate(cl);
   668       }
   669     } else {
   670       // "obj" extends beyond end of region
   671       if (block_is_obj(obj)) {
   672         oop(obj)->oop_iterate(&smr_blk);
   673       }
   674       break;
   675     }
   676   }
   677 }
   679 // NOTE: In the following methods, in order to safely be able to
   680 // apply the closure to an object, we need to be sure that the
   681 // object has been initialized. We are guaranteed that an object
   682 // is initialized if we are holding the Heap_lock with the
   683 // world stopped.
   684 void CompactibleFreeListSpace::verify_objects_initialized() const {
   685   if (is_init_completed()) {
   686     assert_locked_or_safepoint(Heap_lock);
   687     if (Universe::is_fully_initialized()) {
   688       guarantee(SafepointSynchronize::is_at_safepoint(),
   689                 "Required for objects to be initialized");
   690     }
   691   } // else make a concession at vm start-up
   692 }
   694 // Apply the given closure to each object in the space
   695 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
   696   assert_lock_strong(freelistLock());
   697   NOT_PRODUCT(verify_objects_initialized());
   698   HeapWord *cur, *limit;
   699   size_t curSize;
   700   for (cur = bottom(), limit = end(); cur < limit;
   701        cur += curSize) {
   702     curSize = block_size(cur);
   703     if (block_is_obj(cur)) {
   704       blk->do_object(oop(cur));
   705     }
   706   }
   707 }
   709 // Apply the given closure to each live object in the space
   710 //   The usage of CompactibleFreeListSpace
   711 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
   712 // objects in the space with references to objects that are no longer
   713 // valid.  For example, an object may reference another object
    714 // that has already been swept up (collected).  This method uses
   715 // obj_is_alive() to determine whether it is safe to apply the closure to
   716 // an object.  See obj_is_alive() for details on how liveness of an
   717 // object is decided.
   719 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
   720   assert_lock_strong(freelistLock());
   721   NOT_PRODUCT(verify_objects_initialized());
   722   HeapWord *cur, *limit;
   723   size_t curSize;
   724   for (cur = bottom(), limit = end(); cur < limit;
   725        cur += curSize) {
   726     curSize = block_size(cur);
   727     if (block_is_obj(cur) && obj_is_alive(cur)) {
   728       blk->do_object(oop(cur));
   729     }
   730   }
   731 }
   733 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
   734                                                   UpwardsObjectClosure* cl) {
   735   assert_locked();
   736   NOT_PRODUCT(verify_objects_initialized());
   737   Space::object_iterate_mem(mr, cl);
   738 }
   740 // Callers of this iterator beware: The closure application should
   741 // be robust in the face of uninitialized objects and should (always)
   742 // return a correct size so that the next addr + size below gives us a
   743 // valid block boundary. [See for instance,
   744 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   745 // in ConcurrentMarkSweepGeneration.cpp.]
   746 HeapWord*
   747 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
   748   assert_lock_strong(freelistLock());
   749   HeapWord *addr, *last;
   750   size_t size;
   751   for (addr = bottom(), last  = end();
   752        addr < last; addr += size) {
   753     FreeChunk* fc = (FreeChunk*)addr;
   754     if (fc->isFree()) {
   755       // Since we hold the free list lock, which protects direct
   756       // allocation in this generation by mutators, a free object
   757       // will remain free throughout this iteration code.
   758       size = fc->size();
   759     } else {
   760       // Note that the object need not necessarily be initialized,
   761       // because (for instance) the free list lock does NOT protect
   762       // object initialization. The closure application below must
   763       // therefore be correct in the face of uninitialized objects.
   764       size = cl->do_object_careful(oop(addr));
   765       if (size == 0) {
   766         // An unparsable object found. Signal early termination.
   767         return addr;
   768       }
   769     }
   770   }
   771   return NULL;
   772 }
   774 // Callers of this iterator beware: The closure application should
   775 // be robust in the face of uninitialized objects and should (always)
   776 // return a correct size so that the next addr + size below gives us a
   777 // valid block boundary. [See for instance,
   778 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   779 // in ConcurrentMarkSweepGeneration.cpp.]
   780 HeapWord*
   781 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
   782   ObjectClosureCareful* cl) {
   783   assert_lock_strong(freelistLock());
   784   // Can't use used_region() below because it may not necessarily
   785   // be the same as [bottom(),end()); although we could
   786   // use [used_region().start(),round_to(used_region().end(),CardSize)),
   787   // that appears too cumbersome, so we just do the simpler check
   788   // in the assertion below.
   789   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
   790          "mr should be non-empty and within used space");
   791   HeapWord *addr, *end;
   792   size_t size;
   793   for (addr = block_start_careful(mr.start()), end  = mr.end();
   794        addr < end; addr += size) {
   795     FreeChunk* fc = (FreeChunk*)addr;
   796     if (fc->isFree()) {
   797       // Since we hold the free list lock, which protects direct
   798       // allocation in this generation by mutators, a free object
   799       // will remain free throughout this iteration code.
   800       size = fc->size();
   801     } else {
   802       // Note that the object need not necessarily be initialized,
   803       // because (for instance) the free list lock does NOT protect
   804       // object initialization. The closure application below must
   805       // therefore be correct in the face of uninitialized objects.
   806       size = cl->do_object_careful_m(oop(addr), mr);
   807       if (size == 0) {
   808         // An unparsable object found. Signal early termination.
   809         return addr;
   810       }
   811     }
   812   }
   813   return NULL;
   814 }
   817 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
   818   NOT_PRODUCT(verify_objects_initialized());
   819   return _bt.block_start(p);
   820 }
   822 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
   823   return _bt.block_start_careful(p);
   824 }
   826 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   827   NOT_PRODUCT(verify_objects_initialized());
   828   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
   829   // This must be volatile, or else there is a danger that the compiler
   830   // will compile the code below into a sometimes-infinite loop, by keeping
   831   // the value read the first time in a register.
   832   while (true) {
   833     // We must do this until we get a consistent view of the object.
   834     if (FreeChunk::indicatesFreeChunk(p)) {
   835       volatile FreeChunk* fc = (volatile FreeChunk*)p;
   836       size_t res = fc->size();
   837       // If the object is still a free chunk, return the size, else it
   838       // has been allocated so try again.
   839       if (FreeChunk::indicatesFreeChunk(p)) {
   840         assert(res != 0, "Block size should not be 0");
   841         return res;
   842       }
   843     } else {
   844       // must read from what 'p' points to in each loop.
   845       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
   846       if (k != NULL) {
   847         assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
   848         oop o = (oop)p;
   849         assert(o->is_parsable(), "Should be parsable");
   850         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
   851         size_t res = o->size_given_klass(k->klass_part());
   852         res = adjustObjectSize(res);
   853         assert(res != 0, "Block size should not be 0");
   854         return res;
   855       }
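              // Otherwise k == NULL: the object's klass has not been published yet
              // (a racing allocation is presumably still initializing it), so loop
              // and re-read until a free-chunk marker or a klass pointer is visible.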
   856     }
   857   }
   858 }
   860 // A variant of the above that uses the Printezis bits for
   861 // unparsable but allocated objects. This avoids any possible
   862 // stalls waiting for mutators to initialize objects, and is
   863 // thus potentially faster than the variant above. However,
   864 // this variant may return a zero size for a block that is
   865 // under mutation and for which a consistent size cannot be
   866 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
   867 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
   868                                                      const CMSCollector* c)
   869 const {
   870   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
   871   // This must be volatile, or else there is a danger that the compiler
   872   // will compile the code below into a sometimes-infinite loop, by keeping
   873   // the value read the first time in a register.
   874   DEBUG_ONLY(uint loops = 0;)
   875   while (true) {
   876     // We must do this until we get a consistent view of the object.
   877     if (FreeChunk::indicatesFreeChunk(p)) {
   878       volatile FreeChunk* fc = (volatile FreeChunk*)p;
   879       size_t res = fc->size();
   880       if (FreeChunk::indicatesFreeChunk(p)) {
   881         assert(res != 0, "Block size should not be 0");
   882         assert(loops == 0, "Should be 0");
   883         return res;
   884       }
   885     } else {
   886       // must read from what 'p' points to in each loop.
   887       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
   888       if (k != NULL &&
   889           ((oopDesc*)p)->is_parsable() &&
   890           ((oopDesc*)p)->is_conc_safe()) {
   891         assert(k->is_oop(), "Should really be klass oop.");
   892         oop o = (oop)p;
   893         assert(o->is_oop(), "Should be an oop");
   894         size_t res = o->size_given_klass(k->klass_part());
   895         res = adjustObjectSize(res);
   896         assert(res != 0, "Block size should not be 0");
   897         return res;
   898       } else {
   899         return c->block_size_if_printezis_bits(p);
   900       }
   901     }
   902     assert(loops == 0, "Can loop at most once");
   903     DEBUG_ONLY(loops++;)
   904   }
   905 }
   907 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
   908   NOT_PRODUCT(verify_objects_initialized());
   909   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
   910   FreeChunk* fc = (FreeChunk*)p;
   911   if (fc->isFree()) {
   912     return fc->size();
   913   } else {
   914     // Ignore mark word because this may be a recently promoted
   915     // object whose mark word is used to chain together grey
   916     // objects (the last one would have a null value).
   917     assert(oop(p)->is_oop(true), "Should be an oop");
   918     return adjustObjectSize(oop(p)->size());
   919   }
   920 }
   922 // This implementation assumes that the property of "being an object" is
   923 // stable.  But being a free chunk may not be (because of parallel
   924 // promotion.)
   925 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
   926   FreeChunk* fc = (FreeChunk*)p;
   927   assert(is_in_reserved(p), "Should be in space");
   928   // When doing a mark-sweep-compact of the CMS generation, this
   929   // assertion may fail because prepare_for_compaction() uses
   930   // space that is garbage to maintain information on ranges of
   931   // live objects so that these live ranges can be moved as a whole.
   932   // Comment out this assertion until that problem can be solved
   933   // (i.e., that the block start calculation may look at objects
    934   // at addresses below "p" in finding the object that contains "p"
    935   // and those objects (if garbage) may have been modified to hold
    936   // live range information).
   937   // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
   938   if (FreeChunk::indicatesFreeChunk(p)) return false;
   939   klassOop k = oop(p)->klass_or_null();
   940   if (k != NULL) {
   941     // Ignore mark word because it may have been used to
   942     // chain together promoted objects (the last one
   943     // would have a null value).
   944     assert(oop(p)->is_oop(true), "Should be an oop");
   945     return true;
   946   } else {
   947     return false;  // Was not an object at the start of collection.
   948   }
   949 }
   951 // Check if the object is alive. This fact is checked either by consulting
   952 // the main marking bitmap in the sweeping phase or, if it's a permanent
   953 // generation and we're not in the sweeping phase, by checking the
   954 // perm_gen_verify_bit_map where we store the "deadness" information if
   955 // we did not sweep the perm gen in the most recent previous GC cycle.
   956 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
   957   assert (block_is_obj(p), "The address should point to an object");
   959   // If we're sweeping, we use object liveness information from the main bit map
   960   // for both perm gen and old gen.
   961   // We don't need to lock the bitmap (live_map or dead_map below), because
   962   // EITHER we are in the middle of the sweeping phase, and the
   963   // main marking bit map (live_map below) is locked,
   964   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
   965   // is stable, because it's mutated only in the sweeping phase.
   966   if (_collector->abstract_state() == CMSCollector::Sweeping) {
   967     CMSBitMap* live_map = _collector->markBitMap();
   968     return live_map->isMarked((HeapWord*) p);
   969   } else {
   970     // If we're not currently sweeping and we haven't swept the perm gen in
   971     // the previous concurrent cycle then we may have dead but unswept objects
   972     // in the perm gen. In this case, we use the "deadness" information
   973     // that we had saved in perm_gen_verify_bit_map at the last sweep.
   974     if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
   975       if (_collector->verifying()) {
   976         CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
   977         // Object is marked in the dead_map bitmap at the previous sweep
   978         // when we know that it's dead; if the bitmap is not allocated then
   979         // the object is alive.
    980         return (dead_map->sizeInBits() == 0) // bit_map was never allocated, so treat as alive
   981                || !dead_map->par_isMarked((HeapWord*) p);
   982       } else {
   983         return false; // We can't say for sure if it's live, so we say that it's dead.
   984       }
   985     }
   986   }
   987   return true;
   988 }
   990 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
   991   FreeChunk* fc = (FreeChunk*)p;
   992   assert(is_in_reserved(p), "Should be in space");
   993   assert(_bt.block_start(p) == p, "Should be a block boundary");
   994   if (!fc->isFree()) {
   995     // Ignore mark word because it may have been used to
   996     // chain together promoted objects (the last one
   997     // would have a null value).
   998     assert(oop(p)->is_oop(true), "Should be an oop");
    999     return true;
   1000   }
   1001   return false;
   1002 }
  1004 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
  1005 // approximate answer if you don't hold the freelistlock when you call this.
  1006 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
  1007   size_t size = 0;
  1008   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  1009     debug_only(
  1010       // We may be calling here without the lock in which case we
  1011       // won't do this modest sanity check.
  1012       if (freelistLock()->owned_by_self()) {
  1013         size_t total_list_size = 0;
  1014         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
  1015           fc = fc->next()) {
   1016           total_list_size += i;
   1017         }
   1018         assert(total_list_size == i * _indexedFreeList[i].count(),
   1019                "Count in list is incorrect");
   1020       }
   1021     )
   1022     size += i * _indexedFreeList[i].count();
   1023   }
   1024   return size;
   1025 }
  1027 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  1028   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
   1029   return allocate(size);
   1030 }
  1032 HeapWord*
  1033 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
   1034   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
   1035 }
  1037 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  1038   assert_lock_strong(freelistLock());
  1039   HeapWord* res = NULL;
  1040   assert(size == adjustObjectSize(size),
  1041          "use adjustObjectSize() before calling into allocate()");
  1043   if (_adaptive_freelists) {
  1044     res = allocate_adaptive_freelists(size);
  1045   } else {  // non-adaptive free lists
   1046     res = allocate_non_adaptive_freelists(size);
   1047   }
   1049   if (res != NULL) {
  1050     // check that res does lie in this space!
  1051     assert(is_in_reserved(res), "Not in this space!");
  1052     assert(is_aligned((void*)res), "alignment check");
  1054     FreeChunk* fc = (FreeChunk*)res;
  1055     fc->markNotFree();
  1056     assert(!fc->isFree(), "shouldn't be marked free");
  1057     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
  1058     // Verify that the block offset table shows this to
  1059     // be a single block, but not one which is unallocated.
  1060     _bt.verify_single_block(res, size);
  1061     _bt.verify_not_unallocated(res, size);
  1062     // mangle a just allocated object with a distinct pattern.
   1063     debug_only(fc->mangleAllocated(size));
   1064   }
   1066   return res;
   1067 }
  1069 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
  1070   HeapWord* res = NULL;
  1071   // try and use linear allocation for smaller blocks
  1072   if (size < _smallLinearAllocBlock._allocation_size_limit) {
  1073     // if successful, the following also adjusts block offset table
   1074     res = getChunkFromSmallLinearAllocBlock(size);
   1075   }
   1076   // Else triage to indexed lists for smaller sizes
  1077   if (res == NULL) {
  1078     if (size < SmallForDictionary) {
  1079       res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1080     } else {
  1081       // else get it from the big dictionary; if even this doesn't
  1082       // work we are out of luck.
   1083       res = (HeapWord*)getChunkFromDictionaryExact(size);
   1084     }
   1085   }
   1087   return res;
   1088 }
  1090 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
  1091   assert_lock_strong(freelistLock());
  1092   HeapWord* res = NULL;
  1093   assert(size == adjustObjectSize(size),
  1094          "use adjustObjectSize() before calling into allocate()");
  1096   // Strategy
  1097   //   if small
  1098   //     exact size from small object indexed list if small
  1099   //     small or large linear allocation block (linAB) as appropriate
  1100   //     take from lists of greater sized chunks
  1101   //   else
  1102   //     dictionary
  1103   //     small or large linear allocation block if it has the space
  1104   // Try allocating exact size from indexTable first
  1105   if (size < IndexSetSize) {
  1106     res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1107     if(res != NULL) {
  1108       assert(res != (HeapWord*)_indexedFreeList[size].head(),
  1109         "Not removed from free list");
  1110       // no block offset table adjustment is necessary on blocks in
  1111       // the indexed lists.
  1113     // Try allocating from the small LinAB
  1114     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
  1115         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
  1116         // if successful, the above also adjusts block offset table
  1117         // Note that this call will refill the LinAB to
   1118         // satisfy the request.  This is different than
  1119         // evm.
  1120         // Don't record chunk off a LinAB?  smallSplitBirth(size);
  1122     } else {
  1123       // Raid the exact free lists larger than size, even if they are not
  1124       // overpopulated.
   1125       res = (HeapWord*) getChunkFromGreater(size);
   1126     }
   1127   } else {
  1128     // Big objects get allocated directly from the dictionary.
  1129     res = (HeapWord*) getChunkFromDictionaryExact(size);
  1130     if (res == NULL) {
  1131       // Try hard not to fail since an allocation failure will likely
  1132       // trigger a synchronous GC.  Try to get the space from the
  1133       // allocation blocks.
   1134       res = getChunkFromSmallLinearAllocBlockRemainder(size);
   1135     }
   1136   }
   1138   return res;
   1139 }
  1141 // A worst-case estimate of the space required (in HeapWords) to expand the heap
  1142 // when promoting obj.
  1143 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  1144   // Depending on the object size, expansion may require refilling either a
  1145   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
  1146   // is added because the dictionary may over-allocate to avoid fragmentation.
  1147   size_t space = obj_size;
  1148   if (!_adaptive_freelists) {
   1149     space = MAX2(space, _smallLinearAllocBlock._refillSize);
   1150   }
   1151   space += _promoInfo.refillSize() + 2 * MinChunkSize;
   1152   return space;
   1153 }
  1155 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  1156   FreeChunk* ret;
  1158   assert(numWords >= MinChunkSize, "Size is less than minimum");
  1159   assert(linearAllocationWouldFail() || bestFitFirst(),
  1160     "Should not be here");
  1162   size_t i;
  1163   size_t currSize = numWords + MinChunkSize;
  1164   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  1165   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
  1166     FreeList* fl = &_indexedFreeList[i];
  1167     if (fl->head()) {
  1168       ret = getFromListGreater(fl, numWords);
  1169       assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
  1170       return ret;
  1174   currSize = MAX2((size_t)SmallForDictionary,
  1175                   (size_t)(numWords + MinChunkSize));
  1177   /* Try to get a chunk that satisfies request, while avoiding
  1178      fragmentation that can't be handled. */
  1180     ret =  dictionary()->getChunk(currSize);
  1181     if (ret != NULL) {
  1182       assert(ret->size() - numWords >= MinChunkSize,
  1183              "Chunk is too small");
  1184       _bt.allocated((HeapWord*)ret, ret->size());
  1185       /* Carve returned chunk. */
  1186       (void) splitChunkAndReturnRemainder(ret, numWords);
  1187       /* Label this as no longer a free chunk. */
  1188       assert(ret->isFree(), "This chunk should be free");
  1189       ret->linkPrev(NULL);
  1191     assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
  1192     return ret;
  1194   ShouldNotReachHere();
  1197 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
  1198   const {
  1199   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  1200   return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
  1203 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
  1204   if (fc->size() >= IndexSetSize) {
  1205     return dictionary()->verifyChunkInFreeLists(fc);
  1206   } else {
  1207     return verifyChunkInIndexedFreeLists(fc);
  1211 #ifndef PRODUCT
  1212 void CompactibleFreeListSpace::assert_locked() const {
  1213   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
  1215 #endif
  1217 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
  1218   // In the parallel case, the main thread holds the free list lock
   1219   // on behalf of the parallel threads.
  1220   assert_locked();
  1221   FreeChunk* fc;
  1223     // If GC is parallel, this might be called by several threads.
  1224     // This should be rare enough that the locking overhead won't affect
  1225     // the sequential code.
  1226     MutexLockerEx x(parDictionaryAllocLock(),
  1227                     Mutex::_no_safepoint_check_flag);
  1228     fc = getChunkFromDictionary(size);
  1230   if (fc != NULL) {
  1231     fc->dontCoalesce();
  1232     assert(fc->isFree(), "Should be free, but not coalescable");
  1233     // Verify that the block offset table shows this to
  1234     // be a single block, but not one which is unallocated.
  1235     _bt.verify_single_block((HeapWord*)fc, fc->size());
  1236     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  1238   return fc;
  1241 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
  1242   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  1243   assert_locked();
  1245   // if we are tracking promotions, then first ensure space for
  1246   // promotion (including spooling space for saving header if necessary).
  1247   // then allocate and copy, then track promoted info if needed.
  1248   // When tracking (see PromotionInfo::track()), the mark word may
  1249   // be displaced and in this case restoration of the mark word
  1250   // occurs in the (oop_since_save_marks_)iterate phase.
  1251   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
  1252     return NULL;
  1254   // Call the allocate(size_t, bool) form directly to avoid the
  1255   // additional call through the allocate(size_t) form.  Having
   1256   // the compiler inline the call is problematic because allocate(size_t)
  1257   // is a virtual method.
  1258   HeapWord* res = allocate(adjustObjectSize(obj_size));
  1259   if (res != NULL) {
  1260     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
  1261     // if we should be tracking promotions, do so.
  1262     if (_promoInfo.tracking()) {
  1263         _promoInfo.track((PromotedObject*)res);
  1266   return oop(res);
  1269 HeapWord*
  1270 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
  1271   assert_locked();
  1272   assert(size >= MinChunkSize, "minimum chunk size");
  1273   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
  1274     "maximum from smallLinearAllocBlock");
  1275   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
  1278 HeapWord*
  1279 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
  1280                                                        size_t size) {
  1281   assert_locked();
  1282   assert(size >= MinChunkSize, "too small");
  1283   HeapWord* res = NULL;
   1284   // Try to do linear allocation from blk, making sure that the block has not been exhausted.
  1285   if (blk->_word_size == 0) {
  1286     // We have probably been unable to fill this either in the prologue or
  1287     // when it was exhausted at the last linear allocation. Bail out until
  1288     // next time.
  1289     assert(blk->_ptr == NULL, "consistency check");
  1290     return NULL;
  1292   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
  1293   res = getChunkFromLinearAllocBlockRemainder(blk, size);
  1294   if (res != NULL) return res;
  1296   // about to exhaust this linear allocation block
  1297   if (blk->_word_size == size) { // exactly satisfied
  1298     res = blk->_ptr;
  1299     _bt.allocated(res, blk->_word_size);
  1300   } else if (size + MinChunkSize <= blk->_refillSize) {
   1301     // Update _unallocated_block if the size is such that the chunk would be
   1302     // returned to the indexed free list.  All other chunks in the indexed
   1303     // free lists are allocated from the dictionary so that _unallocated_block
   1304     // has already been adjusted for them.  Do it here so that the adjustment
   1305     // is also made for chunks added back to the indexed free lists.
  1306     if (blk->_word_size < SmallForDictionary) {
  1307       _bt.allocated(blk->_ptr, blk->_word_size);
  1309     // Return the chunk that isn't big enough, and then refill below.
  1310     addChunkToFreeLists(blk->_ptr, blk->_word_size);
  1311     _bt.verify_single_block(blk->_ptr, (blk->_ptr + blk->_word_size));
  1312     // Don't keep statistics on adding back chunk from a LinAB.
  1313   } else {
  1314     // A refilled block would not satisfy the request.
  1315     return NULL;
  1318   blk->_ptr = NULL; blk->_word_size = 0;
  1319   refillLinearAllocBlock(blk);
  1320   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
  1321          "block was replenished");
  1322   if (res != NULL) {
  1323     splitBirth(size);
  1324     repairLinearAllocBlock(blk);
  1325   } else if (blk->_ptr != NULL) {
  1326     res = blk->_ptr;
  1327     size_t blk_size = blk->_word_size;
  1328     blk->_word_size -= size;
  1329     blk->_ptr  += size;
  1330     splitBirth(size);
  1331     repairLinearAllocBlock(blk);
  1332     // Update BOT last so that other (parallel) GC threads see a consistent
  1333     // view of the BOT and free blocks.
  1334     // Above must occur before BOT is updated below.
  1335     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1337   return res;
  1340 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
  1341                                         LinearAllocBlock* blk,
  1342                                         size_t size) {
  1343   assert_locked();
  1344   assert(size >= MinChunkSize, "too small");
  1346   HeapWord* res = NULL;
  1347   // This is the common case.  Keep it simple.
  1348   if (blk->_word_size >= size + MinChunkSize) {
  1349     assert(blk->_ptr != NULL, "consistency check");
  1350     res = blk->_ptr;
  1351     // Note that the BOT is up-to-date for the linAB before allocation.  It
  1352     // indicates the start of the linAB.  The split_block() updates the
  1353     // BOT for the linAB after the allocation (indicates the start of the
  1354     // next chunk to be allocated).
  1355     size_t blk_size = blk->_word_size;
  1356     blk->_word_size -= size;
  1357     blk->_ptr  += size;
  1358     splitBirth(size);
  1359     repairLinearAllocBlock(blk);
  1360     // Update BOT last so that other (parallel) GC threads see a consistent
  1361     // view of the BOT and free blocks.
  1362     // Above must occur before BOT is updated below.
  1363     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1364     _bt.allocated(res, size);
  1366   return res;
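
// Editor's illustration: a standalone sketch (hypothetical names, not HotSpot
// code) of the common case handled by the two routines above.  A linear
// allocation block is just a pointer plus a remaining word count, and a request
// is served by carving words off the front as long as the leftover can still
// stand on its own as a free chunk of at least some minimum size.
#include <cstddef>

namespace cfls_sketch_linab {

const size_t kMinChunkWords = 4;          // stand-in for MinChunkSize
const size_t kBytesPerWord  = sizeof(void*);

struct Block {
  char*  _ptr;                            // start of the unallocated tail of the block
  size_t _word_size;                      // words still available in the block
};

// Returns the start of the carved region, or NULL when the block cannot satisfy
// the request while leaving a viable remainder (the caller would then refill).
char* carve_from_block(Block* blk, size_t word_size) {
  if (blk->_ptr == NULL || blk->_word_size < word_size + kMinChunkWords) {
    return NULL;
  }
  char* res = blk->_ptr;
  blk->_ptr       += word_size * kBytesPerWord;   // advance past the allocation
  blk->_word_size -= word_size;                   // shrink what remains
  // The real code also records a split birth, repairs the block so other GC
  // threads see it as a free chunk, and only then updates the block offset
  // table, preserving the ordering noted in the comments above.
  return res;
}

} // namespace cfls_sketch_linab
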
  1369 FreeChunk*
  1370 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
  1371   assert_locked();
  1372   assert(size < SmallForDictionary, "just checking");
  1373   FreeChunk* res;
  1374   res = _indexedFreeList[size].getChunkAtHead();
  1375   if (res == NULL) {
  1376     res = getChunkFromIndexedFreeListHelper(size);
  1378   _bt.verify_not_unallocated((HeapWord*) res, size);
  1379   return res;
  1382 FreeChunk*
  1383 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size) {
  1384   assert_locked();
  1385   FreeChunk* fc = NULL;
  1386   if (size < SmallForDictionary) {
  1387     assert(_indexedFreeList[size].head() == NULL ||
  1388       _indexedFreeList[size].surplus() <= 0,
   1389       "List for this size should be empty or underpopulated");
  1390     // Try best fit in exact lists before replenishing the list
  1391     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
  1392       // Replenish list.
  1393       //
  1394       // Things tried that failed.
  1395       //   Tried allocating out of the two LinAB's first before
  1396       // replenishing lists.
  1397       //   Tried small linAB of size 256 (size in indexed list)
  1398       // and replenishing indexed lists from the small linAB.
  1399       //
  1400       FreeChunk* newFc = NULL;
  1401       size_t replenish_size = CMSIndexedFreeListReplenish * size;
  1402       if (replenish_size < SmallForDictionary) {
  1403         // Do not replenish from an underpopulated size.
  1404         if (_indexedFreeList[replenish_size].surplus() > 0 &&
  1405             _indexedFreeList[replenish_size].head() != NULL) {
  1406           newFc =
  1407             _indexedFreeList[replenish_size].getChunkAtHead();
  1408         } else {
  1409           newFc = bestFitSmall(replenish_size);
  1412       if (newFc != NULL) {
  1413         splitDeath(replenish_size);
  1414       } else if (replenish_size > size) {
  1415         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
  1416         newFc =
  1417           getChunkFromIndexedFreeListHelper(replenish_size);
  1419       if (newFc != NULL) {
  1420         assert(newFc->size() == replenish_size, "Got wrong size");
  1421         size_t i;
  1422         FreeChunk *curFc, *nextFc;
  1423         // carve up and link blocks 0, ..., CMSIndexedFreeListReplenish - 2
  1424         // The last chunk is not added to the lists but is returned as the
  1425         // free chunk.
  1426         for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
  1427              i = 0;
  1428              i < (CMSIndexedFreeListReplenish - 1);
  1429              curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
  1430              i++) {
  1431           curFc->setSize(size);
  1432           // Don't record this as a return in order to try and
  1433           // determine the "returns" from a GC.
   1434           _bt.verify_not_unallocated((HeapWord*) curFc, size);
  1435           _indexedFreeList[size].returnChunkAtTail(curFc, false);
  1436           _bt.mark_block((HeapWord*)curFc, size);
  1437           splitBirth(size);
  1438           // Don't record the initial population of the indexed list
  1439           // as a split birth.
  1442         // check that the arithmetic was OK above
  1443         assert((HeapWord*)nextFc == (HeapWord*)newFc + replenish_size,
  1444           "inconsistency in carving newFc");
  1445         curFc->setSize(size);
  1446         _bt.mark_block((HeapWord*)curFc, size);
  1447         splitBirth(size);
  1448         return curFc;
  1451   } else {
  1452     // Get a free chunk from the free chunk dictionary to be returned to
  1453     // replenish the indexed free list.
  1454     fc = getChunkFromDictionaryExact(size);
  1456   assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
  1457   return fc;
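
// Editor's illustration: a standalone sketch (hypothetical names) of the
// replenishment step in getChunkFromIndexedFreeListHelper() above -- take one
// block of count * chunk_words words, carve it into count equal pieces, stock
// the exact-size free list with the first count - 1 of them, and hand the last
// piece back to the caller.
#include <cstddef>
#include <vector>

namespace cfls_sketch_replenish {

// 'block' points at raw storage of exactly count * chunk_words words.
// The first count - 1 chunks are appended to 'free_list'; the last is returned.
char* carve_and_stock(char* block, size_t chunk_words, size_t count,
                      std::vector<char*>* free_list) {
  const size_t chunk_bytes = chunk_words * sizeof(void*);
  char* cur = block;
  for (size_t i = 0; i + 1 < count; i++) {   // blocks 0 .. count-2 feed the list
    free_list->push_back(cur);
    cur += chunk_bytes;
  }
  // 'cur' now addresses block + (count - 1) * chunk_bytes, mirroring the loop
  // above that leaves curFc pointing at the final piece.
  return cur;
}

} // namespace cfls_sketch_replenish
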
  1460 FreeChunk*
  1461 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
  1462   assert_locked();
  1463   FreeChunk* fc = _dictionary->getChunk(size);
  1464   if (fc == NULL) {
  1465     return NULL;
  1467   _bt.allocated((HeapWord*)fc, fc->size());
  1468   if (fc->size() >= size + MinChunkSize) {
  1469     fc = splitChunkAndReturnRemainder(fc, size);
  1471   assert(fc->size() >= size, "chunk too small");
  1472   assert(fc->size() < size + MinChunkSize, "chunk too big");
  1473   _bt.verify_single_block((HeapWord*)fc, fc->size());
  1474   return fc;
  1477 FreeChunk*
  1478 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
  1479   assert_locked();
  1480   FreeChunk* fc = _dictionary->getChunk(size);
  1481   if (fc == NULL) {
  1482     return fc;
  1484   _bt.allocated((HeapWord*)fc, fc->size());
  1485   if (fc->size() == size) {
  1486     _bt.verify_single_block((HeapWord*)fc, size);
  1487     return fc;
  1489   assert(fc->size() > size, "getChunk() guarantee");
  1490   if (fc->size() < size + MinChunkSize) {
  1491     // Return the chunk to the dictionary and go get a bigger one.
  1492     returnChunkToDictionary(fc);
  1493     fc = _dictionary->getChunk(size + MinChunkSize);
  1494     if (fc == NULL) {
  1495       return NULL;
  1497     _bt.allocated((HeapWord*)fc, fc->size());
  1499   assert(fc->size() >= size + MinChunkSize, "tautology");
  1500   fc = splitChunkAndReturnRemainder(fc, size);
  1501   assert(fc->size() == size, "chunk is wrong size");
  1502   _bt.verify_single_block((HeapWord*)fc, size);
  1503   return fc;
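
// Editor's illustration: a standalone sketch (hypothetical names) of the retry
// rule applied by getChunkFromDictionaryExact() above.  An "exact" allocation
// may only split a chunk when the leftover is itself at least a minimum chunk,
// so a chunk that is larger than the request but too small to split is rejected
// and a strictly larger chunk is fetched instead.
#include <cstddef>
#include <map>

namespace cfls_sketch_exact {

const size_t kMinChunkWords = 4;                   // stand-in for MinChunkSize
typedef std::multimap<size_t, void*> Dictionary;   // size in words -> chunk

void* get_exact(Dictionary* dict, size_t size) {
  Dictionary::iterator it = dict->lower_bound(size);
  if (it == dict->end()) return NULL;
  if (it->first != size && it->first < size + kMinChunkWords) {
    // Bigger than the request but not splittable: the real code returns this
    // chunk to the dictionary and asks for one that leaves a viable remainder.
    it = dict->lower_bound(size + kMinChunkWords);
    if (it == dict->end()) return NULL;
  }
  void* chunk = it->second;
  dict->erase(it);
  // Splitting the remainder back into the free data structures is omitted.
  return chunk;
}

} // namespace cfls_sketch_exact
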
  1506 void
  1507 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
  1508   assert_locked();
  1510   size_t size = chunk->size();
  1511   _bt.verify_single_block((HeapWord*)chunk, size);
  1512   // adjust _unallocated_block downward, as necessary
  1513   _bt.freed((HeapWord*)chunk, size);
  1514   _dictionary->returnChunk(chunk);
  1517 void
  1518 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
  1519   assert_locked();
  1520   size_t size = fc->size();
  1521   _bt.verify_single_block((HeapWord*) fc, size);
  1522   _bt.verify_not_unallocated((HeapWord*) fc, size);
  1523   if (_adaptive_freelists) {
  1524     _indexedFreeList[size].returnChunkAtTail(fc);
  1525   } else {
  1526     _indexedFreeList[size].returnChunkAtHead(fc);
  1530 // Add chunk to end of last block -- if it's the largest
  1531 // block -- and update BOT and census data. We would
  1532 // of course have preferred to coalesce it with the
  1533 // last block, but it's currently less expensive to find the
  1534 // largest block than it is to find the last.
  1535 void
  1536 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
  1537   HeapWord* chunk, size_t     size) {
  1538   // check that the chunk does lie in this space!
  1539   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1540   assert_locked();
  1541   // One of the parallel gc task threads may be here
  1542   // whilst others are allocating.
  1543   Mutex* lock = NULL;
  1544   if (ParallelGCThreads != 0) {
  1545     lock = &_parDictionaryAllocLock;
  1547   FreeChunk* ec;
  1549     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1550     ec = dictionary()->findLargestDict();  // get largest block
  1551     if (ec != NULL && ec->end() == chunk) {
  1552       // It's a coterminal block - we can coalesce.
  1553       size_t old_size = ec->size();
  1554       coalDeath(old_size);
  1555       removeChunkFromDictionary(ec);
  1556       size += old_size;
  1557     } else {
  1558       ec = (FreeChunk*)chunk;
  1561   ec->setSize(size);
  1562   debug_only(ec->mangleFreed(size));
  1563   if (size < SmallForDictionary) {
  1564     lock = _indexedFreeListParLocks[size];
  1566   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1567   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  1568   // record the birth under the lock since the recording involves
  1569   // manipulation of the list on which the chunk lives and
  1570   // if the chunk is allocated and is the last on the list,
  1571   // the list can go away.
  1572   coalBirth(size);
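
// Editor's illustration: a standalone sketch (hypothetical layout, not HotSpot
// code) of the coalescing test performed above -- the incoming chunk is merged
// with the dictionary's largest block only when the two are coterminal, i.e.
// the largest block ends exactly where the new chunk begins.
#include <cstddef>

namespace cfls_sketch_coalesce {

struct Chunk {
  char*  start;
  size_t word_size;
  char*  end() const { return start + word_size * sizeof(void*); }
};

// Widens 'largest' in place and returns true when the blocks are adjacent;
// the caller then re-files the bigger block and records a coalescence birth.
bool try_coalesce_with_largest(Chunk* largest, const Chunk& incoming) {
  if (largest != NULL && largest->end() == incoming.start) {
    largest->word_size += incoming.word_size;
    return true;
  }
  return false;                            // file 'incoming' on its own instead
}

} // namespace cfls_sketch_coalesce
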
  1575 void
  1576 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
  1577                                               size_t     size) {
  1578   // check that the chunk does lie in this space!
  1579   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1580   assert_locked();
  1581   _bt.verify_single_block(chunk, size);
  1583   FreeChunk* fc = (FreeChunk*) chunk;
  1584   fc->setSize(size);
  1585   debug_only(fc->mangleFreed(size));
  1586   if (size < SmallForDictionary) {
  1587     returnChunkToFreeList(fc);
  1588   } else {
  1589     returnChunkToDictionary(fc);
  1593 void
  1594 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
  1595   size_t size, bool coalesced) {
  1596   assert_locked();
  1597   assert(chunk != NULL, "null chunk");
  1598   if (coalesced) {
  1599     // repair BOT
  1600     _bt.single_block(chunk, size);
  1602   addChunkToFreeLists(chunk, size);
  1605 // We _must_ find the purported chunk on our free lists;
  1606 // we assert if we don't.
  1607 void
  1608 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
  1609   size_t size = fc->size();
  1610   assert_locked();
  1611   debug_only(verifyFreeLists());
  1612   if (size < SmallForDictionary) {
  1613     removeChunkFromIndexedFreeList(fc);
  1614   } else {
  1615     removeChunkFromDictionary(fc);
  1617   _bt.verify_single_block((HeapWord*)fc, size);
  1618   debug_only(verifyFreeLists());
  1621 void
  1622 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
   1623   assert_locked();
   1624   assert(fc != NULL, "null chunk");
   1625   size_t size = fc->size();
  1626   _bt.verify_single_block((HeapWord*)fc, size);
  1627   _dictionary->removeChunk(fc);
  1628   // adjust _unallocated_block upward, as necessary
  1629   _bt.allocated((HeapWord*)fc, size);
  1632 void
  1633 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
  1634   assert_locked();
  1635   size_t size = fc->size();
  1636   _bt.verify_single_block((HeapWord*)fc, size);
  1637   NOT_PRODUCT(
  1638     if (FLSVerifyIndexTable) {
  1639       verifyIndexedFreeList(size);
  1642   _indexedFreeList[size].removeChunk(fc);
  1643   debug_only(fc->clearNext());
  1644   debug_only(fc->clearPrev());
  1645   NOT_PRODUCT(
  1646     if (FLSVerifyIndexTable) {
  1647       verifyIndexedFreeList(size);
  1652 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
  1653   /* A hint is the next larger size that has a surplus.
  1654      Start search at a size large enough to guarantee that
  1655      the excess is >= MIN_CHUNK. */
  1656   size_t start = align_object_size(numWords + MinChunkSize);
  1657   if (start < IndexSetSize) {
  1658     FreeList* it   = _indexedFreeList;
  1659     size_t    hint = _indexedFreeList[start].hint();
  1660     while (hint < IndexSetSize) {
  1661       assert(hint % MinObjAlignment == 0, "hint should be aligned");
  1662       FreeList *fl = &_indexedFreeList[hint];
  1663       if (fl->surplus() > 0 && fl->head() != NULL) {
  1664         // Found a list with surplus, reset original hint
  1665         // and split out a free chunk which is returned.
  1666         _indexedFreeList[start].set_hint(hint);
  1667         FreeChunk* res = getFromListGreater(fl, numWords);
  1668         assert(res == NULL || res->isFree(),
  1669           "Should be returning a free chunk");
  1670         return res;
  1672       hint = fl->hint(); /* keep looking */
  1674     /* None found. */
  1675     it[start].set_hint(IndexSetSize);
  1677   return NULL;
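
// Editor's illustration: a standalone sketch (hypothetical names) of the hint
// chain walked by bestFitSmall() above.  Each indexed list remembers, as its
// "hint", the next larger size that had a surplus when hints were last set, so
// a failed exact allocation can hop from surplus size to surplus size instead
// of scanning every slot in the table.
#include <cstddef>

namespace cfls_sketch_hints {

const size_t kTableSize = 257;             // stand-in for IndexSetSize

struct SketchFreeList {
  int    surplus;                          // count above the desired census level
  bool   empty;                            // no chunks of this size right now
  size_t hint;                             // next larger size believed to have surplus
};

// Starting at 'start', follow hints until a non-empty list with a surplus is
// found; remember where it was found so the next search starts closer.
// Returns kTableSize when no donor list exists.
size_t find_donor(SketchFreeList table[], size_t start) {
  size_t hint = table[start].hint;
  while (hint < kTableSize) {
    SketchFreeList& fl = table[hint];
    if (fl.surplus > 0 && !fl.empty) {
      table[start].hint = hint;            // reset the original hint, as above
      return hint;                         // caller splits a chunk out of this list
    }
    hint = fl.hint;                        // keep looking further up the chain
  }
  table[start].hint = kTableSize;          // none found; park the hint at the end
  return kTableSize;
}

} // namespace cfls_sketch_hints
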
  1680 /* Requires fl->size >= numWords + MinChunkSize */
  1681 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
  1682   size_t numWords) {
   1683   FreeChunk *curr = fl->head();
   1684   assert(curr != NULL, "List is empty");
   1685   size_t oldNumWords = curr->size();
   1686   assert(numWords >= MinChunkSize, "Word size is too small");
   1687   assert(oldNumWords >= numWords + MinChunkSize,
   1688         "Size of chunks in the list is too small");
  1690   fl->removeChunk(curr);
  1691   // recorded indirectly by splitChunkAndReturnRemainder -
  1692   // smallSplit(oldNumWords, numWords);
  1693   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
  1694   // Does anything have to be done for the remainder in terms of
  1695   // fixing the card table?
  1696   assert(new_chunk == NULL || new_chunk->isFree(),
  1697     "Should be returning a free chunk");
  1698   return new_chunk;
  1701 FreeChunk*
  1702 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
  1703   size_t new_size) {
  1704   assert_locked();
  1705   size_t size = chunk->size();
  1706   assert(size > new_size, "Split from a smaller block?");
  1707   assert(is_aligned(chunk), "alignment problem");
  1708   assert(size == adjustObjectSize(size), "alignment problem");
  1709   size_t rem_size = size - new_size;
  1710   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
  1711   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
  1712   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
  1713   assert(is_aligned(ffc), "alignment problem");
  1714   ffc->setSize(rem_size);
  1715   ffc->linkNext(NULL);
  1716   ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  1717   // Above must occur before BOT is updated below.
  1718   // adjust block offset table
  1719   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
  1720   if (rem_size < SmallForDictionary) {
  1721     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  1722     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
  1723     returnChunkToFreeList(ffc);
  1724     split(size, rem_size);
  1725     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
  1726   } else {
  1727     returnChunkToDictionary(ffc);
   1728     split(size, rem_size);
  1730   chunk->setSize(new_size);
  1731   return chunk;
  1734 void
  1735 CompactibleFreeListSpace::sweep_completed() {
  1736   // Now that space is probably plentiful, refill linear
  1737   // allocation blocks as needed.
  1738   refillLinearAllocBlocksIfNeeded();
  1741 void
  1742 CompactibleFreeListSpace::gc_prologue() {
  1743   assert_locked();
  1744   if (PrintFLSStatistics != 0) {
  1745     gclog_or_tty->print("Before GC:\n");
  1746     reportFreeListStatistics();
  1748   refillLinearAllocBlocksIfNeeded();
  1751 void
  1752 CompactibleFreeListSpace::gc_epilogue() {
  1753   assert_locked();
  1754   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
  1755     if (_smallLinearAllocBlock._word_size == 0)
  1756       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
  1758   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1759   _promoInfo.stopTrackingPromotions();
  1760   repairLinearAllocationBlocks();
  1761   // Print Space's stats
  1762   if (PrintFLSStatistics != 0) {
  1763     gclog_or_tty->print("After GC:\n");
  1764     reportFreeListStatistics();
  1768 // Iteration support, mostly delegated from a CMS generation
  1770 void CompactibleFreeListSpace::save_marks() {
  1771   // mark the "end" of the used space at the time of this call;
  1772   // note, however, that promoted objects from this point
  1773   // on are tracked in the _promoInfo below.
  1774   set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ?
  1775                       unallocated_block() : end());
  1776   // inform allocator that promotions should be tracked.
  1777   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1778   _promoInfo.startTrackingPromotions();
  1781 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
  1782   assert(_promoInfo.tracking(), "No preceding save_marks?");
  1783   guarantee(SharedHeap::heap()->n_par_threads() == 0,
  1784             "Shouldn't be called (yet) during parallel part of gc.");
  1785   return _promoInfo.noPromotions();
  1788 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
  1790 void CompactibleFreeListSpace::                                             \
  1791 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  1792   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
  1793          "Shouldn't be called (yet) during parallel part of gc.");          \
  1794   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
  1795   /*                                                                        \
  1796    * This also restores any displaced headers and removes the elements from \
  1797    * the iteration set as they are processed, so that we have a clean slate \
  1798    * at the end of the iteration. Note, thus, that if new objects are       \
  1799    * promoted as a result of the iteration they are iterated over as well.  \
  1800    */                                                                       \
  1801   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
  1804 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
  1806 //////////////////////////////////////////////////////////////////////////////
  1807 // We go over the list of promoted objects, removing each from the list,
  1808 // and applying the closure (this may, in turn, add more elements to
  1809 // the tail of the promoted list, and these newly added objects will
  1810 // also be processed) until the list is empty.
  1811 // To aid verification and debugging, in the non-product builds
  1812 // we actually forward _promoHead each time we process a promoted oop.
  1813 // Note that this is not necessary in general (i.e. when we don't need to
  1814 // call PromotionInfo::verify()) because oop_iterate can only add to the
  1815 // end of _promoTail, and never needs to look at _promoHead.
  1817 #define PROMOTED_OOPS_ITERATE_DEFN(OopClosureType, nv_suffix)               \
  1819 void PromotionInfo::promoted_oops_iterate##nv_suffix(OopClosureType* cl) {  \
  1820   NOT_PRODUCT(verify());                                                    \
  1821   PromotedObject *curObj, *nextObj;                                         \
  1822   for (curObj = _promoHead; curObj != NULL; curObj = nextObj) {             \
  1823     if ((nextObj = curObj->next()) == NULL) {                               \
  1824       /* protect ourselves against additions due to closure application     \
  1825          below by resetting the list.  */                                   \
  1826       assert(_promoTail == curObj, "Should have been the tail");            \
  1827       _promoHead = _promoTail = NULL;                                       \
  1828     }                                                                       \
  1829     if (curObj->hasDisplacedMark()) {                                       \
  1830       /* restore displaced header */                                        \
  1831       oop(curObj)->set_mark(nextDisplacedHeader());                         \
  1832     } else {                                                                \
  1833       /* restore prototypical header */                                     \
  1834       oop(curObj)->init_mark();                                             \
  1835     }                                                                       \
  1836     /* The "promoted_mark" should now not be set */                         \
  1837     assert(!curObj->hasPromotedMark(),                                      \
  1838            "Should have been cleared by restoring displaced mark-word");    \
  1839     NOT_PRODUCT(_promoHead = nextObj);                                      \
  1840     if (cl != NULL) oop(curObj)->oop_iterate(cl);                           \
  1841     if (nextObj == NULL) { /* start at head of list reset above */          \
  1842       nextObj = _promoHead;                                                 \
  1843     }                                                                       \
  1844   }                                                                         \
  1845   assert(noPromotions(), "post-condition violation");                       \
  1846   assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
  1847   assert(_spoolHead == _spoolTail, "emptied spooling buffers");             \
  1848   assert(_firstIndex == _nextIndex, "empty buffer");                        \
  1851 // This should have been ALL_SINCE_...() just like the others,
   1852 // but, because the body of the method above is somewhat longer,
  1853 // the MSVC compiler cannot cope; as a workaround, we split the
  1854 // macro into its 3 constituent parts below (see original macro
  1855 // definition in specializedOopClosures.hpp).
  1856 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
  1857 PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)
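
// Editor's illustration: a standalone sketch (hypothetical types, no oops) of
// the traversal pattern used by PROMOTED_OOPS_ITERATE_DEFN above -- walk a
// singly-linked worklist whose tail may grow while an element is being
// processed, detaching the list when the current node is the tail so that any
// nodes appended during processing form a fresh list that is then picked up
// and drained as well.  Restoring displaced mark words is omitted here.
#include <cstddef>

namespace cfls_sketch_promolist {

struct Node {
  Node* next;
  int   payload;
};

struct Worklist {
  Node* head;
  Node* tail;
  Worklist() : head(NULL), tail(NULL) {}

  void push(Node* n) {                      // appends always land on the tail
    n->next = NULL;
    if (tail != NULL) { tail->next = n; tail = n; }
    else              { head = tail = n; }
  }

  // 'process' may push new nodes onto this worklist while it runs.
  template <typename Fn>
  void drain(Fn process) {
    Node* cur;
    Node* nxt;
    for (cur = head; cur != NULL; cur = nxt) {
      if ((nxt = cur->next) == NULL) {
        head = tail = NULL;                 // protect against appends made by process()
      }
      process(cur);
      if (nxt == NULL) {
        nxt = head;                         // continue with anything appended above
      }
    }
  }
};

} // namespace cfls_sketch_promolist
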
  1860 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
  1861   // ugghh... how would one do this efficiently for a non-contiguous space?
  1862   guarantee(false, "NYI");
  1865 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  1866   return _smallLinearAllocBlock._word_size == 0;
  1869 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
  1870   // Fix up linear allocation blocks to look like free blocks
  1871   repairLinearAllocBlock(&_smallLinearAllocBlock);
  1874 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
  1875   assert_locked();
  1876   if (blk->_ptr != NULL) {
  1877     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
  1878            "Minimum block size requirement");
  1879     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
  1880     fc->setSize(blk->_word_size);
  1881     fc->linkPrev(NULL);   // mark as free
  1882     fc->dontCoalesce();
  1883     assert(fc->isFree(), "just marked it free");
  1884     assert(fc->cantCoalesce(), "just marked it uncoalescable");
  1888 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
  1889   assert_locked();
  1890   if (_smallLinearAllocBlock._ptr == NULL) {
  1891     assert(_smallLinearAllocBlock._word_size == 0,
  1892       "Size of linAB should be zero if the ptr is NULL");
  1893     // Reset the linAB refill and allocation size limit.
  1894     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
  1896   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
  1899 void
  1900 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
  1901   assert_locked();
  1902   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
  1903          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
  1904          "blk invariant");
  1905   if (blk->_ptr == NULL) {
  1906     refillLinearAllocBlock(blk);
  1908   if (PrintMiscellaneous && Verbose) {
  1909     if (blk->_word_size == 0) {
  1910       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
  1915 void
  1916 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  1917   assert_locked();
  1918   assert(blk->_word_size == 0 && blk->_ptr == NULL,
  1919          "linear allocation block should be empty");
  1920   FreeChunk* fc;
  1921   if (blk->_refillSize < SmallForDictionary &&
  1922       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
  1923     // A linAB's strategy might be to use small sizes to reduce
  1924     // fragmentation but still get the benefits of allocation from a
  1925     // linAB.
  1926   } else {
  1927     fc = getChunkFromDictionary(blk->_refillSize);
  1929   if (fc != NULL) {
  1930     blk->_ptr  = (HeapWord*)fc;
  1931     blk->_word_size = fc->size();
  1932     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
  1936 // Support for concurrent collection policy decisions.
  1937 bool CompactibleFreeListSpace::should_concurrent_collect() const {
   1938   // In the future we might want to add in fragmentation stats --
  1939   // including erosion of the "mountain" into this decision as well.
  1940   return !adaptive_freelists() && linearAllocationWouldFail();
  1943 // Support for compaction
  1945 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  1946   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  1947   // prepare_for_compaction() uses the space between live objects
   1948   // so that a later phase can skip dead space quickly.  As a result,
   1949   // verification of the free lists doesn't work afterwards.
  1952 #define obj_size(q) adjustObjectSize(oop(q)->size())
  1953 #define adjust_obj_size(s) adjustObjectSize(s)
  1955 void CompactibleFreeListSpace::adjust_pointers() {
  1956   // In other versions of adjust_pointers(), a bail out
  1957   // based on the amount of live data in the generation
  1958   // (i.e., if 0, bail out) may be used.
  1959   // Cannot test used() == 0 here because the free lists have already
  1960   // been mangled by the compaction.
  1962   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
  1963   // See note about verification in prepare_for_compaction().
  1966 void CompactibleFreeListSpace::compact() {
  1967   SCAN_AND_COMPACT(obj_size);
  1970 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
   1971 // where fbs are the free block sizes
  1972 double CompactibleFreeListSpace::flsFrag() const {
  1973   size_t itabFree = totalSizeInIndexedFreeLists();
  1974   double frag = 0.0;
  1975   size_t i;
  1977   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  1978     double sz  = i;
  1979     frag      += _indexedFreeList[i].count() * (sz * sz);
  1982   double totFree = itabFree +
  1983                    _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
  1984   if (totFree > 0) {
  1985     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
  1986             (totFree * totFree));
  1987     frag = (double)1.0  - frag;
  1988   } else {
  1989     assert(frag == 0.0, "Follows from totFree == 0");
  1991   return frag;
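
// Editor's illustration: a standalone restatement of the metric computed by
// flsFrag() above.  For free block sizes fbs_1 .. fbs_n the metric is
//   1 - (fbs_1^2 + ... + fbs_n^2) / (fbs_1 + ... + fbs_n)^2,
// which is 0.0 when all free space sits in a single block and approaches 1.0
// as the free space shatters into many small blocks.  With hypothetical sizes,
// one 100-word block gives 1 - 10000/10000 = 0.0, while ten 10-word blocks
// give 1 - 1000/10000 = 0.9.
#include <cstddef>

namespace cfls_sketch_frag {

double fragmentation(const size_t* block_sizes, size_t n) {
  double sum = 0.0, sum_sq = 0.0;
  for (size_t i = 0; i < n; i++) {
    double sz = (double) block_sizes[i];
    sum    += sz;
    sum_sq += sz * sz;
  }
  if (sum == 0.0) return 0.0;              // no free space: report no fragmentation
  return 1.0 - sum_sq / (sum * sum);
}

} // namespace cfls_sketch_frag
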
  1994 #define CoalSurplusPercent 1.05
  1995 #define SplitSurplusPercent 1.10
  1997 void CompactibleFreeListSpace::beginSweepFLCensus(
  1998   float inter_sweep_current,
  1999   float inter_sweep_estimate) {
  2000   assert_locked();
  2001   size_t i;
  2002   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2003     FreeList* fl    = &_indexedFreeList[i];
  2004     fl->compute_desired(inter_sweep_current, inter_sweep_estimate);
  2005     fl->set_coalDesired((ssize_t)((double)fl->desired() * CoalSurplusPercent));
  2006     fl->set_beforeSweep(fl->count());
  2007     fl->set_bfrSurp(fl->surplus());
  2009   _dictionary->beginSweepDictCensus(CoalSurplusPercent,
  2010                                     inter_sweep_current,
  2011                                     inter_sweep_estimate);
  2014 void CompactibleFreeListSpace::setFLSurplus() {
  2015   assert_locked();
  2016   size_t i;
  2017   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2018     FreeList *fl = &_indexedFreeList[i];
  2019     fl->set_surplus(fl->count() -
  2020                     (ssize_t)((double)fl->desired() * SplitSurplusPercent));
  2024 void CompactibleFreeListSpace::setFLHints() {
  2025   assert_locked();
  2026   size_t i;
  2027   size_t h = IndexSetSize;
  2028   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
  2029     FreeList *fl = &_indexedFreeList[i];
  2030     fl->set_hint(h);
  2031     if (fl->surplus() > 0) {
  2032       h = i;
  2037 void CompactibleFreeListSpace::clearFLCensus() {
  2038   assert_locked();
  2039   int i;
  2040   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2041     FreeList *fl = &_indexedFreeList[i];
  2042     fl->set_prevSweep(fl->count());
  2043     fl->set_coalBirths(0);
  2044     fl->set_coalDeaths(0);
  2045     fl->set_splitBirths(0);
  2046     fl->set_splitDeaths(0);
  2050 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  2051   setFLSurplus();
  2052   setFLHints();
  2053   if (PrintGC && PrintFLSCensus > 0) {
  2054     printFLCensus(sweep_count);
  2056   clearFLCensus();
  2057   assert_locked();
  2058   _dictionary->endSweepDictCensus(SplitSurplusPercent);
  2061 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
  2062   if (size < SmallForDictionary) {
  2063     FreeList *fl = &_indexedFreeList[size];
  2064     return (fl->coalDesired() < 0) ||
  2065            ((int)fl->count() > fl->coalDesired());
  2066   } else {
  2067     return dictionary()->coalDictOverPopulated(size);
  2071 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
  2072   assert(size < SmallForDictionary, "Size too large for indexed list");
  2073   FreeList *fl = &_indexedFreeList[size];
  2074   fl->increment_coalBirths();
  2075   fl->increment_surplus();
  2078 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
  2079   assert(size < SmallForDictionary, "Size too large for indexed list");
  2080   FreeList *fl = &_indexedFreeList[size];
  2081   fl->increment_coalDeaths();
  2082   fl->decrement_surplus();
  2085 void CompactibleFreeListSpace::coalBirth(size_t size) {
  2086   if (size  < SmallForDictionary) {
  2087     smallCoalBirth(size);
  2088   } else {
  2089     dictionary()->dictCensusUpdate(size,
  2090                                    false /* split */,
  2091                                    true /* birth */);
  2095 void CompactibleFreeListSpace::coalDeath(size_t size) {
   2096   if (size < SmallForDictionary) {
  2097     smallCoalDeath(size);
  2098   } else {
  2099     dictionary()->dictCensusUpdate(size,
  2100                                    false /* split */,
  2101                                    false /* birth */);
  2105 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
  2106   assert(size < SmallForDictionary, "Size too large for indexed list");
  2107   FreeList *fl = &_indexedFreeList[size];
  2108   fl->increment_splitBirths();
  2109   fl->increment_surplus();
  2112 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
  2113   assert(size < SmallForDictionary, "Size too large for indexed list");
  2114   FreeList *fl = &_indexedFreeList[size];
  2115   fl->increment_splitDeaths();
  2116   fl->decrement_surplus();
  2119 void CompactibleFreeListSpace::splitBirth(size_t size) {
  2120   if (size  < SmallForDictionary) {
  2121     smallSplitBirth(size);
  2122   } else {
  2123     dictionary()->dictCensusUpdate(size,
  2124                                    true /* split */,
  2125                                    true /* birth */);
  2129 void CompactibleFreeListSpace::splitDeath(size_t size) {
  2130   if (size  < SmallForDictionary) {
  2131     smallSplitDeath(size);
  2132   } else {
  2133     dictionary()->dictCensusUpdate(size,
  2134                                    true /* split */,
  2135                                    false /* birth */);
  2139 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
  2140   size_t to2 = from - to1;
  2141   splitDeath(from);
  2142   splitBirth(to1);
  2143   splitBirth(to2);
  2146 void CompactibleFreeListSpace::print() const {
  2147   tty->print(" CompactibleFreeListSpace");
  2148   Space::print();
  2151 void CompactibleFreeListSpace::prepare_for_verify() {
  2152   assert_locked();
  2153   repairLinearAllocationBlocks();
  2154   // Verify that the SpoolBlocks look like free blocks of
  2155   // appropriate sizes... To be done ...
  2158 class VerifyAllBlksClosure: public BlkClosure {
  2159  private:
  2160   const CompactibleFreeListSpace* _sp;
  2161   const MemRegion                 _span;
  2163  public:
  2164   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
  2165     MemRegion span) :  _sp(sp), _span(span) { }
  2167   virtual size_t do_blk(HeapWord* addr) {
  2168     size_t res;
  2169     if (_sp->block_is_obj(addr)) {
  2170       oop p = oop(addr);
  2171       guarantee(p->is_oop(), "Should be an oop");
  2172       res = _sp->adjustObjectSize(p->size());
  2173       if (_sp->obj_is_alive(addr)) {
  2174         p->verify();
  2176     } else {
  2177       FreeChunk* fc = (FreeChunk*)addr;
  2178       res = fc->size();
  2179       if (FLSVerifyLists && !fc->cantCoalesce()) {
  2180         guarantee(_sp->verifyChunkInFreeLists(fc),
  2181                   "Chunk should be on a free list");
  2184     guarantee(res != 0, "Livelock: no rank reduction!");
  2185     return res;
  2187 };
  2189 class VerifyAllOopsClosure: public OopClosure {
  2190  private:
  2191   const CMSCollector*             _collector;
  2192   const CompactibleFreeListSpace* _sp;
  2193   const MemRegion                 _span;
  2194   const bool                      _past_remark;
  2195   const CMSBitMap*                _bit_map;
  2197  protected:
  2198   void do_oop(void* p, oop obj) {
  2199     if (_span.contains(obj)) { // the interior oop points into CMS heap
  2200       if (!_span.contains(p)) { // reference from outside CMS heap
  2201         // Should be a valid object; the first disjunct below allows
  2202         // us to sidestep an assertion in block_is_obj() that insists
  2203         // that p be in _sp. Note that several generations (and spaces)
  2204         // are spanned by _span (CMS heap) above.
  2205         guarantee(!_sp->is_in_reserved(obj) ||
  2206                   _sp->block_is_obj((HeapWord*)obj),
  2207                   "Should be an object");
  2208         guarantee(obj->is_oop(), "Should be an oop");
  2209         obj->verify();
  2210         if (_past_remark) {
  2211           // Remark has been completed, the object should be marked
  2212           _bit_map->isMarked((HeapWord*)obj);
  2214       } else { // reference within CMS heap
  2215         if (_past_remark) {
  2216           // Remark has been completed -- so the referent should have
  2217           // been marked, if referring object is.
  2218           if (_bit_map->isMarked(_collector->block_start(p))) {
  2219             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
  2223     } else if (_sp->is_in_reserved(p)) {
  2224       // the reference is from FLS, and points out of FLS
  2225       guarantee(obj->is_oop(), "Should be an oop");
  2226       obj->verify();
  2230   template <class T> void do_oop_work(T* p) {
  2231     T heap_oop = oopDesc::load_heap_oop(p);
  2232     if (!oopDesc::is_null(heap_oop)) {
  2233       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2234       do_oop(p, obj);
  2238  public:
  2239   VerifyAllOopsClosure(const CMSCollector* collector,
  2240     const CompactibleFreeListSpace* sp, MemRegion span,
  2241     bool past_remark, CMSBitMap* bit_map) :
  2242     OopClosure(), _collector(collector), _sp(sp), _span(span),
  2243     _past_remark(past_remark), _bit_map(bit_map) { }
  2245   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
  2246   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
  2247 };
  2249 void CompactibleFreeListSpace::verify(bool ignored) const {
  2250   assert_lock_strong(&_freelistLock);
  2251   verify_objects_initialized();
  2252   MemRegion span = _collector->_span;
  2253   bool past_remark = (_collector->abstract_state() ==
  2254                       CMSCollector::Sweeping);
  2256   ResourceMark rm;
  2257   HandleMark  hm;
  2259   // Check integrity of CFL data structures
  2260   _promoInfo.verify();
  2261   _dictionary->verify();
  2262   if (FLSVerifyIndexTable) {
  2263     verifyIndexedFreeLists();
  2265   // Check integrity of all objects and free blocks in space
  2267     VerifyAllBlksClosure cl(this, span);
  2268     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
  2270   // Check that all references in the heap to FLS
  2271   // are to valid objects in FLS or that references in
  2272   // FLS are to valid objects elsewhere in the heap
  2273   if (FLSVerifyAllHeapReferences)
  2275     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
  2276       _collector->markBitMap());
  2277     CollectedHeap* ch = Universe::heap();
  2278     ch->oop_iterate(&cl);              // all oops in generations
  2279     ch->permanent_oop_iterate(&cl);    // all oops in perm gen
  2282   if (VerifyObjectStartArray) {
  2283     // Verify the block offset table
  2284     _bt.verify();
  2288 #ifndef PRODUCT
  2289 void CompactibleFreeListSpace::verifyFreeLists() const {
  2290   if (FLSVerifyLists) {
  2291     _dictionary->verify();
  2292     verifyIndexedFreeLists();
  2293   } else {
  2294     if (FLSVerifyDictionary) {
  2295       _dictionary->verify();
  2297     if (FLSVerifyIndexTable) {
  2298       verifyIndexedFreeLists();
  2302 #endif
  2304 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
  2305   size_t i = 0;
  2306   for (; i < MinChunkSize; i++) {
  2307     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
  2309   for (; i < IndexSetSize; i++) {
  2310     verifyIndexedFreeList(i);
  2314 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
  2315   FreeChunk* fc =  _indexedFreeList[size].head();
  2316   guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
  2317   for (; fc != NULL; fc = fc->next()) {
  2318     guarantee(fc->size() == size, "Size inconsistency");
  2319     guarantee(fc->isFree(), "!free?");
  2320     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
  2324 #ifndef PRODUCT
  2325 void CompactibleFreeListSpace::checkFreeListConsistency() const {
  2326   assert(_dictionary->minSize() <= IndexSetSize,
  2327     "Some sizes can't be allocated without recourse to"
  2328     " linear allocation buffers");
  2329   assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
  2330     "else MIN_TREE_CHUNK_SIZE is wrong");
  2331   assert((IndexSetStride == 2 && IndexSetStart == 2) ||
  2332          (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
  2333   assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
  2334       "Some for-loops may be incorrectly initialized");
  2335   assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
  2336       "For-loops that iterate over IndexSet with stride 2 may be wrong");
  2338 #endif
  2340 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  2341   assert_lock_strong(&_freelistLock);
  2342   FreeList total;
  2343   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  2344   FreeList::print_labels_on(gclog_or_tty, "size");
  2345   size_t totalFree = 0;
  2346   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2347     const FreeList *fl = &_indexedFreeList[i];
  2348     totalFree += fl->count() * fl->size();
  2349     if (i % (40*IndexSetStride) == 0) {
  2350       FreeList::print_labels_on(gclog_or_tty, "size");
  2352     fl->print_on(gclog_or_tty);
  2353     total.set_bfrSurp(    total.bfrSurp()     + fl->bfrSurp()    );
  2354     total.set_surplus(    total.surplus()     + fl->surplus()    );
  2355     total.set_desired(    total.desired()     + fl->desired()    );
  2356     total.set_prevSweep(  total.prevSweep()   + fl->prevSweep()  );
  2357     total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
  2358     total.set_count(      total.count()       + fl->count()      );
  2359     total.set_coalBirths( total.coalBirths()  + fl->coalBirths() );
  2360     total.set_coalDeaths( total.coalDeaths()  + fl->coalDeaths() );
  2361     total.set_splitBirths(total.splitBirths() + fl->splitBirths());
  2362     total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
  2364   total.print_on(gclog_or_tty, "TOTAL");
  2365   gclog_or_tty->print_cr("Total free in indexed lists "
  2366                          SIZE_FORMAT " words", totalFree);
  2367   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
  2368     (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
  2369             (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
  2370     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
  2371   _dictionary->printDictCensus();
  2374 // Return the next displaced header, incrementing the pointer and
  2375 // recycling spool area as necessary.
  2376 markOop PromotionInfo::nextDisplacedHeader() {
  2377   assert(_spoolHead != NULL, "promotionInfo inconsistency");
  2378   assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
  2379          "Empty spool space: no displaced header can be fetched");
  2380   assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
  2381   markOop hdr = _spoolHead->displacedHdr[_firstIndex];
  2382   // Spool forward
  2383   if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
  2384     // forward to next block, recycling this block into spare spool buffer
  2385     SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
  2386     assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
  2387     _spoolHead->nextSpoolBlock = _spareSpool;
  2388     _spareSpool = _spoolHead;
  2389     _spoolHead = tmp;
  2390     _firstIndex = 1;
  2391     NOT_PRODUCT(
  2392       if (_spoolHead == NULL) {  // all buffers fully consumed
  2393         assert(_spoolTail == NULL && _nextIndex == 1,
  2394                "spool buffers processing inconsistency");
  2398   return hdr;
  2401 void PromotionInfo::track(PromotedObject* trackOop) {
  2402   track(trackOop, oop(trackOop)->klass());
  2405 void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
  2406   // make a copy of header as it may need to be spooled
  2407   markOop mark = oop(trackOop)->mark();
  2408   trackOop->clearNext();
  2409   if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
  2410     // save non-prototypical header, and mark oop
  2411     saveDisplacedHeader(mark);
  2412     trackOop->setDisplacedMark();
  2413   } else {
  2414     // we'd like to assert something like the following:
  2415     // assert(mark == markOopDesc::prototype(), "consistency check");
  2416     // ... but the above won't work because the age bits have not (yet) been
  2417     // cleared. The remainder of the check would be identical to the
  2418     // condition checked in must_be_preserved() above, so we don't really
  2419     // have anything useful to check here!
  2421   if (_promoTail != NULL) {
  2422     assert(_promoHead != NULL, "List consistency");
  2423     _promoTail->setNext(trackOop);
  2424     _promoTail = trackOop;
  2425   } else {
  2426     assert(_promoHead == NULL, "List consistency");
  2427     _promoHead = _promoTail = trackOop;
   2429   // Mark as newly promoted, so we can skip over such objects
  2430   // when scanning dirty cards
  2431   assert(!trackOop->hasPromotedMark(), "Should not have been marked");
  2432   trackOop->setPromotedMark();
  2435 // Save the given displaced header, incrementing the pointer and
  2436 // obtaining more spool area as necessary.
  2437 void PromotionInfo::saveDisplacedHeader(markOop hdr) {
  2438   assert(_spoolHead != NULL && _spoolTail != NULL,
  2439          "promotionInfo inconsistency");
  2440   assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
  2441   _spoolTail->displacedHdr[_nextIndex] = hdr;
  2442   // Spool forward
  2443   if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block
  2444     // get a new spooling block
  2445     assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list");
  2446     _splice_point = _spoolTail;                   // save for splicing
  2447     _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail
  2448     _spoolTail = _spoolTail->nextSpoolBlock;      // might become NULL ...
  2449     // ... but will attempt filling before next promotion attempt
  2450     _nextIndex = 1;
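
// Editor's illustration: a standalone sketch (hypothetical types and sizes) of
// the spooling scheme used by saveDisplacedHeader() and nextDisplacedHeader()
// above.  Displaced header words are written FIFO into a chain of fixed-size
// blocks: <tail, next_index> marks the next free slot, <head, first_index> the
// next slot to consume, and fully drained head blocks are recycled onto a spare
// list.  Slot 0 of each block is unused, as in the real code, and the initial
// set-up of the first block is omitted.
#include <cassert>
#include <cstddef>

namespace cfls_sketch_spool {

const size_t kSlots = 8;                   // slots per block (index 0 unused)

struct SpoolBlock {
  SpoolBlock* next;
  long        slots[kSlots];               // stand-ins for displaced mark words
};

struct Spool {
  SpoolBlock* head;  size_t first_index;   // next slot to consume
  SpoolBlock* tail;  size_t next_index;    // next slot to fill
  SpoolBlock* spare;                       // recycled, currently unused blocks

  void save(long hdr) {                    // cf. saveDisplacedHeader()
    tail->slots[next_index] = hdr;
    if (++next_index == kSlots) {          // tail block full: chain a new one
      tail->next = take_spare();           // the real code may also fail and retry later
      tail = tail->next;
      next_index = 1;
    }
  }

  long restore() {                         // cf. nextDisplacedHeader()
    assert(head != tail || first_index < next_index);
    long hdr = head->slots[first_index];
    if (++first_index == kSlots) {         // head block drained: recycle it
      SpoolBlock* drained = head;
      head = head->next;
      drained->next = spare;
      spare = drained;
      first_index = 1;
    }
    return hdr;
  }

  SpoolBlock* take_spare() {
    SpoolBlock* b = spare;
    if (b != NULL) { spare = b->next; b->next = NULL; }
    return b;                              // NULL means fresh space must be obtained
  }
};

} // namespace cfls_sketch_spool
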
  2454 // Ensure that spooling space exists. Return false if spooling space
  2455 // could not be obtained.
  2456 bool PromotionInfo::ensure_spooling_space_work() {
  2457   assert(!has_spooling_space(), "Only call when there is no spooling space");
  2458   // Try and obtain more spooling space
  2459   SpoolBlock* newSpool = getSpoolBlock();
  2460   assert(newSpool == NULL ||
  2461          (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL),
  2462         "getSpoolBlock() sanity check");
  2463   if (newSpool == NULL) {
  2464     return false;
  2466   _nextIndex = 1;
  2467   if (_spoolTail == NULL) {
  2468     _spoolTail = newSpool;
  2469     if (_spoolHead == NULL) {
  2470       _spoolHead = newSpool;
  2471       _firstIndex = 1;
  2472     } else {
  2473       assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL,
  2474              "Splice point invariant");
  2475       // Extra check that _splice_point is connected to list
  2476       #ifdef ASSERT
  2478         SpoolBlock* blk = _spoolHead;
  2479         for (; blk->nextSpoolBlock != NULL;
  2480              blk = blk->nextSpoolBlock);
  2481         assert(blk != NULL && blk == _splice_point,
  2482                "Splice point incorrect");
  2484       #endif // ASSERT
  2485       _splice_point->nextSpoolBlock = newSpool;
  2487   } else {
  2488     assert(_spoolHead != NULL, "spool list consistency");
  2489     _spoolTail->nextSpoolBlock = newSpool;
  2490     _spoolTail = newSpool;
  2492   return true;
  2495 // Get a free spool buffer from the free pool, getting a new block
  2496 // from the heap if necessary.
  2497 SpoolBlock* PromotionInfo::getSpoolBlock() {
  2498   SpoolBlock* res;
  2499   if ((res = _spareSpool) != NULL) {
  2500     _spareSpool = _spareSpool->nextSpoolBlock;
  2501     res->nextSpoolBlock = NULL;
  2502   } else {  // spare spool exhausted, get some from heap
  2503     res = (SpoolBlock*)(space()->allocateScratch(refillSize()));
  2504     if (res != NULL) {
  2505       res->init();
  2508   assert(res == NULL || res->nextSpoolBlock == NULL, "postcondition");
  2509   return res;
  2512 void PromotionInfo::startTrackingPromotions() {
  2513   assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
  2514          "spooling inconsistency?");
  2515   _firstIndex = _nextIndex = 1;
  2516   _tracking = true;
  2519 void PromotionInfo::stopTrackingPromotions() {
  2520   assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
  2521          "spooling inconsistency?");
  2522   _firstIndex = _nextIndex = 1;
  2523   _tracking = false;
  2526 // When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
  2527 // points to the next slot available for filling.
  2528 // The set of slots holding displaced headers are then all those in the
  2529 // right-open interval denoted by:
  2530 //
  2531 //    [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
  2532 //
  2533 // When _spoolTail is NULL, then the set of slots with displaced headers
  2534 // is all those starting at the slot <_spoolHead, _firstIndex> and
  2535 // going up to the last slot of last block in the linked list.
   2536 // In this latter case, _splice_point points to the tail block of
  2537 // this linked list of blocks holding displaced headers.
  2538 void PromotionInfo::verify() const {
  2539   // Verify the following:
  2540   // 1. the number of displaced headers matches the number of promoted
  2541   //    objects that have displaced headers
  2542   // 2. each promoted object lies in this space
  2543   debug_only(
  2544     PromotedObject* junk = NULL;
  2545     assert(junk->next_addr() == (void*)(oop(junk)->mark_addr()),
  2546            "Offset of PromotedObject::_next is expected to align with "
  2547            "  the OopDesc::_mark within OopDesc");
  2548   )
  2549   // FIXME: guarantee????
  2550   guarantee(_spoolHead == NULL || _spoolTail != NULL ||
  2551             _splice_point != NULL, "list consistency");
  2552   guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency");
  2553   // count the number of objects with displaced headers
  2554   size_t numObjsWithDisplacedHdrs = 0;
  2555   for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) {
  2556     guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment");
  2557     // the last promoted object may fail the mark() != NULL test of is_oop().
  2558     guarantee(curObj->next() == NULL || oop(curObj)->is_oop(), "must be an oop");
  2559     if (curObj->hasDisplacedMark()) {
  2560       numObjsWithDisplacedHdrs++;
  2561     }
  2562   }
  2563   // Count the number of displaced headers
  2564   size_t numDisplacedHdrs = 0;
  2565   for (SpoolBlock* curSpool = _spoolHead;
  2566        curSpool != _spoolTail && curSpool != NULL;
  2567        curSpool = curSpool->nextSpoolBlock) {
  2568     // the first entry is just a self-pointer; indices 1 through
  2569     // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
  2570     guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr,
  2571               "first entry of displacedHdr should be self-referential");
  2572     numDisplacedHdrs += curSpool->bufferSize - 1;
  2573   }
  2574   guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0),
  2575             "internal consistency");
  2576   guarantee(_spoolTail != NULL || _nextIndex == 1,
  2577             "Inconsistency between _spoolTail and _nextIndex");
  2578   // We overcounted (_firstIndex-1) worth of slots in block
  2579   // _spoolHead and we undercounted (_nextIndex-1) worth of
  2580   // slots in block _spoolTail. We make an appropriate
  2581   // adjustment by subtracting the first and adding the
  2582   // second:  - (_firstIndex - 1) + (_nextIndex - 1)
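  // Editor's note -- worked example with hypothetical numbers: if every
  // block has bufferSize == 256, the list is H -> M -> T, _firstIndex == 3
  // and _nextIndex == 5, then the loop above counts 2 * 255 = 510 slots
  // (for H and M).  The true count is (256 - 3) + 255 + 4 = 512, and indeed
  // 510 + (_nextIndex - _firstIndex) = 510 + 2 = 512.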
  2583   numDisplacedHdrs += (_nextIndex - _firstIndex);
  2584   guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
  2585 }
  2588 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
  2589   _cfls(cfls)
  2590 {
  2591   _blocks_to_claim = CMSParPromoteBlocksToClaim;
  2592   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2593        i < CompactibleFreeListSpace::IndexSetSize;
  2594        i += CompactibleFreeListSpace::IndexSetStride) {
  2595     _indexedFreeList[i].set_size(i);
  2596   }
  2597 }
  2599 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  2600   FreeChunk* res;
  2601   word_sz = _cfls->adjustObjectSize(word_sz);
  2602   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
  2603     // This locking manages sync with other large object allocations.
  2604     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
  2605                     Mutex::_no_safepoint_check_flag);
  2606     res = _cfls->getChunkFromDictionaryExact(word_sz);
  2607     if (res == NULL) return NULL;
  2608   } else {
  2609     FreeList* fl = &_indexedFreeList[word_sz];
  2610     bool filled = false; //TRAP
  2611     if (fl->count() == 0) {
  2612       filled = true; //TRAP
  2613       // Attempt to refill this local free list.
  2614       _cfls->par_get_chunk_of_blocks(word_sz, _blocks_to_claim, fl);
  2615       // If it didn't work, give up.
  2616       if (fl->count() == 0) return NULL;
  2617     }
  2618     res = fl->getChunkAtHead();
  2619     assert(res != NULL, "Why was count non-zero?");
  2620   }
  2621   res->markNotFree();
  2622   assert(!res->isFree(), "shouldn't be marked free");
  2623   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  2624   // mangle a just allocated object with a distinct pattern.
  2625   debug_only(res->mangleAllocated(word_sz));
  2626   return (HeapWord*)res;
  2627 }
  2629 void CFLS_LAB::retire() {
  2630   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2631        i < CompactibleFreeListSpace::IndexSetSize;
  2632        i += CompactibleFreeListSpace::IndexSetStride) {
  2633     if (_indexedFreeList[i].count() > 0) {
  2634       MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
  2635                       Mutex::_no_safepoint_check_flag);
  2636       _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
  2637       // Reset this list.
  2638       _indexedFreeList[i] = FreeList();
  2639       _indexedFreeList[i].set_size(i);
  2640     }
  2641   }
  2642 }
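// Editor's sketch (not part of the original source): the intended CFLS_LAB
// lifecycle as a parallel promotion worker might use it -- construct against
// the space, allocate, then retire so unclaimed chunks go back to the shared
// indexed free lists.  The guard macro, function name and the fallback
// comment are hypothetical.
#ifdef CFLS_LAB_USAGE_SKETCH
static HeapWord* cfls_lab_usage_sketch(CompactibleFreeListSpace* cfls,
                                       size_t obj_word_size) {
  CFLS_LAB lab(cfls);                        // per-worker local free lists
  HeapWord* buf = lab.alloc(obj_word_size);  // small sizes: local indexed list
                                             // (refilled via
                                             // par_get_chunk_of_blocks);
                                             // large sizes: shared dictionary
  if (buf == NULL) {
    // a real caller would fall back to a slower, shared allocation path here
  }
  lab.retire();                              // return leftover chunks
  return buf;
}
#endif // CFLS_LAB_USAGE_SKETCH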
  2644 void
  2645 CompactibleFreeListSpace::
  2646 par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
  2647   assert(fl->count() == 0, "Precondition.");
  2648   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
  2649          "Precondition");
  2651   // We'll try all multiples of word_sz in the indexed set (starting with
  2652   // word_sz itself), then try getting a big chunk and splitting it.
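  // Editor's note: as written, the loop condition below also requires
  // "k == 1", so only word_sz itself is actually tried from the indexed set
  // before falling through to the dictionary path; the k-way splitting arm
  // is kept but is not reached after the first iteration.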
  2653   int k = 1;
  2654   size_t cur_sz = k * word_sz;
  2655   bool found = false;
  2656   while (cur_sz < CompactibleFreeListSpace::IndexSetSize && k == 1) {
  2657     FreeList* gfl = &_indexedFreeList[cur_sz];
  2658     FreeList fl_for_cur_sz;  // Empty.
  2659     fl_for_cur_sz.set_size(cur_sz);
  2660     {
  2661       MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
  2662                       Mutex::_no_safepoint_check_flag);
  2663       if (gfl->count() != 0) {
  2664         size_t nn = MAX2(n/k, (size_t)1);
  2665         gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
  2666         found = true;
  2667       }
  2668     }
  2669     // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
  2670     if (found) {
  2671       if (k == 1) {
  2672         fl->prepend(&fl_for_cur_sz);
  2673       } else {
  2674         // Divide each block on fl_for_cur_sz up k ways.
  2675         FreeChunk* fc;
  2676         while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
  2677           // Must do this in reverse order, so that anybody attempting to
  2678           // access the main chunk sees it as a single free block until we
  2679           // change it.
  2680           size_t fc_size = fc->size();
  2681           for (int i = k-1; i >= 0; i--) {
  2682             FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2683             ffc->setSize(word_sz);
  2684             ffc->linkNext(NULL);
  2685             ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2686             // Above must occur before BOT is updated below.
  2687             // splitting from the right, fc_size == (k - i + 1) * wordsize
  2688             _bt.mark_block((HeapWord*)ffc, word_sz);
  2689             fc_size -= word_sz;
  2690             _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
  2691             _bt.verify_single_block((HeapWord*)fc, fc_size);
  2692             _bt.verify_single_block((HeapWord*)ffc, ffc->size());
  2693             // Push this on "fl".
  2694             fl->returnChunkAtHead(ffc);
  2695           }
  2696           // TRAP
  2697           assert(fl->tail()->next() == NULL, "List invariant.");
  2698         }
  2699       }
  2700       return;
  2701     }
  2702     k++; cur_sz = k * word_sz;
  2703   }
  2704   // Otherwise, we'll split a block from the dictionary.
  2705   FreeChunk* fc = NULL;
  2706   FreeChunk* rem_fc = NULL;
  2707   size_t rem;
  2708   {
  2709     MutexLockerEx x(parDictionaryAllocLock(),
  2710                     Mutex::_no_safepoint_check_flag);
  2711     while (n > 0) {
  2712       fc = dictionary()->getChunk(MAX2(n * word_sz,
  2713                                   _dictionary->minSize()),
  2714                                   FreeBlockDictionary::atLeast);
  2715       if (fc != NULL) {
  2716         _bt.allocated((HeapWord*)fc, fc->size());  // update _unallocated_blk
  2717         dictionary()->dictCensusUpdate(fc->size(),
  2718                                        true /*split*/,
  2719                                        false /*birth*/);
  2720         break;
  2721       } else {
  2722         n--;
  2723       }
  2724     }
  2725     if (fc == NULL) return;
  2726     // Otherwise, split up that block.
  2727     size_t nn = fc->size() / word_sz;
  2728     n = MIN2(nn, n);
  2729     rem = fc->size() - n * word_sz;
  2730     // If there is a remainder, and it's too small, allocate one fewer.
  2731     if (rem > 0 && rem < MinChunkSize) {
  2732       n--; rem += word_sz;
  2733     }
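    // Editor's note -- worked example with hypothetical sizes: if the chunk
    // obtained has fc->size() == 100 words, word_sz == 8 and n == 16, then
    // nn == 12, n becomes 12 and rem == 100 - 96 == 4; if that is below
    // MinChunkSize, n drops to 11 and rem grows to 12 words.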
  2734     // First return the remainder, if any.
  2735     // Note that we hold the lock until we decide if we're going to give
  2736     // back the remainder to the dictionary, since a contending allocator
  2737     // may otherwise see the heap as empty.  (We're willing to take that
  2738     // hit if the block is a small block.)
  2739     if (rem > 0) {
  2740       size_t prefix_size = n * word_sz;
  2741       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
  2742       rem_fc->setSize(rem);
  2743       rem_fc->linkNext(NULL);
  2744       rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2745       // Above must occur before BOT is updated below.
  2746       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
  2747       if (rem >= IndexSetSize) {
  2748         returnChunkToDictionary(rem_fc);
  2749         dictionary()->dictCensusUpdate(fc->size(),
  2750                                        true /*split*/,
  2751                                        true /*birth*/);
  2752         rem_fc = NULL;
  2753       }
  2754       // Otherwise, return it to the small list below.
  2755     }
  2756   }
  2757   //
  2758   if (rem_fc != NULL) {
  2759     MutexLockerEx x(_indexedFreeListParLocks[rem],
  2760                     Mutex::_no_safepoint_check_flag);
  2761     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
  2762     _indexedFreeList[rem].returnChunkAtHead(rem_fc);
  2763     smallSplitBirth(rem);
  2764   }
  2766   // Now do the splitting up.
  2767   // Must do this in reverse order, so that anybody attempting to
  2768   // access the main chunk sees it as a single free block until we
  2769   // change it.
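  // Editor's note -- illustration with a hypothetical n == 3: the loop below
  // first carves the chunk at offset 2 * word_sz, then the one at offset
  // 1 * word_sz, and only afterwards (outside the loop) is fc itself resized
  // to word_sz.  A concurrent reader that looks at fc before that last step
  // still sees one large free block covering the not-yet-carved prefix.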
  2770   size_t fc_size = n * word_sz;
  2771   // All but first chunk in this loop
  2772   for (ssize_t i = n-1; i > 0; i--) {
  2773     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2774     ffc->setSize(word_sz);
  2775     ffc->linkNext(NULL);
  2776     ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2777     // Above must occur before BOT is updated below.
  2778     // splitting from the right, fc_size == (n - i + 1) * wordsize
  2779     _bt.mark_block((HeapWord*)ffc, word_sz);
  2780     fc_size -= word_sz;
  2781     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
  2782     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
  2783     _bt.verify_single_block((HeapWord*)fc, fc_size);
  2784     // Push this on "fl".
  2785     fl->returnChunkAtHead(ffc);
  2786   }
  2787   // First chunk
  2788   fc->setSize(word_sz);
  2789   fc->linkNext(NULL);
  2790   fc->linkPrev(NULL);
  2791   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  2792   _bt.verify_single_block((HeapWord*)fc, fc->size());
  2793   fl->returnChunkAtHead(fc);
  2795   {
  2796     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  2797                     Mutex::_no_safepoint_check_flag);
  2798     ssize_t new_births = _indexedFreeList[word_sz].splitBirths() + n;
  2799     _indexedFreeList[word_sz].set_splitBirths(new_births);
  2800     ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
  2801     _indexedFreeList[word_sz].set_surplus(new_surplus);
  2802   }
  2804   // TRAP
  2805   assert(fl->tail()->next() == NULL, "List invariant.");
  2806 }
  2808 // Set up the space's par_seq_tasks structure for work claiming
  2809 // for parallel rescan. See CMSParRemarkTask where this is currently used.
  2810 // XXX Need to suitably abstract and generalize this and the next
  2811 // method into one.
  2812 void
  2813 CompactibleFreeListSpace::
  2814 initialize_sequential_subtasks_for_rescan(int n_threads) {
  2815   // The "size" of each task is fixed according to rescan_task_size.
  2816   assert(n_threads > 0, "Unexpected n_threads argument");
  2817   const size_t task_size = rescan_task_size();
  2818   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
  2819   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  2820   assert(n_tasks == 0 ||
  2821          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
  2822           (used_region().start() + n_tasks*task_size >= used_region().end())),
  2823          "n_tasks calculation incorrect");
  2824   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  2825   assert(!pst->valid(), "Clobbering existing data?");
  2826   pst->set_par_threads(n_threads);
  2827   pst->set_n_tasks((int)n_tasks);
  2828 }
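// Editor's sketch (not part of the original source): the n_tasks computation
// above is plain ceiling division, restated here with made-up numbers.  The
// guard macro and function name are hypothetical.
#ifdef CFLS_TASK_MATH_SKETCH
static size_t n_tasks_sketch(size_t region_words, size_t task_size) {
  // e.g. region_words == 10000 and task_size == 4096 give 3 tasks, and
  // (3 - 1) * 4096 < 10000 <= 3 * 4096, which is what the asserts check.
  return (region_words + task_size - 1) / task_size;
}
#endif // CFLS_TASK_MATH_SKETCH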
  2830 // Set up the space's par_seq_tasks structure for work claiming
  2831 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
  2832 void
  2833 CompactibleFreeListSpace::
  2834 initialize_sequential_subtasks_for_marking(int n_threads,
  2835                                            HeapWord* low) {
  2836   // The "size" of each task is fixed according to marking_task_size.
  2837   assert(n_threads > 0, "Unexpected n_threads argument");
  2838   const size_t task_size = marking_task_size();
  2839   assert(task_size > CardTableModRefBS::card_size_in_words &&
  2840          (task_size %  CardTableModRefBS::card_size_in_words == 0),
  2841          "Otherwise arithmetic below would be incorrect");
  2842   MemRegion span = _gen->reserved();
  2843   if (low != NULL) {
  2844     if (span.contains(low)) {
  2845       // Align low down to a card boundary so that
  2846       // we can use block_offset_careful() on span boundaries.
  2847       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
  2848                                  CardTableModRefBS::card_size);
  2849       // Clip span prefix at aligned_low
  2850       span = span.intersection(MemRegion(aligned_low, span.end()));
  2851     } else if (low > span.end()) {
  2852       span = MemRegion(low, low);  // Null region
  2853     } // else use entire span
  2854   }
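  // Editor's note -- illustration with hypothetical values: with a card size
  // of 512 bytes, a "low" that points 700 bytes past a card boundary is
  // aligned down to that 512-byte boundary, so the clipped span still starts
  // on a card and block_offset_careful() remains usable at its edges.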
  2855   assert(span.is_empty() ||
  2856          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
  2857         "span should start at a card boundary");
  2858   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  2859   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  2860   assert(n_tasks == 0 ||
  2861          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
  2862           (span.start() + n_tasks*task_size >= span.end())),
  2863          "n_tasks calculation incorrect");
  2864   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  2865   assert(!pst->valid(), "Clobbering existing data?");
  2866   pst->set_par_threads(n_threads);
  2867   pst->set_n_tasks((int)n_tasks);
  2868 }
