src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author:      kvn
date:        Thu, 27 May 2010 18:01:56 -0700
changeset:   1926 (2d127394260e)
parent:      1876 (a8127dc669ba)
child:       1934 (e9ff18c4ace7)
permissions: -rw-r--r--

6916623: Align object to 16 bytes to use Compressed Oops with java heap up to 64Gb
Summary: Added new product ObjectAlignmentInBytes flag to control object alignment.
Reviewed-by: twisti, ysr, iveresov

     1 /*
     2  * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_compactibleFreeListSpace.cpp.incl"
    28 /////////////////////////////////////////////////////////////////////////
    29 //// CompactibleFreeListSpace
    30 /////////////////////////////////////////////////////////////////////////
    32 // highest ranked  free list lock rank
    33 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
    35 // Defaults are 0 so things will break badly if incorrectly initialized.
    36 int CompactibleFreeListSpace::IndexSetStart  = 0;
    37 int CompactibleFreeListSpace::IndexSetStride = 0;
    39 size_t MinChunkSize = 0;
    41 void CompactibleFreeListSpace::set_cms_values() {
    42   // Set CMS global values
    43   assert(MinChunkSize == 0, "already set");
    44   #define numQuanta(x,y) ((x+y-1)/y)
    45   MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
    47   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
    48   IndexSetStart  = MinObjAlignment;
    49   IndexSetStride = MinObjAlignment;
    50 }
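
The ObjectAlignmentInBytes flag introduced by this changeset drives MinObjAlignmentInBytes/MinObjAlignment, which the quantization above turns into MinChunkSize and IndexSetStride. The following standalone sketch is an editor's illustration, not part of this file; the FreeChunk size and word size are assumptions chosen only to show the arithmetic for 8- and 16-byte alignment.

#include <cstddef>
#include <cstdio>

// numQuanta(x, y) rounds x up to a whole number of y-byte quanta.
static size_t num_quanta(size_t x, size_t y) { return (x + y - 1) / y; }

int main() {
  const size_t heap_word_bytes  = 8;   // assumption: 64-bit HeapWord
  const size_t free_chunk_bytes = 24;  // assumption standing in for sizeof(FreeChunk)
  for (size_t align_bytes = 8; align_bytes <= 16; align_bytes += 8) {
    size_t min_obj_alignment = align_bytes / heap_word_bytes;  // words per quantum
    size_t min_chunk_size =
        num_quanta(free_chunk_bytes, align_bytes) * min_obj_alignment;
    printf("ObjectAlignmentInBytes=%zu -> MinChunkSize=%zu words, IndexSetStride=%zu\n",
           align_bytes, min_chunk_size, min_obj_alignment);
  }
  return 0;
}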
    52 // Constructor
    53 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
    54   MemRegion mr, bool use_adaptive_freelists,
    55   FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
    56   _dictionaryChoice(dictionaryChoice),
    57   _adaptive_freelists(use_adaptive_freelists),
    58   _bt(bs, mr),
    59   // free list locks are in the range of values taken by _lockRank
    60   // This range currently is [_leaf+2, _leaf+3]
    61   // Note: this requires that CFLspace c'tors
    62   // are called serially in the order in which the locks
    63   // are acquired in the program text. This is true today.
    64   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
    65   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
    66                           "CompactibleFreeListSpace._dict_par_lock", true),
    67   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    68                     CMSRescanMultiple),
    69   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    70                     CMSConcMarkMultiple),
    71   _collector(NULL)
    72 {
    73   _bt.set_space(this);
    74   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    75   // We have all of "mr", all of which we place in the dictionary
    76   // as one big chunk. We'll need to decide here which of several
    77   // possible alternative dictionary implementations to use. For
    78   // now the choice is easy, since we have only one working
    79   // implementation, namely, the simple binary tree (splaying
    80   // temporarily disabled).
    81   switch (dictionaryChoice) {
    82     case FreeBlockDictionary::dictionarySplayTree:
    83     case FreeBlockDictionary::dictionarySkipList:
    84     default:
    85       warning("dictionaryChoice: selected option not understood; using"
    86               " default BinaryTreeDictionary implementation instead.");
    87     case FreeBlockDictionary::dictionaryBinaryTree:
    88       _dictionary = new BinaryTreeDictionary(mr);
    89       break;
    90   }
    91   assert(_dictionary != NULL, "CMS dictionary initialization");
    92   // The indexed free lists are initially all empty and are lazily
    93   // filled in on demand. Initialize the array elements to NULL.
    94   initializeIndexedFreeListArray();
    96   // Not using adaptive free lists assumes that allocation is first
    97   // from the linAB's.  Also a cms perm gen which can be compacted
    98   // has to have the klass's klassKlass allocated at a lower
    99   // address in the heap than the klass so that the klassKlass is
   100   // moved to its new location before the klass is moved.
   101   // Set the _refillSize for the linear allocation blocks
   102   if (!use_adaptive_freelists) {
   103     FreeChunk* fc = _dictionary->getChunk(mr.word_size());
   104     // The small linAB initially has all the space and will allocate
   105     // a chunk of any size.
   106     HeapWord* addr = (HeapWord*) fc;
   107     _smallLinearAllocBlock.set(addr, fc->size() ,
   108       1024*SmallForLinearAlloc, fc->size());
   109     // Note that _unallocated_block is not updated here.
   110     // Allocations from the linear allocation block should
   111     // update it.
   112   } else {
   113     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
   114                                SmallForLinearAlloc);
   115   }
   116   // CMSIndexedFreeListReplenish should be at least 1
   117   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
   118   _promoInfo.setSpace(this);
   119   if (UseCMSBestFit) {
   120     _fitStrategy = FreeBlockBestFitFirst;
   121   } else {
   122     _fitStrategy = FreeBlockStrategyNone;
   123   }
   124   checkFreeListConsistency();
   126   // Initialize locks for parallel case.
   127   if (ParallelGCThreads > 0) {
   128     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   129       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
   130                                               "a freelist par lock",
   131                                               true);
   132       if (_indexedFreeListParLocks[i] == NULL)
   133         vm_exit_during_initialization("Could not allocate a par lock");
   134       DEBUG_ONLY(
   135         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
   136       )
   137     }
   138     _dictionary->set_par_lock(&_parDictionaryAllocLock);
   139   }
   140 }
   142 // Like CompactibleSpace forward() but always calls cross_threshold() to
   143 // update the block offset table.  Removed initialize_threshold call because
   144 // CFLS does not use a block offset array for contiguous spaces.
   145 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
   146                                     CompactPoint* cp, HeapWord* compact_top) {
   147   // q is alive
   148   // First check if we should switch compaction space
   149   assert(this == cp->space, "'this' should be current compaction space.");
   150   size_t compaction_max_size = pointer_delta(end(), compact_top);
   151   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
   152     "virtual adjustObjectSize_v() method is not correct");
   153   size_t adjusted_size = adjustObjectSize(size);
   154   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
   155          "no small fragments allowed");
   156   assert(minimum_free_block_size() == MinChunkSize,
   157          "for de-virtualized reference below");
   158   // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
   159   if (adjusted_size + MinChunkSize > compaction_max_size &&
   160       adjusted_size != compaction_max_size) {
   161     do {
   162       // switch to next compaction space
   163       cp->space->set_compaction_top(compact_top);
   164       cp->space = cp->space->next_compaction_space();
   165       if (cp->space == NULL) {
   166         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
   167         assert(cp->gen != NULL, "compaction must succeed");
   168         cp->space = cp->gen->first_compaction_space();
   169         assert(cp->space != NULL, "generation must have a first compaction space");
   170       }
   171       compact_top = cp->space->bottom();
   172       cp->space->set_compaction_top(compact_top);
   173       // The correct adjusted_size may not be the same as that for this method
   174       // (i.e., cp->space may no longer be "this"), so adjust the size again.
   175       // Use the virtual method which is not used above to save the virtual
   176       // dispatch.
   177       adjusted_size = cp->space->adjust_object_size_v(size);
   178       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
   179       assert(cp->space->minimum_free_block_size() == 0, "just checking");
   180     } while (adjusted_size > compaction_max_size);
   181   }
   183   // store the forwarding pointer into the mark word
   184   if ((HeapWord*)q != compact_top) {
   185     q->forward_to(oop(compact_top));
   186     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
   187   } else {
   188     // if the object isn't moving we can just set the mark to the default
   189     // mark and handle it specially later on.
   190     q->init_mark();
   191     assert(q->forwardee() == NULL, "should be forwarded to NULL");
   192   }
   194   VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
   195   compact_top += adjusted_size;
   197   // we need to update the offset table so that the beginnings of objects can be
   198   // found during scavenge.  Note that we are updating the offset table based on
   199   // where the object will be once the compaction phase finishes.
   201   // Always call cross_threshold().  A contiguous space can only call it when
   202   // the compaction_top exceeds the current threshold but not for a
   203   // non-contiguous space.
   204   cp->threshold =
   205     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
   206   return compact_top;
   207 }
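
To make the residual-fragment rule in forward() concrete, here is a small self-contained sketch (an editor's illustration with made-up sizes, not VM code) of the condition that forces a switch to the next compaction space.

#include <cstddef>
#include <cassert>

// Editor's restatement of the residual-fragment rule in forward():
// a block must either fit exactly or leave a residue of at least
// min_chunk_size words behind; the sizes below are invented for the example.
static bool must_switch_space(size_t adjusted_size,
                              size_t compaction_max_size,
                              size_t min_chunk_size) {
  return adjusted_size + min_chunk_size > compaction_max_size &&
         adjusted_size != compaction_max_size;
}

int main() {
  const size_t MinChunk = 4;
  assert(!must_switch_space(10, 10, MinChunk)); // exact fit: stay in this space
  assert(!must_switch_space(10, 14, MinChunk)); // residue of 4 words is acceptable
  assert( must_switch_space(10, 12, MinChunk)); // would leave a 2-word sliver
  return 0;
}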
   209 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
   210 // and use of single_block instead of alloc_block.  The name here is not really
   211 // appropriate - maybe a more general name could be invented for both the
   212 // contiguous and noncontiguous spaces.
   214 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
   215   _bt.single_block(start, the_end);
   216   return end();
   217 }
   219 // Initialize them to NULL.
   220 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
   221   for (size_t i = 0; i < IndexSetSize; i++) {
   222     // Note that on platforms where objects are double word aligned,
   223     // the odd array elements are not used.  It is convenient, however,
   224     // to map directly from the object size to the array element.
   225     _indexedFreeList[i].reset(IndexSetSize);
   226     _indexedFreeList[i].set_size(i);
   227     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   228     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   229     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   230     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   231   }
   232 }
   234 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
   235   for (int i = 1; i < IndexSetSize; i++) {
   236     assert(_indexedFreeList[i].size() == (size_t) i,
   237       "Indexed free list sizes are incorrect");
   238     _indexedFreeList[i].reset(IndexSetSize);
   239     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   240     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   241     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   242     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   243   }
   244 }
   246 void CompactibleFreeListSpace::reset(MemRegion mr) {
   247   resetIndexedFreeListArray();
   248   dictionary()->reset();
   249   if (BlockOffsetArrayUseUnallocatedBlock) {
   250     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
   251     // Everything's allocated until proven otherwise.
   252     _bt.set_unallocated_block(end());
   253   }
   254   if (!mr.is_empty()) {
   255     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
   256     _bt.single_block(mr.start(), mr.word_size());
   257     FreeChunk* fc = (FreeChunk*) mr.start();
   258     fc->setSize(mr.word_size());
   259     if (mr.word_size() >= IndexSetSize ) {
   260       returnChunkToDictionary(fc);
   261     } else {
   262       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
   263       _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
   264     }
   265   }
   266   _promoInfo.reset();
   267   _smallLinearAllocBlock._ptr = NULL;
   268   _smallLinearAllocBlock._word_size = 0;
   269 }
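
The routing in reset() mirrors the general rule for returning free chunks to this space: chunks smaller than IndexSetSize words land in the array slot indexed by their exact word size, and anything larger goes to the dictionary. Below is a trivial editor's sketch of that rule; the IndexSetSize value is an assumption (commonly 257 words).

#include <cstddef>
#include <cstdio>

// Editor's illustration of the chunk-routing rule used when free space is
// handed back to the space; kIndexSetSize is an assumption (commonly 257).
static const size_t kIndexSetSize = 257;

static const char* destination_for(size_t word_size) {
  return (word_size >= kIndexSetSize) ? "dictionary" : "indexed free list";
}

int main() {
  const size_t samples[] = { 3, 16, 256, 257, 4096 };
  for (size_t s : samples) {
    printf("chunk of %zu words -> %s\n", s, destination_for(s));
  }
  return 0;
}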
   271 void CompactibleFreeListSpace::reset_after_compaction() {
   272   // Reset the space to the new reality - one free chunk.
   273   MemRegion mr(compaction_top(), end());
   274   reset(mr);
   275   // Now refill the linear allocation block(s) if possible.
   276   if (_adaptive_freelists) {
   277     refillLinearAllocBlocksIfNeeded();
   278   } else {
   279     // Place as much of mr in the linAB as we can get,
   280     // provided it was big enough to go into the dictionary.
   281     FreeChunk* fc = dictionary()->findLargestDict();
   282     if (fc != NULL) {
   283       assert(fc->size() == mr.word_size(),
   284              "Why was the chunk broken up?");
   285       removeChunkFromDictionary(fc);
   286       HeapWord* addr = (HeapWord*) fc;
   287       _smallLinearAllocBlock.set(addr, fc->size() ,
   288         1024*SmallForLinearAlloc, fc->size());
   289       // Note that _unallocated_block is not updated here.
   290     }
   291   }
   292 }
   294 // Walks the entire dictionary, returning a coterminal
   295 // chunk, if it exists. Use with caution since it involves
   296 // a potentially complete walk of a potentially large tree.
   297 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
   299   assert_lock_strong(&_freelistLock);
   301   return dictionary()->find_chunk_ends_at(end());
   302 }
   305 #ifndef PRODUCT
   306 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
   307   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   308     _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
   309   }
   310 }
   312 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
   313   size_t sum = 0;
   314   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   315     sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
   316   }
   317   return sum;
   318 }
   320 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
   321   size_t count = 0;
   322   for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
   323     debug_only(
   324       ssize_t total_list_count = 0;
   325       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   326          fc = fc->next()) {
   327         total_list_count++;
   328       }
   329       assert(total_list_count ==  _indexedFreeList[i].count(),
   330         "Count in list is incorrect");
   331     )
   332     count += _indexedFreeList[i].count();
   333   }
   334   return count;
   335 }
   337 size_t CompactibleFreeListSpace::totalCount() {
   338   size_t num = totalCountInIndexedFreeLists();
   339   num +=  dictionary()->totalCount();
   340   if (_smallLinearAllocBlock._word_size != 0) {
   341     num++;
   342   }
   343   return num;
   344 }
   345 #endif
   347 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
   348   FreeChunk* fc = (FreeChunk*) p;
   349   return fc->isFree();
   350 }
   352 size_t CompactibleFreeListSpace::used() const {
   353   return capacity() - free();
   354 }
   356 size_t CompactibleFreeListSpace::free() const {
   357   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   358   // if you do this while the structures are in flux you
   359   // may get an approximate answer only; for instance
   360   // because there is concurrent allocation either
   361   // directly by mutators or for promotion during a GC.
   362   // It's "MT-safe", however, in the sense that you are guaranteed
   363   // not to crash and burn, for instance, because of walking
   364   // pointers that could disappear as you were walking them.
   365   // The approximation is because the various components
   366   // that are read below are not read atomically (and
   367   // further the computation of totalSizeInIndexedFreeLists()
   368   // is itself a non-atomic computation). The normal use of
   369   // this is during a resize operation at the end of GC
   370   // and at that time you are guaranteed to get the
   371   // correct actual value. However, for instance, this is
   372   // also read completely asynchronously by the "perf-sampler"
   373   // that supports jvmstat, and you are apt to see the values
   374   // flicker in such cases.
   375   assert(_dictionary != NULL, "No _dictionary?");
   376   return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
   377           totalSizeInIndexedFreeLists() +
   378           _smallLinearAllocBlock._word_size) * HeapWordSize;
   379 }
   381 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
   382   assert(_dictionary != NULL, "No _dictionary?");
   383   assert_locked();
   384   size_t res = _dictionary->maxChunkSize();
   385   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
   386                        (size_t) SmallForLinearAlloc - 1));
   387   // XXX the following could potentially be pretty slow;
   388   // should one, pessimally for the rare cases when res
   389   // calculated above is less than IndexSetSize,
   390   // just return res calculated above? My reasoning was that
   391   // those cases will be so rare that the extra time spent doesn't
   392   // really matter....
   393   // Note: do not change the loop test i >= res + IndexSetStride
   394   // to i > res below, because i is unsigned and res may be zero.
   395   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
   396        i -= IndexSetStride) {
   397     if (_indexedFreeList[i].head() != NULL) {
   398       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   399       return i;
   400     }
   401   }
   402   return res;
   403 }
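
The warning about the loop test above deserves a concrete illustration. The sketch below (standalone, with illustrative start and stride values) shows why writing the bound as i >= res + IndexSetStride keeps an unsigned index from wrapping past zero, whereas a plain i > res can let it underflow when res is zero.

#include <cstddef>
#include <cstdio>

int main() {
  const size_t stride = 2;  // illustrative stride (IndexSetStride can be 2)
  const size_t res    = 0;  // lower bound; may legitimately be zero

  // Safe form: the test keeps i at least `stride` above `res`, so the
  // decrement can never take the unsigned index below res (or below zero).
  for (size_t i = 7; i >= res + stride; i -= stride) {
    printf("safe form visits i=%zu\n", i);  // prints 7, 5, 3 and stops
  }

  // Hazardous form: "i > res" lets i reach 1; 1 - 2 then wraps a size_t
  // around to SIZE_MAX, so the loop would run (nearly) forever.  A guard
  // keeps this sketch terminating.
  size_t iterations = 0;
  for (size_t i = 7; i > res; i -= stride) {
    if (++iterations > 5) {
      printf("wrapped around: i=%zu\n", i);
      break;
    }
  }
  return 0;
}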
   405 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
   406 const {
   407   reportIndexedFreeListStatistics();
   408   gclog_or_tty->print_cr("Layout of Indexed Freelists");
   409   gclog_or_tty->print_cr("---------------------------");
   410   FreeList::print_labels_on(st, "size");
   411   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   412     _indexedFreeList[i].print_on(gclog_or_tty);
   413     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   414          fc = fc->next()) {
   415       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
   416                           fc, (HeapWord*)fc + i,
   417                           fc->cantCoalesce() ? "\t CC" : "");
   418     }
   419   }
   420 }
   422 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
   423 const {
   424   _promoInfo.print_on(st);
   425 }
   427 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
   428 const {
   429   _dictionary->reportStatistics();
   430   st->print_cr("Layout of Freelists in Tree");
   431   st->print_cr("---------------------------");
   432   _dictionary->print_free_lists(st);
   433 }
   435 class BlkPrintingClosure: public BlkClosure {
   436   const CMSCollector*             _collector;
   437   const CompactibleFreeListSpace* _sp;
   438   const CMSBitMap*                _live_bit_map;
   439   const bool                      _post_remark;
   440   outputStream*                   _st;
   441 public:
   442   BlkPrintingClosure(const CMSCollector* collector,
   443                      const CompactibleFreeListSpace* sp,
   444                      const CMSBitMap* live_bit_map,
   445                      outputStream* st):
   446     _collector(collector),
   447     _sp(sp),
   448     _live_bit_map(live_bit_map),
   449     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
   450     _st(st) { }
   451   size_t do_blk(HeapWord* addr);
   452 };
   454 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
   455   size_t sz = _sp->block_size_no_stall(addr, _collector);
   456   assert(sz != 0, "Should always be able to compute a size");
   457   if (_sp->block_is_obj(addr)) {
   458     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
   459     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
   460       addr,
   461       dead ? "dead" : "live",
   462       sz,
   463       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
   464     if (CMSPrintObjectsInDump && !dead) {
   465       oop(addr)->print_on(_st);
   466       _st->print_cr("--------------------------------------");
   467     }
   468   } else { // free block
   469     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
   470       addr, sz, CMSPrintChunksInDump ? ":" : ".");
   471     if (CMSPrintChunksInDump) {
   472       ((FreeChunk*)addr)->print_on(_st);
   473       _st->print_cr("--------------------------------------");
   474     }
   475   }
   476   return sz;
   477 }
   479 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
   480   outputStream* st) {
   481   st->print_cr("\n=========================");
   482   st->print_cr("Block layout in CMS Heap:");
   483   st->print_cr("=========================");
   484   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
   485   blk_iterate(&bpcl);
   487   st->print_cr("\n=======================================");
   488   st->print_cr("Order & Layout of Promotion Info Blocks");
   489   st->print_cr("=======================================");
   490   print_promo_info_blocks(st);
   492   st->print_cr("\n===========================");
   493   st->print_cr("Order of Indexed Free Lists");
   494   st->print_cr("===========================");
   495   print_indexed_free_lists(st);
   497   st->print_cr("\n=================================");
   498   st->print_cr("Order of Free Lists in Dictionary");
   499   st->print_cr("=================================");
   500   print_dictionary_free_lists(st);
   501 }
   504 void CompactibleFreeListSpace::reportFreeListStatistics() const {
   505   assert_lock_strong(&_freelistLock);
   506   assert(PrintFLSStatistics != 0, "Reporting error");
   507   _dictionary->reportStatistics();
   508   if (PrintFLSStatistics > 1) {
   509     reportIndexedFreeListStatistics();
   510     size_t totalSize = totalSizeInIndexedFreeLists() +
   511                        _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
   512     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", totalSize, flsFrag());
   513   }
   514 }
   516 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
   517   assert_lock_strong(&_freelistLock);
   518   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
   519                       "--------------------------------\n");
   520   size_t totalSize = totalSizeInIndexedFreeLists();
   521   size_t   freeBlocks = numFreeBlocksInIndexedFreeLists();
   522   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", totalSize);
   523   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
   524   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", freeBlocks);
   525   if (freeBlocks != 0) {
   526     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", totalSize/freeBlocks);
   527   }
   528 }
   530 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
   531   size_t res = 0;
   532   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   533     debug_only(
   534       ssize_t recount = 0;
   535       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   536          fc = fc->next()) {
   537         recount += 1;
   538       }
   539       assert(recount == _indexedFreeList[i].count(),
   540         "Incorrect count in list");
   541     )
   542     res += _indexedFreeList[i].count();
   543   }
   544   return res;
   545 }
   547 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
   548   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
   549     if (_indexedFreeList[i].head() != NULL) {
   550       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   551       return (size_t)i;
   552     }
   553   }
   554   return 0;
   555 }
   557 void CompactibleFreeListSpace::set_end(HeapWord* value) {
   558   HeapWord* prevEnd = end();
   559   assert(prevEnd != value, "unnecessary set_end call");
   560   assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
   561   _end = value;
   562   if (prevEnd != NULL) {
   563     // Resize the underlying block offset table.
   564     _bt.resize(pointer_delta(value, bottom()));
   565     if (value <= prevEnd) {
   566       assert(value >= unallocated_block(), "New end is below unallocated block");
   567     } else {
   568       // Now, take this new chunk and add it to the free blocks.
   569       // Note that the BOT has not yet been updated for this block.
   570       size_t newFcSize = pointer_delta(value, prevEnd);
   571       // XXX This is REALLY UGLY and should be fixed up. XXX
   572       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
   573         // Mark the boundary of the new block in BOT
   574         _bt.mark_block(prevEnd, value);
   575         // put it all in the linAB
   576         if (ParallelGCThreads == 0) {
   577           _smallLinearAllocBlock._ptr = prevEnd;
   578           _smallLinearAllocBlock._word_size = newFcSize;
   579           repairLinearAllocBlock(&_smallLinearAllocBlock);
   580         } else { // ParallelGCThreads > 0
   581           MutexLockerEx x(parDictionaryAllocLock(),
   582                           Mutex::_no_safepoint_check_flag);
   583           _smallLinearAllocBlock._ptr = prevEnd;
   584           _smallLinearAllocBlock._word_size = newFcSize;
   585           repairLinearAllocBlock(&_smallLinearAllocBlock);
   586         }
   587         // Births of chunks put into a LinAB are not recorded.  Births
   588         // of chunks as they are allocated out of a LinAB are.
   589       } else {
   590         // Add the block to the free lists, if possible coalescing it
   591         // with the last free block, and update the BOT and census data.
   592         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
   593       }
   594     }
   595   }
   596 }
   598 class FreeListSpace_DCTOC : public Filtering_DCTOC {
   599   CompactibleFreeListSpace* _cfls;
   600   CMSCollector* _collector;
   601 protected:
   602   // Override.
   603 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
   604   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
   605                                        HeapWord* bottom, HeapWord* top, \
   606                                        ClosureType* cl);                \
   607       void walk_mem_region_with_cl_par(MemRegion mr,                    \
   608                                        HeapWord* bottom, HeapWord* top, \
   609                                        ClosureType* cl);                \
   610     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
   611                                        HeapWord* bottom, HeapWord* top, \
   612                                        ClosureType* cl)
   613   walk_mem_region_with_cl_DECL(OopClosure);
   614   walk_mem_region_with_cl_DECL(FilteringClosure);
   616 public:
   617   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
   618                       CMSCollector* collector,
   619                       OopClosure* cl,
   620                       CardTableModRefBS::PrecisionStyle precision,
   621                       HeapWord* boundary) :
   622     Filtering_DCTOC(sp, cl, precision, boundary),
   623     _cfls(sp), _collector(collector) {}
   624 };
   626 // We de-virtualize the block-related calls below, since we know that our
   627 // space is a CompactibleFreeListSpace.
   628 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
   629 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
   630                                                  HeapWord* bottom,              \
   631                                                  HeapWord* top,                 \
   632                                                  ClosureType* cl) {             \
   633    if (SharedHeap::heap()->n_par_threads() > 0) {                               \
   634      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   635    } else {                                                                     \
   636      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   637    }                                                                            \
   638 }                                                                               \
   639 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
   640                                                       HeapWord* bottom,         \
   641                                                       HeapWord* top,            \
   642                                                       ClosureType* cl) {        \
   643   /* Skip parts that are before "mr", in case "block_start" sent us             \
   644      back too far. */                                                           \
   645   HeapWord* mr_start = mr.start();                                              \
   646   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
   647   HeapWord* next = bottom + bot_size;                                           \
   648   while (next < mr_start) {                                                     \
   649     bottom = next;                                                              \
   650     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
   651     next = bottom + bot_size;                                                   \
   652   }                                                                             \
   653                                                                                 \
   654   while (bottom < top) {                                                        \
   655     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
   656         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   657                     oop(bottom)) &&                                             \
   658         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   659       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   660       bottom += _cfls->adjustObjectSize(word_sz);                               \
   661     } else {                                                                    \
   662       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
   663     }                                                                           \
   664   }                                                                             \
   665 }                                                                               \
   666 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
   667                                                         HeapWord* bottom,       \
   668                                                         HeapWord* top,          \
   669                                                         ClosureType* cl) {      \
   670   /* Skip parts that are before "mr", in case "block_start" sent us             \
   671      back too far. */                                                           \
   672   HeapWord* mr_start = mr.start();                                              \
   673   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
   674   HeapWord* next = bottom + bot_size;                                           \
   675   while (next < mr_start) {                                                     \
   676     bottom = next;                                                              \
   677     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
   678     next = bottom + bot_size;                                                   \
   679   }                                                                             \
   680                                                                                 \
   681   while (bottom < top) {                                                        \
   682     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
   683         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   684                     oop(bottom)) &&                                             \
   685         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   686       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   687       bottom += _cfls->adjustObjectSize(word_sz);                               \
   688     } else {                                                                    \
   689       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
   690     }                                                                           \
   691   }                                                                             \
   692 }
   694 // (There are only two of these, rather than N, because the split is due
   695 // only to the introduction of the FilteringClosure, a local part of the
   696 // impl of this abstraction.)
   697 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
   698 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
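
The explicitly qualified calls inside these macros (for example _cfls->CompactibleFreeListSpace::block_size(...)) are what suppress virtual dispatch. A minimal standalone sketch of that C++ idiom, with invented class names, follows; it is an editor's illustration, not VM code.

#include <cstddef>
#include <cstdio>

struct Space {
  virtual ~Space() {}
  virtual size_t block_size(const void* /*p*/) const { return 1; }
};

// Hypothetical subclass standing in for CompactibleFreeListSpace.
struct FreeListSpaceLike : public Space {
  virtual size_t block_size(const void* /*p*/) const { return 2; }
};

int main() {
  FreeListSpaceLike s;
  Space* base = &s;
  // Ordinary call: dispatched through the vtable.
  size_t a = base->block_size(0);
  // Explicitly qualified call: bound statically, no virtual dispatch.  This
  // is the form the walk_mem_region_with_cl macros use, because the space is
  // known to be a CompactibleFreeListSpace.
  FreeListSpaceLike* known = &s;
  size_t b = known->FreeListSpaceLike::block_size(0);
  printf("virtual=%zu qualified=%zu\n", a, b);
  return 0;
}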
   700 DirtyCardToOopClosure*
   701 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
   702                                       CardTableModRefBS::PrecisionStyle precision,
   703                                       HeapWord* boundary) {
   704   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
   705 }
   708 // Note on locking for the space iteration functions:
   709 // since the collector's iteration activities are concurrent with
   710 // allocation activities by mutators, absent a suitable mutual exclusion
   711 // mechanism the iterators may go awry. For instance a block being iterated
   712 // may suddenly be allocated or divided up and part of it allocated and
   713 // so on.
   715 // Apply the given closure to each block in the space.
   716 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
   717   assert_lock_strong(freelistLock());
   718   HeapWord *cur, *limit;
   719   for (cur = bottom(), limit = end(); cur < limit;
   720        cur += cl->do_blk_careful(cur));
   721 }
   723 // Apply the given closure to each block in the space.
   724 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
   725   assert_lock_strong(freelistLock());
   726   HeapWord *cur, *limit;
   727   for (cur = bottom(), limit = end(); cur < limit;
   728        cur += cl->do_blk(cur));
   729 }
   731 // Apply the given closure to each oop in the space.
   732 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
   733   assert_lock_strong(freelistLock());
   734   HeapWord *cur, *limit;
   735   size_t curSize;
   736   for (cur = bottom(), limit = end(); cur < limit;
   737        cur += curSize) {
   738     curSize = block_size(cur);
   739     if (block_is_obj(cur)) {
   740       oop(cur)->oop_iterate(cl);
   741     }
   742   }
   743 }
   745 // Apply the given closure to each oop in the space \intersect memory region.
   746 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
   747   assert_lock_strong(freelistLock());
   748   if (is_empty()) {
   749     return;
   750   }
   751   MemRegion cur = MemRegion(bottom(), end());
   752   mr = mr.intersection(cur);
   753   if (mr.is_empty()) {
   754     return;
   755   }
   756   if (mr.equals(cur)) {
   757     oop_iterate(cl);
   758     return;
   759   }
   760   assert(mr.end() <= end(), "just took an intersection above");
   761   HeapWord* obj_addr = block_start(mr.start());
   762   HeapWord* t = mr.end();
   764   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
   765   if (block_is_obj(obj_addr)) {
   766     // Handle first object specially.
   767     oop obj = oop(obj_addr);
   768     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
   769   } else {
   770     FreeChunk* fc = (FreeChunk*)obj_addr;
   771     obj_addr += fc->size();
   772   }
   773   while (obj_addr < t) {
   774     HeapWord* obj = obj_addr;
   775     obj_addr += block_size(obj_addr);
   776     // If "obj_addr" is not greater than top, then the
   777     // entire object "obj" is within the region.
   778     if (obj_addr <= t) {
   779       if (block_is_obj(obj)) {
   780         oop(obj)->oop_iterate(cl);
   781       }
   782     } else {
   783       // "obj" extends beyond end of region
   784       if (block_is_obj(obj)) {
   785         oop(obj)->oop_iterate(&smr_blk);
   786       }
   787       break;
   788     }
   789   }
   790 }
   792 // NOTE: In the following methods, in order to safely be able to
   793 // apply the closure to an object, we need to be sure that the
   794 // object has been initialized. We are guaranteed that an object
   795 // is initialized if we are holding the Heap_lock with the
   796 // world stopped.
   797 void CompactibleFreeListSpace::verify_objects_initialized() const {
   798   if (is_init_completed()) {
   799     assert_locked_or_safepoint(Heap_lock);
   800     if (Universe::is_fully_initialized()) {
   801       guarantee(SafepointSynchronize::is_at_safepoint(),
   802                 "Required for objects to be initialized");
   803     }
   804   } // else make a concession at vm start-up
   805 }
   807 // Apply the given closure to each object in the space
   808 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
   809   assert_lock_strong(freelistLock());
   810   NOT_PRODUCT(verify_objects_initialized());
   811   HeapWord *cur, *limit;
   812   size_t curSize;
   813   for (cur = bottom(), limit = end(); cur < limit;
   814        cur += curSize) {
   815     curSize = block_size(cur);
   816     if (block_is_obj(cur)) {
   817       blk->do_object(oop(cur));
   818     }
   819   }
   820 }
   822 // Apply the given closure to each live object in the space
   823 //   The usage of CompactibleFreeListSpace
   824 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
   825 // objects in the space to contain references to objects that are no
   826 // longer valid.  For example, an object may reference another object
   827 // that has already been swept up (collected).  This method uses
   828 // obj_is_alive() to determine whether it is safe to apply the closure to
   829 // an object.  See obj_is_alive() for details on how liveness of an
   830 // object is decided.
   832 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
   833   assert_lock_strong(freelistLock());
   834   NOT_PRODUCT(verify_objects_initialized());
   835   HeapWord *cur, *limit;
   836   size_t curSize;
   837   for (cur = bottom(), limit = end(); cur < limit;
   838        cur += curSize) {
   839     curSize = block_size(cur);
   840     if (block_is_obj(cur) && obj_is_alive(cur)) {
   841       blk->do_object(oop(cur));
   842     }
   843   }
   844 }
   846 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
   847                                                   UpwardsObjectClosure* cl) {
   848   assert_locked(freelistLock());
   849   NOT_PRODUCT(verify_objects_initialized());
   850   Space::object_iterate_mem(mr, cl);
   851 }
   853 // Callers of this iterator beware: The closure application should
   854 // be robust in the face of uninitialized objects and should (always)
   855 // return a correct size so that the next addr + size below gives us a
   856 // valid block boundary. [See for instance,
   857 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   858 // in ConcurrentMarkSweepGeneration.cpp.]
   859 HeapWord*
   860 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
   861   assert_lock_strong(freelistLock());
   862   HeapWord *addr, *last;
   863   size_t size;
   864   for (addr = bottom(), last  = end();
   865        addr < last; addr += size) {
   866     FreeChunk* fc = (FreeChunk*)addr;
   867     if (fc->isFree()) {
   868       // Since we hold the free list lock, which protects direct
   869       // allocation in this generation by mutators, a free object
   870       // will remain free throughout this iteration code.
   871       size = fc->size();
   872     } else {
   873       // Note that the object need not necessarily be initialized,
   874       // because (for instance) the free list lock does NOT protect
   875       // object initialization. The closure application below must
   876       // therefore be correct in the face of uninitialized objects.
   877       size = cl->do_object_careful(oop(addr));
   878       if (size == 0) {
   879         // An unparsable object found. Signal early termination.
   880         return addr;
   881       }
   882     }
   883   }
   884   return NULL;
   885 }
   887 // Callers of this iterator beware: The closure application should
   888 // be robust in the face of uninitialized objects and should (always)
   889 // return a correct size so that the next addr + size below gives us a
   890 // valid block boundary. [See for instance,
   891 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   892 // in ConcurrentMarkSweepGeneration.cpp.]
   893 HeapWord*
   894 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
   895   ObjectClosureCareful* cl) {
   896   assert_lock_strong(freelistLock());
   897   // Can't use used_region() below because it may not necessarily
   898   // be the same as [bottom(),end()); although we could
   899   // use [used_region().start(),round_to(used_region().end(),CardSize)),
   900   // that appears too cumbersome, so we just do the simpler check
   901   // in the assertion below.
   902   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
   903          "mr should be non-empty and within used space");
   904   HeapWord *addr, *end;
   905   size_t size;
   906   for (addr = block_start_careful(mr.start()), end  = mr.end();
   907        addr < end; addr += size) {
   908     FreeChunk* fc = (FreeChunk*)addr;
   909     if (fc->isFree()) {
   910       // Since we hold the free list lock, which protects direct
   911       // allocation in this generation by mutators, a free object
   912       // will remain free throughout this iteration code.
   913       size = fc->size();
   914     } else {
   915       // Note that the object need not necessarily be initialized,
   916       // because (for instance) the free list lock does NOT protect
   917       // object initialization. The closure application below must
   918       // therefore be correct in the face of uninitialized objects.
   919       size = cl->do_object_careful_m(oop(addr), mr);
   920       if (size == 0) {
   921         // An unparsable object found. Signal early termination.
   922         return addr;
   923       }
   924     }
   925   }
   926   return NULL;
   927 }
   930 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
   931   NOT_PRODUCT(verify_objects_initialized());
   932   return _bt.block_start(p);
   933 }
   935 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
   936   return _bt.block_start_careful(p);
   937 }
   939 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   940   NOT_PRODUCT(verify_objects_initialized());
   941   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
   942   // This must be volatile, or else there is a danger that the compiler
   943   // will compile the code below into a sometimes-infinite loop, by keeping
   944   // the value read the first time in a register.
   945   while (true) {
   946     // We must do this until we get a consistent view of the object.
   947     if (FreeChunk::indicatesFreeChunk(p)) {
   948       volatile FreeChunk* fc = (volatile FreeChunk*)p;
   949       size_t res = fc->size();
   950       // If the object is still a free chunk, return the size, else it
   951       // has been allocated so try again.
   952       if (FreeChunk::indicatesFreeChunk(p)) {
   953         assert(res != 0, "Block size should not be 0");
   954         return res;
   955       }
   956     } else {
   957       // must read from what 'p' points to in each loop.
   958       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
   959       if (k != NULL) {
   960         assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
   961         oop o = (oop)p;
   962         assert(o->is_parsable(), "Should be parsable");
   963         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
   964         size_t res = o->size_given_klass(k->klass_part());
   965         res = adjustObjectSize(res);
   966         assert(res != 0, "Block size should not be 0");
   967         return res;
   968       }
   969     }
   970   }
   971 }
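
The loop above relies on a read-then-revalidate pattern: speculatively read the size out of what looks like a FreeChunk, then confirm the block still indicates a free chunk before trusting the value, and otherwise retry (the object path likewise retries until klass_or_null() becomes non-NULL). Below is a stripped-down editor's sketch of that shape, using invented types and ignoring memory-ordering details.

#include <cstddef>
#include <cstdio>

// Invented stand-ins; only the retry shape mirrors block_size() above.
struct BlockHeader {
  volatile bool   is_free;     // plays the role of FreeChunk::indicatesFreeChunk()
  volatile size_t free_size;   // size recorded in the free chunk
  size_t          object_size; // size derivable once the block is an object
};

static size_t block_size_sketch(const BlockHeader* h) {
  while (true) {
    if (h->is_free) {
      size_t res = h->free_size;  // speculative read of the free-chunk size
      if (h->is_free) {           // re-check: still free, so the size is valid
        return res;
      }
      // The chunk was allocated between the two reads; loop and retry.
    } else {
      return h->object_size;      // treat it as an (initialized) object instead
    }
  }
}

int main() {
  BlockHeader h = { true, 12, 0 };
  printf("size=%zu words\n", block_size_sketch(&h));
  return 0;
}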
   973 // A variant of the above that uses the Printezis bits for
   974 // unparsable but allocated objects. This avoids any possible
   975 // stalls waiting for mutators to initialize objects, and is
   976 // thus potentially faster than the variant above. However,
   977 // this variant may return a zero size for a block that is
   978 // under mutation and for which a consistent size cannot be
   979 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
   980 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
   981                                                      const CMSCollector* c)
   982 const {
   983   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
   984   // This must be volatile, or else there is a danger that the compiler
   985   // will compile the code below into a sometimes-infinite loop, by keeping
   986   // the value read the first time in a register.
   987   DEBUG_ONLY(uint loops = 0;)
   988   while (true) {
   989     // We must do this until we get a consistent view of the object.
   990     if (FreeChunk::indicatesFreeChunk(p)) {
   991       volatile FreeChunk* fc = (volatile FreeChunk*)p;
   992       size_t res = fc->size();
   993       if (FreeChunk::indicatesFreeChunk(p)) {
   994         assert(res != 0, "Block size should not be 0");
   995         assert(loops == 0, "Should be 0");
   996         return res;
   997       }
   998     } else {
   999       // must read from what 'p' points to in each loop.
  1000       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
  1001       if (k != NULL &&
  1002           ((oopDesc*)p)->is_parsable() &&
  1003           ((oopDesc*)p)->is_conc_safe()) {
  1004         assert(k->is_oop(), "Should really be klass oop.");
  1005         oop o = (oop)p;
  1006         assert(o->is_oop(), "Should be an oop");
  1007         size_t res = o->size_given_klass(k->klass_part());
  1008         res = adjustObjectSize(res);
  1009         assert(res != 0, "Block size should not be 0");
  1010         return res;
  1011       } else {
  1012         return c->block_size_if_printezis_bits(p);
  1013       }
  1014     }
  1015     assert(loops == 0, "Can loop at most once");
  1016     DEBUG_ONLY(loops++;)
  1017   }
  1018 }
  1020 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
  1021   NOT_PRODUCT(verify_objects_initialized());
  1022   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1023   FreeChunk* fc = (FreeChunk*)p;
  1024   if (fc->isFree()) {
  1025     return fc->size();
  1026   } else {
  1027     // Ignore mark word because this may be a recently promoted
  1028     // object whose mark word is used to chain together grey
  1029     // objects (the last one would have a null value).
  1030     assert(oop(p)->is_oop(true), "Should be an oop");
  1031     return adjustObjectSize(oop(p)->size());
  1032   }
  1033 }
  1035 // This implementation assumes that the property of "being an object" is
  1036 // stable.  But being a free chunk may not be (because of parallel
  1037 // promotion).
  1038 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
  1039   FreeChunk* fc = (FreeChunk*)p;
  1040   assert(is_in_reserved(p), "Should be in space");
  1041   // When doing a mark-sweep-compact of the CMS generation, this
  1042   // assertion may fail because prepare_for_compaction() uses
  1043   // space that is garbage to maintain information on ranges of
  1044   // live objects so that these live ranges can be moved as a whole.
  1045   // Comment out this assertion until that problem can be solved
  1046   // (i.e., the block start calculation may look at objects
  1047   // at addresses below "p" in finding the object that contains "p",
  1048   // and those objects (if garbage) may have been modified to hold
  1049   // live range information).
  1050   // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
  1051   if (FreeChunk::indicatesFreeChunk(p)) return false;
  1052   klassOop k = oop(p)->klass_or_null();
  1053   if (k != NULL) {
  1054     // Ignore mark word because it may have been used to
  1055     // chain together promoted objects (the last one
  1056     // would have a null value).
  1057     assert(oop(p)->is_oop(true), "Should be an oop");
  1058     return true;
  1059   } else {
  1060     return false;  // Was not an object at the start of collection.
  1061   }
  1062 }
  1064 // Check if the object is alive. This fact is checked either by consulting
  1065 // the main marking bitmap in the sweeping phase or, if it's a permanent
  1066 // generation and we're not in the sweeping phase, by checking the
  1067 // perm_gen_verify_bit_map where we store the "deadness" information if
  1068 // we did not sweep the perm gen in the most recent previous GC cycle.
  1069 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
  1070   assert (block_is_obj(p), "The address should point to an object");
  1072   // If we're sweeping, we use object liveness information from the main bit map
  1073   // for both perm gen and old gen.
  1074   // We don't need to lock the bitmap (live_map or dead_map below), because
  1075   // EITHER we are in the middle of the sweeping phase, and the
  1076   // main marking bit map (live_map below) is locked,
  1077   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
  1078   // is stable, because it's mutated only in the sweeping phase.
  1079   if (_collector->abstract_state() == CMSCollector::Sweeping) {
  1080     CMSBitMap* live_map = _collector->markBitMap();
  1081     return live_map->isMarked((HeapWord*) p);
  1082   } else {
  1083     // If we're not currently sweeping and we haven't swept the perm gen in
  1084     // the previous concurrent cycle then we may have dead but unswept objects
  1085     // in the perm gen. In this case, we use the "deadness" information
  1086     // that we had saved in perm_gen_verify_bit_map at the last sweep.
  1087     if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
  1088       if (_collector->verifying()) {
  1089         CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
  1090         // Object is marked in the dead_map bitmap at the previous sweep
  1091         // when we know that it's dead; if the bitmap is not allocated then
  1092         // the object is alive.
  1093         return (dead_map->sizeInBits() == 0) // bit_map has been allocated
  1094                || !dead_map->par_isMarked((HeapWord*) p);
  1095       } else {
  1096         return false; // We can't say for sure if it's live, so we say that it's dead.
  1097       }
  1098     }
  1099   }
  1100   return true;
  1101 }
  1103 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
  1104   FreeChunk* fc = (FreeChunk*)p;
  1105   assert(is_in_reserved(p), "Should be in space");
  1106   assert(_bt.block_start(p) == p, "Should be a block boundary");
  1107   if (!fc->isFree()) {
  1108     // Ignore mark word because it may have been used to
  1109     // chain together promoted objects (the last one
  1110     // would have a null value).
  1111     assert(oop(p)->is_oop(true), "Should be an oop");
  1112     return true;
  1113   }
  1114   return false;
  1115 }
  1117 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
  1118 // approximate answer if you don't hold the freelistlock when you call this.
  1119 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
  1120   size_t size = 0;
  1121   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  1122     debug_only(
  1123       // We may be calling here without the lock in which case we
  1124       // won't do this modest sanity check.
  1125       if (freelistLock()->owned_by_self()) {
  1126         size_t total_list_size = 0;
  1127         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
  1128           fc = fc->next()) {
  1129           total_list_size += i;
  1130         }
  1131         assert(total_list_size == i * _indexedFreeList[i].count(),
  1132                "Count in list is incorrect");
  1133       }
  1134     )
  1135     size += i * _indexedFreeList[i].count();
  1136   }
  1137   return size;
  1138 }
  1140 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  1141   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  1142   return allocate(size);
  1143 }
  1145 HeapWord*
  1146 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
  1147   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
  1148 }
  1150 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  1151   assert_lock_strong(freelistLock());
  1152   HeapWord* res = NULL;
  1153   assert(size == adjustObjectSize(size),
  1154          "use adjustObjectSize() before calling into allocate()");
  1156   if (_adaptive_freelists) {
  1157     res = allocate_adaptive_freelists(size);
  1158   } else {  // non-adaptive free lists
  1159     res = allocate_non_adaptive_freelists(size);
  1160   }
  1162   if (res != NULL) {
  1163     // check that res does lie in this space!
  1164     assert(is_in_reserved(res), "Not in this space!");
  1165     assert(is_aligned((void*)res), "alignment check");
  1167     FreeChunk* fc = (FreeChunk*)res;
  1168     fc->markNotFree();
  1169     assert(!fc->isFree(), "shouldn't be marked free");
  1170     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
  1171     // Verify that the block offset table shows this to
  1172     // be a single block, but not one which is unallocated.
  1173     _bt.verify_single_block(res, size);
  1174     _bt.verify_not_unallocated(res, size);
  1175     // mangle a just allocated object with a distinct pattern.
  1176     debug_only(fc->mangleAllocated(size));
  1179   return res;
  1182 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
  1183   HeapWord* res = NULL;
  1184   // try to use linear allocation for smaller blocks
  1185   if (size < _smallLinearAllocBlock._allocation_size_limit) {
  1186     // if successful, the following also adjusts block offset table
  1187     res = getChunkFromSmallLinearAllocBlock(size);
  1189   // Else triage to indexed lists for smaller sizes
  1190   if (res == NULL) {
  1191     if (size < SmallForDictionary) {
  1192       res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1193     } else {
  1194       // else get it from the big dictionary; if even this doesn't
  1195       // work we are out of luck.
  1196       res = (HeapWord*)getChunkFromDictionaryExact(size);
  1200   return res;
  1203 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
  1204   assert_lock_strong(freelistLock());
  1205   HeapWord* res = NULL;
  1206   assert(size == adjustObjectSize(size),
  1207          "use adjustObjectSize() before calling into allocate()");
  1209   // Strategy
  1210   //   if small
  1211   //     exact size from small object indexed list if small
  1212   //     small or large linear allocation block (linAB) as appropriate
  1213   //     take from lists of greater sized chunks
  1214   //   else
  1215   //     dictionary
  1216   //     small or large linear allocation block if it has the space
  1217   // Try allocating exact size from indexTable first
  1218   if (size < IndexSetSize) {
  1219     res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1220     if(res != NULL) {
  1221       assert(res != (HeapWord*)_indexedFreeList[size].head(),
  1222         "Not removed from free list");
  1223       // no block offset table adjustment is necessary on blocks in
  1224       // the indexed lists.
  1226     // Try allocating from the small LinAB
  1227     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
  1228         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
  1229         // if successful, the above also adjusts block offset table
  1230         // Note that this call will refill the LinAB to
  1231         // satisfy the request.  This is different than
  1232         // evm.
  1233         // Don't record chunk off a LinAB?  smallSplitBirth(size);
  1235     } else {
  1236       // Raid the exact free lists larger than size, even if they are not
  1237       // overpopulated.
  1238       res = (HeapWord*) getChunkFromGreater(size);
  1240   } else {
  1241     // Big objects get allocated directly from the dictionary.
  1242     res = (HeapWord*) getChunkFromDictionaryExact(size);
  1243     if (res == NULL) {
  1244       // Try hard not to fail since an allocation failure will likely
  1245       // trigger a synchronous GC.  Try to get the space from the
  1246       // allocation blocks.
  1247       res = getChunkFromSmallLinearAllocBlockRemainder(size);
  1251   return res;
  1254 // A worst-case estimate of the space required (in HeapWords) to expand the heap
  1255 // when promoting obj.
  1256 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  1257   // Depending on the object size, expansion may require refilling either a
  1258   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
  1259   // is added because the dictionary may over-allocate to avoid fragmentation.
  1260   size_t space = obj_size;
  1261   if (!_adaptive_freelists) {
  1262     space = MAX2(space, _smallLinearAllocBlock._refillSize);
  1264   space += _promoInfo.refillSize() + 2 * MinChunkSize;
  1265   return space;
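// Worked example of the estimate above (all sizes hypothetical, in HeapWords,
// purely to illustrate the arithmetic): promoting a 100-word object with
// non-adaptive free lists and a linAB refill size of 256 gives
//   space = MAX2(100, 256) = 256
//   space += _promoInfo.refillSize() + 2 * MinChunkSize
// so with a promotion-info refill of 64 words and a MinChunkSize of 4 words
// the worst-case expansion request would be 256 + 64 + 8 = 328 words.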
  1268 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  1269   FreeChunk* ret;
  1271   assert(numWords >= MinChunkSize, "Size is less than minimum");
  1272   assert(linearAllocationWouldFail() || bestFitFirst(),
  1273     "Should not be here");
  1275   size_t i;
  1276   size_t currSize = numWords + MinChunkSize;
  1277   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  1278   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
  1279     FreeList* fl = &_indexedFreeList[i];
  1280     if (fl->head()) {
  1281       ret = getFromListGreater(fl, numWords);
  1282       assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
  1283       return ret;
  1287   currSize = MAX2((size_t)SmallForDictionary,
  1288                   (size_t)(numWords + MinChunkSize));
  1290   /* Try to get a chunk that satisfies request, while avoiding
  1291      fragmentation that can't be handled. */
  1293     ret =  dictionary()->getChunk(currSize);
  1294     if (ret != NULL) {
  1295       assert(ret->size() - numWords >= MinChunkSize,
  1296              "Chunk is too small");
  1297       _bt.allocated((HeapWord*)ret, ret->size());
  1298       /* Carve returned chunk. */
  1299       (void) splitChunkAndReturnRemainder(ret, numWords);
  1300       /* Label this as no longer a free chunk. */
  1301       assert(ret->isFree(), "This chunk should be free");
  1302       ret->linkPrev(NULL);
  1304     assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
  1305     return ret;
  1307   ShouldNotReachHere();
  1310 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
  1311   const {
  1312   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  1313   return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
  1316 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
  1317   if (fc->size() >= IndexSetSize) {
  1318     return dictionary()->verifyChunkInFreeLists(fc);
  1319   } else {
  1320     return verifyChunkInIndexedFreeLists(fc);
  1324 #ifndef PRODUCT
  1325 void CompactibleFreeListSpace::assert_locked() const {
  1326   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
  1329 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
  1330   CMSLockVerifier::assert_locked(lock);
  1332 #endif
  1334 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
  1335   // In the parallel case, the main thread holds the free list lock
  1336   // on behalf of the parallel threads.
  1337   FreeChunk* fc;
  1339     // If GC is parallel, this might be called by several threads.
  1340     // This should be rare enough that the locking overhead won't affect
  1341     // the sequential code.
  1342     MutexLockerEx x(parDictionaryAllocLock(),
  1343                     Mutex::_no_safepoint_check_flag);
  1344     fc = getChunkFromDictionary(size);
  1346   if (fc != NULL) {
  1347     fc->dontCoalesce();
  1348     assert(fc->isFree(), "Should be free, but not coalescable");
  1349     // Verify that the block offset table shows this to
  1350     // be a single block, but not one which is unallocated.
  1351     _bt.verify_single_block((HeapWord*)fc, fc->size());
  1352     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  1354   return fc;
  1357 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
  1358   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  1359   assert_locked();
  1361   // if we are tracking promotions, then first ensure space for
  1362   // promotion (including spooling space for saving header if necessary).
  1363   // then allocate and copy, then track promoted info if needed.
  1364   // When tracking (see PromotionInfo::track()), the mark word may
  1365   // be displaced and in this case restoration of the mark word
  1366   // occurs in the (oop_since_save_marks_)iterate phase.
  1367   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
  1368     return NULL;
  1370   // Call the allocate(size_t, bool) form directly to avoid the
  1371   // additional call through the allocate(size_t) form.  Having
  1372   // the compiler inline the call is problematic because allocate(size_t)
  1373   // is a virtual method.
  1374   HeapWord* res = allocate(adjustObjectSize(obj_size));
  1375   if (res != NULL) {
  1376     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
  1377     // if we should be tracking promotions, do so.
  1378     if (_promoInfo.tracking()) {
  1379         _promoInfo.track((PromotedObject*)res);
  1382   return oop(res);
  1385 HeapWord*
  1386 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
  1387   assert_locked();
  1388   assert(size >= MinChunkSize, "minimum chunk size");
  1389   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
  1390     "maximum from smallLinearAllocBlock");
  1391   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
  1394 HeapWord*
  1395 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
  1396                                                        size_t size) {
  1397   assert_locked();
  1398   assert(size >= MinChunkSize, "too small");
  1399   HeapWord* res = NULL;
  1400   // Try to do linear allocation from blk, making sure that it is not empty.
  1401   if (blk->_word_size == 0) {
  1402     // We have probably been unable to fill this either in the prologue or
  1403     // when it was exhausted at the last linear allocation. Bail out until
  1404     // next time.
  1405     assert(blk->_ptr == NULL, "consistency check");
  1406     return NULL;
  1408   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
  1409   res = getChunkFromLinearAllocBlockRemainder(blk, size);
  1410   if (res != NULL) return res;
  1412   // about to exhaust this linear allocation block
  1413   if (blk->_word_size == size) { // exactly satisfied
  1414     res = blk->_ptr;
  1415     _bt.allocated(res, blk->_word_size);
  1416   } else if (size + MinChunkSize <= blk->_refillSize) {
  1417     size_t sz = blk->_word_size;
  1418     // Update _unallocated_block if the size is such that chunk would be
  1419     // returned to the indexed free list.  All other chunks in the indexed
  1420     // free lists are allocated from the dictionary so that _unallocated_block
  1421     // has already been adjusted for them.  Do it here so that the cost
  1422     // is incurred uniformly for all chunks added back to the indexed free lists.
  1423     if (sz < SmallForDictionary) {
  1424       _bt.allocated(blk->_ptr, sz);
  1426     // Return the chunk that isn't big enough, and then refill below.
  1427     addChunkToFreeLists(blk->_ptr, sz);
  1428     splitBirth(sz);
  1429     // Don't keep statistics on adding back chunk from a LinAB.
  1430   } else {
  1431     // A refilled block would not satisfy the request.
  1432     return NULL;
  1435   blk->_ptr = NULL; blk->_word_size = 0;
  1436   refillLinearAllocBlock(blk);
  1437   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
  1438          "block was replenished");
  1439   if (res != NULL) {
  1440     splitBirth(size);
  1441     repairLinearAllocBlock(blk);
  1442   } else if (blk->_ptr != NULL) {
  1443     res = blk->_ptr;
  1444     size_t blk_size = blk->_word_size;
  1445     blk->_word_size -= size;
  1446     blk->_ptr  += size;
  1447     splitBirth(size);
  1448     repairLinearAllocBlock(blk);
  1449     // Update BOT last so that other (parallel) GC threads see a consistent
  1450     // view of the BOT and free blocks.
  1451     // Above must occur before BOT is updated below.
  1452     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1454   return res;
  1457 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
  1458                                         LinearAllocBlock* blk,
  1459                                         size_t size) {
  1460   assert_locked();
  1461   assert(size >= MinChunkSize, "too small");
  1463   HeapWord* res = NULL;
  1464   // This is the common case.  Keep it simple.
  1465   if (blk->_word_size >= size + MinChunkSize) {
  1466     assert(blk->_ptr != NULL, "consistency check");
  1467     res = blk->_ptr;
  1468     // Note that the BOT is up-to-date for the linAB before allocation.  It
  1469     // indicates the start of the linAB.  The split_block() updates the
  1470     // BOT for the linAB after the allocation (indicates the start of the
  1471     // next chunk to be allocated).
  1472     size_t blk_size = blk->_word_size;
  1473     blk->_word_size -= size;
  1474     blk->_ptr  += size;
  1475     splitBirth(size);
  1476     repairLinearAllocBlock(blk);
  1477     // Update BOT last so that other (parallel) GC threads see a consistent
  1478     // view of the BOT and free blocks.
  1479     // Above must occur before BOT is updated below.
  1480     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1481     _bt.allocated(res, size);
  1483   return res;
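// Illustrative trace of the common case above (hypothetical numbers): with
// blk->_ptr == P, blk->_word_size == 100 and a request of size == 20 (so
// 20 + MinChunkSize <= 100), the caller gets res == P, the block becomes
// { _ptr = P + 20, _word_size = 80 }, a split birth of 20 is recorded, and
// _bt.split_block()/_bt.allocated() adjust the BOT only after the free-block
// fields are consistent, so other (parallel) GC threads never observe a
// stale remainder.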
  1486 FreeChunk*
  1487 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
  1488   assert_locked();
  1489   assert(size < SmallForDictionary, "just checking");
  1490   FreeChunk* res;
  1491   res = _indexedFreeList[size].getChunkAtHead();
  1492   if (res == NULL) {
  1493     res = getChunkFromIndexedFreeListHelper(size);
  1495   _bt.verify_not_unallocated((HeapWord*) res, size);
  1496   assert(res == NULL || res->size() == size, "Incorrect block size");
  1497   return res;
  1500 FreeChunk*
  1501 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
  1502   bool replenish) {
  1503   assert_locked();
  1504   FreeChunk* fc = NULL;
  1505   if (size < SmallForDictionary) {
  1506     assert(_indexedFreeList[size].head() == NULL ||
  1507       _indexedFreeList[size].surplus() <= 0,
  1508       "List for this size should be empty or under populated");
  1509     // Try best fit in exact lists before replenishing the list
  1510     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
  1511       // Replenish list.
  1512       //
  1513       // Things tried that failed.
  1514       //   Tried allocating out of the two LinAB's first before
  1515       // replenishing lists.
  1516       //   Tried small linAB of size 256 (size in indexed list)
  1517       // and replenishing indexed lists from the small linAB.
  1518       //
  1519       FreeChunk* newFc = NULL;
  1520       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
  1521       if (replenish_size < SmallForDictionary) {
  1522         // Do not replenish from an underpopulated size.
  1523         if (_indexedFreeList[replenish_size].surplus() > 0 &&
  1524             _indexedFreeList[replenish_size].head() != NULL) {
  1525           newFc = _indexedFreeList[replenish_size].getChunkAtHead();
  1526         } else if (bestFitFirst()) {
  1527           newFc = bestFitSmall(replenish_size);
  1530       if (newFc == NULL && replenish_size > size) {
  1531         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
  1532         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
  1534       // Note: The stats update re split-death of the block obtained above
  1535       // will be recorded below, precisely when we know we are going to
  1536       // be actually splitting it into more than one piece.
  1537       if (newFc != NULL) {
  1538         if  (replenish || CMSReplenishIntermediate) {
  1539           // Replenish this list and return one block to caller.
  1540           size_t i;
  1541           FreeChunk *curFc, *nextFc;
  1542           size_t num_blk = newFc->size() / size;
  1543           assert(num_blk >= 1, "Smaller than requested?");
  1544           assert(newFc->size() % size == 0, "Should be integral multiple of request");
  1545           if (num_blk > 1) {
  1546             // we are sure we will be splitting the block just obtained
  1547             // into multiple pieces; record the split-death of the original
  1548             splitDeath(replenish_size);
  1550           // carve up and link blocks 0, ..., num_blk - 2
  1551           // The last chunk is not added to the lists but is returned as the
  1552           // free chunk.
  1553           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
  1554                i = 0;
  1555                i < (num_blk - 1);
  1556                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
  1557                i++) {
  1558             curFc->setSize(size);
  1559             // Don't record this as a return in order to try and
  1560             // determine the "returns" from a GC.
  1561             _bt.verify_not_unallocated((HeapWord*) curFc, size);
  1562             _indexedFreeList[size].returnChunkAtTail(curFc, false);
  1563             _bt.mark_block((HeapWord*)curFc, size);
  1564             splitBirth(size);
  1565             // Don't record the initial population of the indexed list
  1566             // as a split birth.
  1569           // check that the arithmetic was OK above
  1570           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
  1571             "inconsistency in carving newFc");
  1572           curFc->setSize(size);
  1573           _bt.mark_block((HeapWord*)curFc, size);
  1574           splitBirth(size);
  1575           fc = curFc;
  1576         } else {
  1577           // Return entire block to caller
  1578           fc = newFc;
  1582   } else {
  1583     // Get a free chunk from the free chunk dictionary to be returned to
  1584     // replenish the indexed free list.
  1585     fc = getChunkFromDictionaryExact(size);
  1587   // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
  1588   return fc;
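// Replenishment example (hypothetical values): for a request of size == 8
// with CMSIndexedFreeListReplenish == 4, replenish_size is 32.  If a 32-word
// chunk is obtained, num_blk == 4: a split death of 32 is recorded, the first
// three 8-word pieces are appended to _indexedFreeList[8] (each counted as a
// split birth), and the fourth piece is returned directly to the caller.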
  1591 FreeChunk*
  1592 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
  1593   assert_locked();
  1594   FreeChunk* fc = _dictionary->getChunk(size);
  1595   if (fc == NULL) {
  1596     return NULL;
  1598   _bt.allocated((HeapWord*)fc, fc->size());
  1599   if (fc->size() >= size + MinChunkSize) {
  1600     fc = splitChunkAndReturnRemainder(fc, size);
  1602   assert(fc->size() >= size, "chunk too small");
  1603   assert(fc->size() < size + MinChunkSize, "chunk too big");
  1604   _bt.verify_single_block((HeapWord*)fc, fc->size());
  1605   return fc;
  1608 FreeChunk*
  1609 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
  1610   assert_locked();
  1611   FreeChunk* fc = _dictionary->getChunk(size);
  1612   if (fc == NULL) {
  1613     return fc;
  1615   _bt.allocated((HeapWord*)fc, fc->size());
  1616   if (fc->size() == size) {
  1617     _bt.verify_single_block((HeapWord*)fc, size);
  1618     return fc;
  1620   assert(fc->size() > size, "getChunk() guarantee");
  1621   if (fc->size() < size + MinChunkSize) {
  1622     // Return the chunk to the dictionary and go get a bigger one.
  1623     returnChunkToDictionary(fc);
  1624     fc = _dictionary->getChunk(size + MinChunkSize);
  1625     if (fc == NULL) {
  1626       return NULL;
  1628     _bt.allocated((HeapWord*)fc, fc->size());
  1630   assert(fc->size() >= size + MinChunkSize, "tautology");
  1631   fc = splitChunkAndReturnRemainder(fc, size);
  1632   assert(fc->size() == size, "chunk is wrong size");
  1633   _bt.verify_single_block((HeapWord*)fc, size);
  1634   return fc;
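// Example of the exact-fit protocol above (hypothetical sizes, taking
// MinChunkSize as 4 words): a request for 300 words that yields a 302-word
// chunk cannot be split (302 < 300 + 4), so that chunk goes back to the
// dictionary and a chunk of at least 304 words is requested instead; the new
// chunk is then split so that exactly 300 words are returned and the
// remainder of at least 4 words goes back onto the free lists.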
  1637 void
  1638 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
  1639   assert_locked();
  1641   size_t size = chunk->size();
  1642   _bt.verify_single_block((HeapWord*)chunk, size);
  1643   // adjust _unallocated_block downward, as necessary
  1644   _bt.freed((HeapWord*)chunk, size);
  1645   _dictionary->returnChunk(chunk);
  1646 #ifndef PRODUCT
  1647   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1648     TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
  1650 #endif // PRODUCT
  1653 void
  1654 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
  1655   assert_locked();
  1656   size_t size = fc->size();
  1657   _bt.verify_single_block((HeapWord*) fc, size);
  1658   _bt.verify_not_unallocated((HeapWord*) fc, size);
  1659   if (_adaptive_freelists) {
  1660     _indexedFreeList[size].returnChunkAtTail(fc);
  1661   } else {
  1662     _indexedFreeList[size].returnChunkAtHead(fc);
  1664 #ifndef PRODUCT
  1665   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1666      _indexedFreeList[size].verify_stats();
  1668 #endif // PRODUCT
  1671 // Add chunk to end of last block -- if it's the largest
  1672 // block -- and update BOT and census data. We would
  1673 // of course have preferred to coalesce it with the
  1674 // last block, but it's currently less expensive to find the
  1675 // largest block than it is to find the last.
  1676 void
  1677 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
  1678   HeapWord* chunk, size_t     size) {
  1679   // check that the chunk does lie in this space!
  1680   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1681   // One of the parallel gc task threads may be here
  1682   // whilst others are allocating.
  1683   Mutex* lock = NULL;
  1684   if (ParallelGCThreads != 0) {
  1685     lock = &_parDictionaryAllocLock;
  1687   FreeChunk* ec;
  1689     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1690     ec = dictionary()->findLargestDict();  // get largest block
  1691     if (ec != NULL && ec->end() == chunk) {
  1692       // It's a coterminal block - we can coalesce.
  1693       size_t old_size = ec->size();
  1694       coalDeath(old_size);
  1695       removeChunkFromDictionary(ec);
  1696       size += old_size;
  1697     } else {
  1698       ec = (FreeChunk*)chunk;
  1701   ec->setSize(size);
  1702   debug_only(ec->mangleFreed(size));
  1703   if (size < SmallForDictionary) {
  1704     lock = _indexedFreeListParLocks[size];
  1706   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1707   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  1708   // record the birth under the lock since the recording involves
  1709   // manipulation of the list on which the chunk lives and
  1710   // if the chunk is allocated and is the last on the list,
  1711   // the list can go away.
  1712   coalBirth(size);
  1715 void
  1716 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
  1717                                               size_t     size) {
  1718   // check that the chunk does lie in this space!
  1719   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1720   assert_locked();
  1721   _bt.verify_single_block(chunk, size);
  1723   FreeChunk* fc = (FreeChunk*) chunk;
  1724   fc->setSize(size);
  1725   debug_only(fc->mangleFreed(size));
  1726   if (size < SmallForDictionary) {
  1727     returnChunkToFreeList(fc);
  1728   } else {
  1729     returnChunkToDictionary(fc);
  1733 void
  1734 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
  1735   size_t size, bool coalesced) {
  1736   assert_locked();
  1737   assert(chunk != NULL, "null chunk");
  1738   if (coalesced) {
  1739     // repair BOT
  1740     _bt.single_block(chunk, size);
  1742   addChunkToFreeLists(chunk, size);
  1745 // We _must_ find the purported chunk on our free lists;
  1746 // we assert if we don't.
  1747 void
  1748 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
  1749   size_t size = fc->size();
  1750   assert_locked();
  1751   debug_only(verifyFreeLists());
  1752   if (size < SmallForDictionary) {
  1753     removeChunkFromIndexedFreeList(fc);
  1754   } else {
  1755     removeChunkFromDictionary(fc);
  1757   _bt.verify_single_block((HeapWord*)fc, size);
  1758   debug_only(verifyFreeLists());
  1761 void
  1762 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
  1763   size_t size = fc->size();
  1764   assert_locked();
  1765   assert(fc != NULL, "null chunk");
  1766   _bt.verify_single_block((HeapWord*)fc, size);
  1767   _dictionary->removeChunk(fc);
  1768   // adjust _unallocated_block upward, as necessary
  1769   _bt.allocated((HeapWord*)fc, size);
  1772 void
  1773 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
  1774   assert_locked();
  1775   size_t size = fc->size();
  1776   _bt.verify_single_block((HeapWord*)fc, size);
  1777   NOT_PRODUCT(
  1778     if (FLSVerifyIndexTable) {
  1779       verifyIndexedFreeList(size);
  1782   _indexedFreeList[size].removeChunk(fc);
  1783   debug_only(fc->clearNext());
  1784   debug_only(fc->clearPrev());
  1785   NOT_PRODUCT(
  1786     if (FLSVerifyIndexTable) {
  1787       verifyIndexedFreeList(size);
  1792 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
  1793   /* A hint is the next larger size that has a surplus.
  1794      Start search at a size large enough to guarantee that
  1795      the excess is >= MIN_CHUNK. */
  1796   size_t start = align_object_size(numWords + MinChunkSize);
  1797   if (start < IndexSetSize) {
  1798     FreeList* it   = _indexedFreeList;
  1799     size_t    hint = _indexedFreeList[start].hint();
  1800     while (hint < IndexSetSize) {
  1801       assert(hint % MinObjAlignment == 0, "hint should be aligned");
  1802       FreeList *fl = &_indexedFreeList[hint];
  1803       if (fl->surplus() > 0 && fl->head() != NULL) {
  1804         // Found a list with surplus, reset original hint
  1805         // and split out a free chunk which is returned.
  1806         _indexedFreeList[start].set_hint(hint);
  1807         FreeChunk* res = getFromListGreater(fl, numWords);
  1808         assert(res == NULL || res->isFree(),
  1809           "Should be returning a free chunk");
  1810         return res;
  1812       hint = fl->hint(); /* keep looking */
  1814     /* None found. */
  1815     it[start].set_hint(IndexSetSize);
  1817   return NULL;
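// Hint-chain example (hypothetical state): for numWords == 20 the search
// starts at start == align_object_size(20 + MinChunkSize).  If
// _indexedFreeList[start].hint() is 40 but that list has no surplus, the walk
// follows 40's hint, say 64; a 64-word chunk with surplus is then split by
// getFromListGreater() into the 20-word result plus a 44-word remainder, and
// the start list's hint is reset to 64 so the next search skips size 40.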
  1820 /* Requires fl->size >= numWords + MinChunkSize */
  1821 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
  1822   size_t numWords) {
  1823   FreeChunk *curr = fl->head();
  1824   size_t oldNumWords = curr->size();
  1825   assert(numWords >= MinChunkSize, "Word size is too small");
  1826   assert(curr != NULL, "List is empty");
  1827   assert(oldNumWords >= numWords + MinChunkSize,
  1828         "Size of chunks in the list is too small");
  1830   fl->removeChunk(curr);
  1831   // recorded indirectly by splitChunkAndReturnRemainder -
  1832   // smallSplit(oldNumWords, numWords);
  1833   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
  1834   // Does anything have to be done for the remainder in terms of
  1835   // fixing the card table?
  1836   assert(new_chunk == NULL || new_chunk->isFree(),
  1837     "Should be returning a free chunk");
  1838   return new_chunk;
  1841 FreeChunk*
  1842 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
  1843   size_t new_size) {
  1844   assert_locked();
  1845   size_t size = chunk->size();
  1846   assert(size > new_size, "Split from a smaller block?");
  1847   assert(is_aligned(chunk), "alignment problem");
  1848   assert(size == adjustObjectSize(size), "alignment problem");
  1849   size_t rem_size = size - new_size;
  1850   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
  1851   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
  1852   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
  1853   assert(is_aligned(ffc), "alignment problem");
  1854   ffc->setSize(rem_size);
  1855   ffc->linkNext(NULL);
  1856   ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  1857   // Above must occur before BOT is updated below.
  1858   // adjust block offset table
  1859   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
  1860   if (rem_size < SmallForDictionary) {
  1861     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  1862     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
  1863     returnChunkToFreeList(ffc);
  1864     split(size, rem_size);
  1865     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
  1866   } else {
  1867     returnChunkToDictionary(ffc);
  1868     split(size, rem_size);
  1870   chunk->setSize(new_size);
  1871   return chunk;
  1874 void
  1875 CompactibleFreeListSpace::sweep_completed() {
  1876   // Now that space is probably plentiful, refill linear
  1877   // allocation blocks as needed.
  1878   refillLinearAllocBlocksIfNeeded();
  1881 void
  1882 CompactibleFreeListSpace::gc_prologue() {
  1883   assert_locked();
  1884   if (PrintFLSStatistics != 0) {
  1885     gclog_or_tty->print("Before GC:\n");
  1886     reportFreeListStatistics();
  1888   refillLinearAllocBlocksIfNeeded();
  1891 void
  1892 CompactibleFreeListSpace::gc_epilogue() {
  1893   assert_locked();
  1894   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
  1895     if (_smallLinearAllocBlock._word_size == 0)
  1896       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
  1898   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1899   _promoInfo.stopTrackingPromotions();
  1900   repairLinearAllocationBlocks();
  1901   // Print Space's stats
  1902   if (PrintFLSStatistics != 0) {
  1903     gclog_or_tty->print("After GC:\n");
  1904     reportFreeListStatistics();
  1908 // Iteration support, mostly delegated from a CMS generation
  1910 void CompactibleFreeListSpace::save_marks() {
  1911   // mark the "end" of the used space at the time of this call;
  1912   // note, however, that promoted objects from this point
  1913   // on are tracked in the _promoInfo below.
  1914   set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ?
  1915                       unallocated_block() : end());
  1916   // inform allocator that promotions should be tracked.
  1917   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1918   _promoInfo.startTrackingPromotions();
  1921 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
  1922   assert(_promoInfo.tracking(), "No preceding save_marks?");
  1923   guarantee(SharedHeap::heap()->n_par_threads() == 0,
  1924             "Shouldn't be called (yet) during parallel part of gc.");
  1925   return _promoInfo.noPromotions();
  1928 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
  1930 void CompactibleFreeListSpace::                                             \
  1931 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  1932   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
  1933          "Shouldn't be called (yet) during parallel part of gc.");          \
  1934   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
  1935   /*                                                                        \
  1936    * This also restores any displaced headers and removes the elements from \
  1937    * the iteration set as they are processed, so that we have a clean slate \
  1938    * at the end of the iteration. Note, thus, that if new objects are       \
  1939    * promoted as a result of the iteration they are iterated over as well.  \
  1940    */                                                                       \
  1941   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
  1944 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
  1947 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
  1948   // ugghh... how would one do this efficiently for a non-contiguous space?
  1949   guarantee(false, "NYI");
  1952 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  1953   return _smallLinearAllocBlock._word_size == 0;
  1956 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
  1957   // Fix up linear allocation blocks to look like free blocks
  1958   repairLinearAllocBlock(&_smallLinearAllocBlock);
  1961 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
  1962   assert_locked();
  1963   if (blk->_ptr != NULL) {
  1964     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
  1965            "Minimum block size requirement");
  1966     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
  1967     fc->setSize(blk->_word_size);
  1968     fc->linkPrev(NULL);   // mark as free
  1969     fc->dontCoalesce();
  1970     assert(fc->isFree(), "just marked it free");
  1971     assert(fc->cantCoalesce(), "just marked it uncoalescable");
  1975 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
  1976   assert_locked();
  1977   if (_smallLinearAllocBlock._ptr == NULL) {
  1978     assert(_smallLinearAllocBlock._word_size == 0,
  1979       "Size of linAB should be zero if the ptr is NULL");
  1980     // Reset the linAB refill and allocation size limit.
  1981     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
  1983   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
  1986 void
  1987 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
  1988   assert_locked();
  1989   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
  1990          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
  1991          "blk invariant");
  1992   if (blk->_ptr == NULL) {
  1993     refillLinearAllocBlock(blk);
  1995   if (PrintMiscellaneous && Verbose) {
  1996     if (blk->_word_size == 0) {
  1997       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
  2002 void
  2003 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  2004   assert_locked();
  2005   assert(blk->_word_size == 0 && blk->_ptr == NULL,
  2006          "linear allocation block should be empty");
  2007   FreeChunk* fc;
  2008   if (blk->_refillSize < SmallForDictionary &&
  2009       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
  2010     // A linAB's strategy might be to use small sizes to reduce
  2011     // fragmentation but still get the benefits of allocation from a
  2012     // linAB.
  2013   } else {
  2014     fc = getChunkFromDictionary(blk->_refillSize);
  2016   if (fc != NULL) {
  2017     blk->_ptr  = (HeapWord*)fc;
  2018     blk->_word_size = fc->size();
  2019     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
  2023 // Support for concurrent collection policy decisions.
  2024 bool CompactibleFreeListSpace::should_concurrent_collect() const {
  2025   // In the future we might want to add in fragmentation stats --
  2026   // including erosion of the "mountain" into this decision as well.
  2027   return !adaptive_freelists() && linearAllocationWouldFail();
  2030 // Support for compaction
  2032 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  2033   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  2034   // prepare_for_compaction() uses the space between live objects
  2035   // so that the later phase can skip dead space quickly.  As a result,
  2036   // verification of the free lists doesn't work afterwards.
  2039 #define obj_size(q) adjustObjectSize(oop(q)->size())
  2040 #define adjust_obj_size(s) adjustObjectSize(s)
  2042 void CompactibleFreeListSpace::adjust_pointers() {
  2043   // In other versions of adjust_pointers(), a bail out
  2044   // based on the amount of live data in the generation
  2045   // (i.e., if 0, bail out) may be used.
  2046   // Cannot test used() == 0 here because the free lists have already
  2047   // been mangled by the compaction.
  2049   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
  2050   // See note about verification in prepare_for_compaction().
  2053 void CompactibleFreeListSpace::compact() {
  2054   SCAN_AND_COMPACT(obj_size);
  2057 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
  2058 // where fbs are the free block sizes
  2059 double CompactibleFreeListSpace::flsFrag() const {
  2060   size_t itabFree = totalSizeInIndexedFreeLists();
  2061   double frag = 0.0;
  2062   size_t i;
  2064   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2065     double sz  = i;
  2066     frag      += _indexedFreeList[i].count() * (sz * sz);
  2069   double totFree = itabFree +
  2070                    _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
  2071   if (totFree > 0) {
  2072     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
  2073             (totFree * totFree));
  2074     frag = (double)1.0  - frag;
  2075   } else {
  2076     assert(frag == 0.0, "Follows from totFree == 0");
  2078   return frag;
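// Worked example of the metric above (hypothetical census): 100 free words in
// a single 100-word block give frag = 1 - 100^2/100^2 = 0, while the same 100
// words spread over twenty-five 4-word blocks give
// frag = 1 - (25 * 4^2)/100^2 = 0.96, i.e. badly fragmented.  A minimal
// sketch of the same computation over a hypothetical array of free block
// sizes (illustrative helper, not used by the VM):
//
//   double fls_frag_of(const size_t* fbs, size_t n) {
//     double sum = 0.0, sum_sq = 0.0;
//     for (size_t i = 0; i < n; i++) {
//       sum    += (double)fbs[i];
//       sum_sq += (double)fbs[i] * (double)fbs[i];
//     }
//     return (sum > 0.0) ? 1.0 - sum_sq / (sum * sum) : 0.0;
//   }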
  2081 void CompactibleFreeListSpace::beginSweepFLCensus(
  2082   float inter_sweep_current,
  2083   float inter_sweep_estimate,
  2084   float intra_sweep_estimate) {
  2085   assert_locked();
  2086   size_t i;
  2087   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2088     FreeList* fl    = &_indexedFreeList[i];
  2089     if (PrintFLSStatistics > 1) {
  2090       gclog_or_tty->print("size[%d] : ", i);
  2092     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
  2093     fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
  2094     fl->set_beforeSweep(fl->count());
  2095     fl->set_bfrSurp(fl->surplus());
  2097   _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
  2098                                     inter_sweep_current,
  2099                                     inter_sweep_estimate,
  2100                                     intra_sweep_estimate);
  2103 void CompactibleFreeListSpace::setFLSurplus() {
  2104   assert_locked();
  2105   size_t i;
  2106   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2107     FreeList *fl = &_indexedFreeList[i];
  2108     fl->set_surplus(fl->count() -
  2109                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
  2113 void CompactibleFreeListSpace::setFLHints() {
  2114   assert_locked();
  2115   size_t i;
  2116   size_t h = IndexSetSize;
  2117   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
  2118     FreeList *fl = &_indexedFreeList[i];
  2119     fl->set_hint(h);
  2120     if (fl->surplus() > 0) {
  2121       h = i;
  2126 void CompactibleFreeListSpace::clearFLCensus() {
  2127   assert_locked();
  2128   int i;
  2129   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2130     FreeList *fl = &_indexedFreeList[i];
  2131     fl->set_prevSweep(fl->count());
  2132     fl->set_coalBirths(0);
  2133     fl->set_coalDeaths(0);
  2134     fl->set_splitBirths(0);
  2135     fl->set_splitDeaths(0);
  2139 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  2140   if (PrintFLSStatistics > 0) {
  2141     HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
  2142     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
  2143                            largestAddr);
  2145   setFLSurplus();
  2146   setFLHints();
  2147   if (PrintGC && PrintFLSCensus > 0) {
  2148     printFLCensus(sweep_count);
  2150   clearFLCensus();
  2151   assert_locked();
  2152   _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
  2155 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
  2156   if (size < SmallForDictionary) {
  2157     FreeList *fl = &_indexedFreeList[size];
  2158     return (fl->coalDesired() < 0) ||
  2159            ((int)fl->count() > fl->coalDesired());
  2160   } else {
  2161     return dictionary()->coalDictOverPopulated(size);
  2165 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
  2166   assert(size < SmallForDictionary, "Size too large for indexed list");
  2167   FreeList *fl = &_indexedFreeList[size];
  2168   fl->increment_coalBirths();
  2169   fl->increment_surplus();
  2172 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
  2173   assert(size < SmallForDictionary, "Size too large for indexed list");
  2174   FreeList *fl = &_indexedFreeList[size];
  2175   fl->increment_coalDeaths();
  2176   fl->decrement_surplus();
  2179 void CompactibleFreeListSpace::coalBirth(size_t size) {
  2180   if (size  < SmallForDictionary) {
  2181     smallCoalBirth(size);
  2182   } else {
  2183     dictionary()->dictCensusUpdate(size,
  2184                                    false /* split */,
  2185                                    true /* birth */);
  2189 void CompactibleFreeListSpace::coalDeath(size_t size) {
  2190   if(size  < SmallForDictionary) {
  2191     smallCoalDeath(size);
  2192   } else {
  2193     dictionary()->dictCensusUpdate(size,
  2194                                    false /* split */,
  2195                                    false /* birth */);
  2199 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
  2200   assert(size < SmallForDictionary, "Size too large for indexed list");
  2201   FreeList *fl = &_indexedFreeList[size];
  2202   fl->increment_splitBirths();
  2203   fl->increment_surplus();
  2206 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
  2207   assert(size < SmallForDictionary, "Size too large for indexed list");
  2208   FreeList *fl = &_indexedFreeList[size];
  2209   fl->increment_splitDeaths();
  2210   fl->decrement_surplus();
  2213 void CompactibleFreeListSpace::splitBirth(size_t size) {
  2214   if (size  < SmallForDictionary) {
  2215     smallSplitBirth(size);
  2216   } else {
  2217     dictionary()->dictCensusUpdate(size,
  2218                                    true /* split */,
  2219                                    true /* birth */);
  2223 void CompactibleFreeListSpace::splitDeath(size_t size) {
  2224   if (size  < SmallForDictionary) {
  2225     smallSplitDeath(size);
  2226   } else {
  2227     dictionary()->dictCensusUpdate(size,
  2228                                    true /* split */,
  2229                                    false /* birth */);
  2233 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
  2234   size_t to2 = from - to1;
  2235   splitDeath(from);
  2236   splitBirth(to1);
  2237   splitBirth(to2);
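// Census bookkeeping example: split(100, 40) records a split death for the
// 100-word size and split births for the 40-word and 60-word sizes, keeping
// the per-size populations consistent with the actual carving done by
// splitChunkAndReturnRemainder().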
  2240 void CompactibleFreeListSpace::print() const {
  2241   tty->print(" CompactibleFreeListSpace");
  2242   Space::print();
  2245 void CompactibleFreeListSpace::prepare_for_verify() {
  2246   assert_locked();
  2247   repairLinearAllocationBlocks();
  2248   // Verify that the SpoolBlocks look like free blocks of
  2249   // appropriate sizes... To be done ...
  2252 class VerifyAllBlksClosure: public BlkClosure {
  2253  private:
  2254   const CompactibleFreeListSpace* _sp;
  2255   const MemRegion                 _span;
  2257  public:
  2258   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
  2259     MemRegion span) :  _sp(sp), _span(span) { }
  2261   virtual size_t do_blk(HeapWord* addr) {
  2262     size_t res;
  2263     if (_sp->block_is_obj(addr)) {
  2264       oop p = oop(addr);
  2265       guarantee(p->is_oop(), "Should be an oop");
  2266       res = _sp->adjustObjectSize(p->size());
  2267       if (_sp->obj_is_alive(addr)) {
  2268         p->verify();
  2270     } else {
  2271       FreeChunk* fc = (FreeChunk*)addr;
  2272       res = fc->size();
  2273       if (FLSVerifyLists && !fc->cantCoalesce()) {
  2274         guarantee(_sp->verifyChunkInFreeLists(fc),
  2275                   "Chunk should be on a free list");
  2278     guarantee(res != 0, "Livelock: no rank reduction!");
  2279     return res;
  2281 };
  2283 class VerifyAllOopsClosure: public OopClosure {
  2284  private:
  2285   const CMSCollector*             _collector;
  2286   const CompactibleFreeListSpace* _sp;
  2287   const MemRegion                 _span;
  2288   const bool                      _past_remark;
  2289   const CMSBitMap*                _bit_map;
  2291  protected:
  2292   void do_oop(void* p, oop obj) {
  2293     if (_span.contains(obj)) { // the interior oop points into CMS heap
  2294       if (!_span.contains(p)) { // reference from outside CMS heap
  2295         // Should be a valid object; the first disjunct below allows
  2296         // us to sidestep an assertion in block_is_obj() that insists
  2297         // that p be in _sp. Note that several generations (and spaces)
  2298         // are spanned by _span (CMS heap) above.
  2299         guarantee(!_sp->is_in_reserved(obj) ||
  2300                   _sp->block_is_obj((HeapWord*)obj),
  2301                   "Should be an object");
  2302         guarantee(obj->is_oop(), "Should be an oop");
  2303         obj->verify();
  2304         if (_past_remark) {
  2305           // Remark has been completed, the object should be marked
  2306           _bit_map->isMarked((HeapWord*)obj);
  2308       } else { // reference within CMS heap
  2309         if (_past_remark) {
  2310           // Remark has been completed -- so the referent should have
  2311           // been marked, if referring object is.
  2312           if (_bit_map->isMarked(_collector->block_start(p))) {
  2313             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
  2317     } else if (_sp->is_in_reserved(p)) {
  2318       // the reference is from FLS, and points out of FLS
  2319       guarantee(obj->is_oop(), "Should be an oop");
  2320       obj->verify();
  2324   template <class T> void do_oop_work(T* p) {
  2325     T heap_oop = oopDesc::load_heap_oop(p);
  2326     if (!oopDesc::is_null(heap_oop)) {
  2327       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2328       do_oop(p, obj);
  2332  public:
  2333   VerifyAllOopsClosure(const CMSCollector* collector,
  2334     const CompactibleFreeListSpace* sp, MemRegion span,
  2335     bool past_remark, CMSBitMap* bit_map) :
  2336     OopClosure(), _collector(collector), _sp(sp), _span(span),
  2337     _past_remark(past_remark), _bit_map(bit_map) { }
  2339   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
  2340   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
  2341 };
  2343 void CompactibleFreeListSpace::verify(bool ignored) const {
  2344   assert_lock_strong(&_freelistLock);
  2345   verify_objects_initialized();
  2346   MemRegion span = _collector->_span;
  2347   bool past_remark = (_collector->abstract_state() ==
  2348                       CMSCollector::Sweeping);
  2350   ResourceMark rm;
  2351   HandleMark  hm;
  2353   // Check integrity of CFL data structures
  2354   _promoInfo.verify();
  2355   _dictionary->verify();
  2356   if (FLSVerifyIndexTable) {
  2357     verifyIndexedFreeLists();
  2359   // Check integrity of all objects and free blocks in space
  2361     VerifyAllBlksClosure cl(this, span);
  2362     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
  2364   // Check that all references in the heap to FLS
  2365   // are to valid objects in FLS or that references in
  2366   // FLS are to valid objects elsewhere in the heap
  2367   if (FLSVerifyAllHeapReferences)
  2369     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
  2370       _collector->markBitMap());
  2371     CollectedHeap* ch = Universe::heap();
  2372     ch->oop_iterate(&cl);              // all oops in generations
  2373     ch->permanent_oop_iterate(&cl);    // all oops in perm gen
  2376   if (VerifyObjectStartArray) {
  2377     // Verify the block offset table
  2378     _bt.verify();
  2382 #ifndef PRODUCT
  2383 void CompactibleFreeListSpace::verifyFreeLists() const {
  2384   if (FLSVerifyLists) {
  2385     _dictionary->verify();
  2386     verifyIndexedFreeLists();
  2387   } else {
  2388     if (FLSVerifyDictionary) {
  2389       _dictionary->verify();
  2391     if (FLSVerifyIndexTable) {
  2392       verifyIndexedFreeLists();
  2396 #endif
  2398 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
  2399   size_t i = 0;
  2400   for (; i < MinChunkSize; i++) {
  2401     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
  2403   for (; i < IndexSetSize; i++) {
  2404     verifyIndexedFreeList(i);
  2408 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
  2409   FreeChunk* fc   =  _indexedFreeList[size].head();
  2410   FreeChunk* tail =  _indexedFreeList[size].tail();
  2411   size_t    num = _indexedFreeList[size].count();
  2412   size_t      n = 0;
  2413   guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
  2414   for (; fc != NULL; fc = fc->next(), n++) {
  2415     guarantee(fc->size() == size, "Size inconsistency");
  2416     guarantee(fc->isFree(), "!free?");
  2417     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
  2418     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
  2420   guarantee(n == num, "Incorrect count");
  2423 #ifndef PRODUCT
  2424 void CompactibleFreeListSpace::checkFreeListConsistency() const {
  2425   assert(_dictionary->minSize() <= IndexSetSize,
  2426     "Some sizes can't be allocated without recourse to"
  2427     " linear allocation buffers");
  2428   assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
  2429     "else MIN_TREE_CHUNK_SIZE is wrong");
  2430   assert((IndexSetStride == 2 && IndexSetStart == 2) ||
  2431          (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
  2432   assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
  2433       "Some for-loops may be incorrectly initialized");
  2434   assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
  2435       "For-loops that iterate over IndexSet with stride 2 may be wrong");
  2437 #endif
  2439 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  2440   assert_lock_strong(&_freelistLock);
  2441   FreeList total;
  2442   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  2443   FreeList::print_labels_on(gclog_or_tty, "size");
  2444   size_t totalFree = 0;
  2445   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2446     const FreeList *fl = &_indexedFreeList[i];
  2447     totalFree += fl->count() * fl->size();
  2448     if (i % (40*IndexSetStride) == 0) {
  2449       FreeList::print_labels_on(gclog_or_tty, "size");
  2451     fl->print_on(gclog_or_tty);
  2452     total.set_bfrSurp(    total.bfrSurp()     + fl->bfrSurp()    );
  2453     total.set_surplus(    total.surplus()     + fl->surplus()    );
  2454     total.set_desired(    total.desired()     + fl->desired()    );
  2455     total.set_prevSweep(  total.prevSweep()   + fl->prevSweep()  );
  2456     total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
  2457     total.set_count(      total.count()       + fl->count()      );
  2458     total.set_coalBirths( total.coalBirths()  + fl->coalBirths() );
  2459     total.set_coalDeaths( total.coalDeaths()  + fl->coalDeaths() );
  2460     total.set_splitBirths(total.splitBirths() + fl->splitBirths());
  2461     total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
  2463   total.print_on(gclog_or_tty, "TOTAL");
  2464   gclog_or_tty->print_cr("Total free in indexed lists "
  2465                          SIZE_FORMAT " words", totalFree);
  2466   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
  2467     (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
  2468             (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
  2469     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
  2470   _dictionary->printDictCensus();
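// Reading the summary line above (hypothetical totals): with 1000 chunks at
// the previous sweep, 300 births (split + coal) and 100 deaths since then,
// growth = (300 - 100) / 1000 = 0.2; with desired == 500 and count == 400,
// deficit = (500 - 400) / 500 = 0.2, i.e. the lists hold 20% fewer chunks
// than the censused demand calls for.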
  2473 ///////////////////////////////////////////////////////////////////////////
  2474 // CFLS_LAB
  2475 ///////////////////////////////////////////////////////////////////////////
  2477 #define VECTOR_257(x)                                                                                  \
  2478   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
  2479   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2480      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2481      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2482      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2483      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2484      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2485      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2486      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2487      x }
  2489 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
  2490 // OldPLABSize, whose static default is different; if overridden at the
  2491 // command-line, this will get reinitialized via a call to
  2492 // modify_initialization() below.
  2493 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
  2494   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
  2495 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
  2496 int    CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
  2498 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
  2499   _cfls(cfls)
  2501   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
  2502   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2503        i < CompactibleFreeListSpace::IndexSetSize;
  2504        i += CompactibleFreeListSpace::IndexSetStride) {
  2505     _indexedFreeList[i].set_size(i);
  2506     _num_blocks[i] = 0;
  2510 static bool _CFLS_LAB_modified = false;
  2512 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
  2513   assert(!_CFLS_LAB_modified, "Call only once");
  2514   _CFLS_LAB_modified = true;
  2515   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2516        i < CompactibleFreeListSpace::IndexSetSize;
  2517        i += CompactibleFreeListSpace::IndexSetStride) {
  2518     _blocks_to_claim[i].modify(n, wt, true /* force */);
  2522 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  2523   FreeChunk* res;
  2524   word_sz = _cfls->adjustObjectSize(word_sz);
  2525   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
  2526     // This locking manages sync with other large object allocations.
  2527     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
  2528                     Mutex::_no_safepoint_check_flag);
  2529     res = _cfls->getChunkFromDictionaryExact(word_sz);
  2530     if (res == NULL) return NULL;
  2531   } else {
  2532     FreeList* fl = &_indexedFreeList[word_sz];
  2533     if (fl->count() == 0) {
  2534       // Attempt to refill this local free list.
  2535       get_from_global_pool(word_sz, fl);
  2536       // If it didn't work, give up.
  2537       if (fl->count() == 0) return NULL;
  2539     res = fl->getChunkAtHead();
  2540     assert(res != NULL, "Why was count non-zero?");
  2542   res->markNotFree();
  2543   assert(!res->isFree(), "shouldn't be marked free");
  2544   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  2545   // mangle a just allocated object with a distinct pattern.
  2546   debug_only(res->mangleAllocated(word_sz));
  2547   return (HeapWord*)res;
  2550 // Get a chunk of blocks of the right size and update related
  2551 // book-keeping stats
  2552 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
  2553   // Get the #blocks we want to claim
  2554   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
  2555   assert(n_blks > 0, "Error");
  2556   assert(ResizePLAB || n_blks == OldPLABSize, "Error");
  2557   // In some cases, when the application has a phase change,
  2558   // there may be a sudden and sharp shift in the object survival
  2559   // profile, and updating the counts at the end of a scavenge
  2560   // may not be quick enough, giving rise to large scavenge pauses
  2561   // during these phase changes. It is beneficial to detect such
  2562   // changes on-the-fly during a scavenge and avoid such a phase-change
  2563   // pothole. The following code is a heuristic attempt to do that.
  2564   // It is protected by a product flag until we have gained
  2565   // enough experience with this heuristic and fine-tuned its behaviour.
  2566   // WARNING: This might increase fragmentation if we overreact to
  2567   // small spikes, so some kind of historical smoothing based on
  2568   // previous experience with the greater reactivity might be useful.
  2569   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
  2570   // default.
  2571   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
  2572     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
  2573     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
  2574     n_blks = MIN2(n_blks, CMSOldPLABMax);
  2575   }
  2576   assert(n_blks > 0, "Error");
  2577   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
  2578   // Update stats table entry for this block size
  2579   _num_blocks[word_sz] += fl->count();
  2580 }
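// Editorial example (illustrative values, not necessarily the build defaults):
// with CMSOldPLABResizeQuicker enabled, a burst of demand inflates the claim.
// Suppose n_blks == 50 from the weighted average, _num_blocks[word_sz] == 4000,
// CMSOldPLABToleranceFactor == 4, CMSOldPLABNumRefills == 4,
// CMSOldPLABReactivityFactor == 2 and CMSOldPLABMax == 1024.  Then
//
//   multiple = 4000 / (4 * 4 * 50) = 5
//   n_blks  += 2 * 5 * 50          = 50 + 500 = 550
//   n_blks   = MIN2(550, 1024)     = 550
//
// so this refill asks the shared space for 550 blocks instead of 50.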
  2582 void CFLS_LAB::compute_desired_plab_size() {
  2583   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2584        i < CompactibleFreeListSpace::IndexSetSize;
  2585        i += CompactibleFreeListSpace::IndexSetStride) {
  2586     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
  2587            "Counter inconsistency");
  2588     if (_global_num_workers[i] > 0) {
  2589       // Need to smooth wrt historical average
  2590       if (ResizeOldPLAB) {
  2591         _blocks_to_claim[i].sample(
  2592           MAX2((size_t)CMSOldPLABMin,
  2593           MIN2((size_t)CMSOldPLABMax,
  2594                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
  2595       }
  2596       // Reset counters for next round
  2597       _global_num_workers[i] = 0;
  2598       _global_num_blocks[i] = 0;
  2599       if (PrintOldPLAB) {
  2600         gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average());
  2601       }
  2602     }
  2603   }
  2604 }
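// Editorial example (not in the original file): the sample fed to the weighted
// average is the per-worker, per-refill block demand, clamped to
// [CMSOldPLABMin, CMSOldPLABMax].  With illustrative values
// _global_num_blocks[i] == 6000, _global_num_workers[i] == 4 and
// CMSOldPLABNumRefills == 4:
//
//   sample = 6000 / (4 * 4) = 375        // blocks per worker per refill
//   sample = clamped to [CMSOldPLABMin, CMSOldPLABMax]
//
// AdaptiveWeightedAverage then folds the sample into the running estimate,
// e.g. with a weight of 50: new_avg = 0.5 * old_avg + 0.5 * 375.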
  2606 void CFLS_LAB::retire(int tid) {
  2607   // We run this single threaded with the world stopped;
  2608   // so no need for locks and such.
  2609 #define CFLS_LAB_PARALLEL_ACCESS 0
  2610   NOT_PRODUCT(Thread* t = Thread::current();)
  2611   assert(Thread::current()->is_VM_thread(), "Error");
  2612   assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
  2613          "Will access an uninitialized slot below");
  2614 #if CFLS_LAB_PARALLEL_ACCESS
  2615   for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
  2616        i > 0;
  2617        i -= CompactibleFreeListSpace::IndexSetStride) {
  2618 #else // CFLS_LAB_PARALLEL_ACCESS
  2619   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2620        i < CompactibleFreeListSpace::IndexSetSize;
  2621        i += CompactibleFreeListSpace::IndexSetStride) {
  2622 #endif // !CFLS_LAB_PARALLEL_ACCESS
  2623     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
  2624            "Can't retire more than what we obtained");
  2625     if (_num_blocks[i] > 0) {
  2626       size_t num_retire =  _indexedFreeList[i].count();
  2627       assert(_num_blocks[i] > num_retire, "Should have used at least one");
  2628       {
  2629 #if CFLS_LAB_PARALLEL_ACCESS
  2630         MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
  2631                         Mutex::_no_safepoint_check_flag);
  2632 #endif // CFLS_LAB_PARALLEL_ACCESS
  2633         // Update global stats for num_blocks used
  2634         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
  2635         _global_num_workers[i]++;
  2636         assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
  2637         if (num_retire > 0) {
  2638           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
  2639           // Reset this list.
  2640           _indexedFreeList[i] = FreeList();
  2641           _indexedFreeList[i].set_size(i);
  2642         }
  2643       }
  2644       if (PrintOldPLAB) {
  2645         gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
  2646                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
  2647       }
  2648       // Reset stats for next round
  2649       _num_blocks[i]         = 0;
  2650     }
  2651   }
  2652 }
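// Editorial example (not in the original file): retire() folds each worker's
// per-size usage into the global counters.  If a worker obtained 100 blocks of
// size i during this scavenge and 10 still sit unused on its local list:
//
//   num_retire              = 10
//   _global_num_blocks[i]  += 100 - 10    // the 90 blocks actually used
//   _global_num_workers[i]++
//
// and the 10 unused blocks are prepended back onto the shared
// _indexedFreeList[i] for the next round.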
  2654 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
  2655   assert(fl->count() == 0, "Precondition.");
  2656   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
  2657          "Precondition");
  2659   // We'll try all multiples of word_sz in the indexed set, starting with
  2660   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
  2661   // then try getting a big chunk and splitting it.
  2662   {
  2663     bool found;
  2664     int  k;
  2665     size_t cur_sz;
  2666     for (k = 1, cur_sz = k * word_sz, found = false;
  2667          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
  2668          (CMSSplitIndexedFreeListBlocks || k <= 1);
  2669          k++, cur_sz = k * word_sz) {
  2670       FreeList* gfl = &_indexedFreeList[cur_sz];
  2671       FreeList fl_for_cur_sz;  // Empty.
  2672       fl_for_cur_sz.set_size(cur_sz);
  2673       {
  2674         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
  2675                         Mutex::_no_safepoint_check_flag);
  2676         if (gfl->count() != 0) {
  2677           // nn is the number of chunks of size cur_sz that
  2678           // we'd need to split k-ways each, in order to create
  2679           // "n" chunks of size word_sz each.
  2680           const size_t nn = MAX2(n/k, (size_t)1);
  2681           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
  2682           found = true;
  2683           if (k > 1) {
  2684             // Update split death stats for the cur_sz-size blocks list:
  2685             // we increment the split death count by the number of blocks
  2686             // we just took from the cur_sz-size blocks list and which
  2687             // we will be splitting below.
  2688             ssize_t deaths = _indexedFreeList[cur_sz].splitDeaths() +
  2689                              fl_for_cur_sz.count();
  2690             _indexedFreeList[cur_sz].set_splitDeaths(deaths);
  2691           }
  2692         }
  2693       }
  2694       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
  2695       if (found) {
  2696         if (k == 1) {
  2697           fl->prepend(&fl_for_cur_sz);
  2698         } else {
  2699           // Divide each block on fl_for_cur_sz up k ways.
  2700           FreeChunk* fc;
  2701           while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
  2702             // Must do this in reverse order, so that anybody attempting to
  2703             // access the main chunk sees it as a single free block until we
  2704             // change it.
  2705             size_t fc_size = fc->size();
  2706             for (int i = k-1; i >= 0; i--) {
  2707               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2708               ffc->setSize(word_sz);
  2709               ffc->linkNext(NULL);
  2710               ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2711               // Above must occur before BOT is updated below.
  2712               // splitting from the right; before the decrement below, fc_size == (i + 1) * word_sz
  2713               _bt.mark_block((HeapWord*)ffc, word_sz);
  2714               fc_size -= word_sz;
  2715               _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
  2716               _bt.verify_single_block((HeapWord*)fc, fc_size);
  2717               _bt.verify_single_block((HeapWord*)ffc, ffc->size());
  2718               // Push this on "fl".
  2719               fl->returnChunkAtHead(ffc);
  2720             }
  2721             // TRAP
  2722             assert(fl->tail()->next() == NULL, "List invariant.");
  2723           }
  2724         }
  2725         // Update birth stats for this block size.
  2726         size_t num = fl->count();
  2727         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  2728                         Mutex::_no_safepoint_check_flag);
  2729         ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
  2730         _indexedFreeList[word_sz].set_splitBirths(births);
  2731         return;
  2732       }
  2733     }
  2734   }
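// Editorial example (illustrative, not in the original file): suppose
// word_sz == 8 and n == 16 blocks are wanted, but _indexedFreeList[8] and
// _indexedFreeList[16] are empty while _indexedFreeList[24] has chunks
// (k == 3, cur_sz == 24).  Then
//
//   nn = MAX2(16 / 3, 1) = 5      // take 5 chunks of 24 words each
//
// and each 24-word chunk is carved, from the right, into three 8-word blocks,
// yielding 15 blocks on "fl" plus the split-death/split-birth census updates.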
  2735   // Otherwise, we'll split a block from the dictionary.
  2736   FreeChunk* fc = NULL;
  2737   FreeChunk* rem_fc = NULL;
  2738   size_t rem;
  2739   {
  2740     MutexLockerEx x(parDictionaryAllocLock(),
  2741                     Mutex::_no_safepoint_check_flag);
  2742     while (n > 0) {
  2743       fc = dictionary()->getChunk(MAX2(n * word_sz,
  2744                                   _dictionary->minSize()),
  2745                                   FreeBlockDictionary::atLeast);
  2746       if (fc != NULL) {
  2747         _bt.allocated((HeapWord*)fc, fc->size());  // update _unallocated_blk
  2748         dictionary()->dictCensusUpdate(fc->size(),
  2749                                        true /*split*/,
  2750                                        false /*birth*/);
  2751         break;
  2752       } else {
  2753         n--;
  2754       }
  2755     }
  2756     if (fc == NULL) return;
  2757     assert((ssize_t)n >= 1, "Control point invariant");
  2758     // Otherwise, split up that block.
  2759     const size_t nn = fc->size() / word_sz;
  2760     n = MIN2(nn, n);
  2761     assert((ssize_t)n >= 1, "Control point invariant");
  2762     rem = fc->size() - n * word_sz;
  2763     // If there is a remainder, and it's too small, allocate one fewer.
  2764     if (rem > 0 && rem < MinChunkSize) {
  2765       n--; rem += word_sz;
  2766     }
  2767     // Note that at this point we may have n == 0.
  2768     assert((ssize_t)n >= 0, "Control point invariant");
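// Editorial example (illustrative sizes, not the build defaults): suppose
// word_sz == 8, the caller asked for n == 16 blocks, and the dictionary
// returned a chunk of fc->size() == 130 words, with MinChunkSize == 4:
//
//   n   = MIN2(130 / 8, 16) = 16
//   rem = 130 - 16 * 8      = 2       // too small to stand alone
//   n-- ; rem += 8                    // n == 15, rem == 10
//
// so 15 blocks are carved out and a viable 10-word remainder chunk is kept.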
  2770     // If n is 0, the chunk fc that was found is not large
  2771     // enough to leave a viable remainder.  We are unable to
  2772     // allocate even one block.  Return fc to the
  2773     // dictionary and return, leaving "fl" empty.
  2774     if (n == 0) {
  2775       returnChunkToDictionary(fc);
  2776       return;
  2777     }
  2779     // First return the remainder, if any.
  2780     // Note that we hold the lock until we decide if we're going to give
  2781     // back the remainder to the dictionary, since a concurrent allocation
  2782     // may otherwise see the heap as empty.  (We're willing to take that
  2783     // hit if the block is a small block.)
  2784     if (rem > 0) {
  2785       size_t prefix_size = n * word_sz;
  2786       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
  2787       rem_fc->setSize(rem);
  2788       rem_fc->linkNext(NULL);
  2789       rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2790       // Above must occur before BOT is updated below.
  2791       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
  2792       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
  2793       if (rem >= IndexSetSize) {
  2794         returnChunkToDictionary(rem_fc);
  2795         dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
  2796         rem_fc = NULL;
  2797       }
  2798       // Otherwise, return it to the small list below.
  2799     }
  2800   }
  2801   if (rem_fc != NULL) {
  2802     MutexLockerEx x(_indexedFreeListParLocks[rem],
  2803                     Mutex::_no_safepoint_check_flag);
  2804     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
  2805     _indexedFreeList[rem].returnChunkAtHead(rem_fc);
  2806     smallSplitBirth(rem);
  2807   }
  2808   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  2809   // Now do the splitting up.
  2810   // Must do this in reverse order, so that anybody attempting to
  2811   // access the main chunk sees it as a single free block until we
  2812   // change it.
  2813   size_t fc_size = n * word_sz;
  2814   // All but first chunk in this loop
  2815   for (ssize_t i = n-1; i > 0; i--) {
  2816     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2817     ffc->setSize(word_sz);
  2818     ffc->linkNext(NULL);
  2819     ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2820     // Above must occur before BOT is updated below.
  2821     // splitting from the right; before the decrement below, fc_size == (i + 1) * word_sz
  2822     _bt.mark_block((HeapWord*)ffc, word_sz);
  2823     fc_size -= word_sz;
  2824     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
  2825     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
  2826     _bt.verify_single_block((HeapWord*)fc, fc_size);
  2827     // Push this on "fl".
  2828     fl->returnChunkAtHead(ffc);
  2829   }
  2830   // First chunk
  2831   fc->setSize(word_sz);
  2832   fc->linkNext(NULL);
  2833   fc->linkPrev(NULL);
  2834   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  2835   _bt.verify_single_block((HeapWord*)fc, fc->size());
  2836   fl->returnChunkAtHead(fc);
  2838   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  2839   {
  2840     // Update the stats for this block size.
  2841     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  2842                     Mutex::_no_safepoint_check_flag);
  2843     const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
  2844     _indexedFreeList[word_sz].set_splitBirths(births);
  2845     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
  2846     // _indexedFreeList[word_sz].set_surplus(new_surplus);
  2847   }
  2849   // TRAP
  2850   assert(fl->tail()->next() == NULL, "List invariant.");
  2851 }
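// Editorial note (not part of the original file): the dictionary path above
// carves the prefix right-to-left on purpose.  Continuing the example of 15
// blocks of 8 words taken from a single chunk fc:
//
//   i = 14: block at fc + 112, remaining prefix shrinks to 112 words
//   i = 13: block at fc + 104, remaining prefix shrinks to 104 words
//   ...
//   i = 1:  block at fc + 8,   remaining prefix shrinks to 8 words
//   finally fc itself becomes the last 8-word block
//
// A concurrent reader that probes fc before a given step still sees one
// consistent free block covering the not-yet-carved prefix.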
  2853 // Set up the space's par_seq_tasks structure for work claiming
  2854 // for parallel rescan. See CMSParRemarkTask where this is currently used.
  2855 // XXX Need to suitably abstract and generalize this and the next
  2856 // method into one.
  2857 void
  2858 CompactibleFreeListSpace::
  2859 initialize_sequential_subtasks_for_rescan(int n_threads) {
  2860   // The "size" of each task is fixed according to rescan_task_size.
  2861   assert(n_threads > 0, "Unexpected n_threads argument");
  2862   const size_t task_size = rescan_task_size();
  2863   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
  2864   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  2865   assert(n_tasks == 0 ||
  2866          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
  2867           (used_region().start() + n_tasks*task_size >= used_region().end())),
  2868          "n_tasks calculation incorrect");
  2869   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  2870   assert(!pst->valid(), "Clobbering existing data?");
  2871   pst->set_par_threads(n_threads);
  2872   pst->set_n_tasks((int)n_tasks);
  2873 }
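// Editorial example (illustrative numbers, not real task sizes): n_tasks is a
// ceiling division over the used region, e.g.
//
//   used_region().word_size() == 100000, rescan_task_size() == 4096
//   n_tasks = (100000 + 4096 - 1) / 4096 = 25
//
// i.e. 24 full tasks plus one task covering the partial tail, which is exactly
// the bracketing the assert above checks.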
  2875 // Set up the space's par_seq_tasks structure for work claiming
  2876 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
  2877 void
  2878 CompactibleFreeListSpace::
  2879 initialize_sequential_subtasks_for_marking(int n_threads,
  2880                                            HeapWord* low) {
  2881   // The "size" of each task is fixed according to marking_task_size.
  2882   assert(n_threads > 0, "Unexpected n_threads argument");
  2883   const size_t task_size = marking_task_size();
  2884   assert(task_size > CardTableModRefBS::card_size_in_words &&
  2885          (task_size %  CardTableModRefBS::card_size_in_words == 0),
  2886          "Otherwise arithmetic below would be incorrect");
  2887   MemRegion span = _gen->reserved();
  2888   if (low != NULL) {
  2889     if (span.contains(low)) {
  2890       // Align low down to  a card boundary so that
  2891       // we can use block_offset_careful() on span boundaries.
  2892       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
  2893                                  CardTableModRefBS::card_size);
  2894       // Clip span prefix at aligned_low
  2895       span = span.intersection(MemRegion(aligned_low, span.end()));
  2896     } else if (low > span.end()) {
  2897       span = MemRegion(low, low);  // Null region
  2898     } // else use entire span
  2899   }
  2900   assert(span.is_empty() ||
  2901          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
  2902         "span should start at a card boundary");
  2903   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  2904   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  2905   assert(n_tasks == 0 ||
  2906          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
  2907           (span.start() + n_tasks*task_size >= span.end())),
  2908          "n_tasks calculation incorrect");
  2909   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  2910   assert(!pst->valid(), "Clobbering existing data?");
  2911   pst->set_par_threads(n_threads);
  2912   pst->set_n_tasks((int)n_tasks);
  2913 }
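// Editorial example (illustrative addresses, not real defaults): when "low"
// falls inside the generation's reserved span, it is first rounded down to a
// card boundary so block_offset_careful() can be used at task boundaries.
// Assuming 512-byte cards:
//
//   low         = 0x7f0000001234
//   aligned_low = 0x7f0000001200      // align_size_down(low, 512)
//   span        = [aligned_low, span.end())
//   n_tasks     = (span.word_size() + task_size - 1) / task_size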
