src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author       ysr
date         Mon, 16 Aug 2010 15:58:42 -0700
changeset    2071:be3f9c242c9d
parent       1934:e9ff18c4ace7
child        2132:179464550c7d
permissions  -rw-r--r--

6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
Summary: GC workers now recognize an intermediate transient state of blocks which are allocated but have not yet completed initialization. blk_start() calls do not attempt to determine the size of a block in the transient state; instead they wait for the block to become initialized so that it is safe to query its size. The order of initialization of object fields (klass, free bit and size) was audited and made to respect the block state transition protocol. Also includes some new assertion-checking code, enabled in debug mode.
Reviewed-by: chrisphi, johnc, poonam
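
To illustrate the protocol the summary describes, here is a minimal sketch in standard C++ of a reader that tolerates the transient allocated-but-not-yet-initialized state. The Block type, its fields and the atomics below are hypothetical and for illustration only, not the HotSpot FreeChunk/oop layout: the reader trusts a size only if it can attribute it either to a still-free chunk or to a fully initialized object, and otherwise retries. block_size() and block_size_no_stall() further down in this file are the real counterparts of this loop, keyed off the FreeChunk markers and the klass word.

#include <atomic>
#include <cstddef>

// Hypothetical block layout, used only for this sketch (not HotSpot's
// FreeChunk/oopDesc structures).
struct Block {
  std::atomic<bool>        free_bit{true};   // set while the block is a free chunk
  std::atomic<std::size_t> size{0};          // valid once free_bit or klass is visible
  std::atomic<void*>       klass{nullptr};   // writer publishes this last (release store)
};

// Reader side (e.g. a BOT walker): never trust the size of a transient block;
// retry until the block is either still a free chunk or a fully initialized object.
std::size_t block_size_waiting(const Block* b) {
  while (true) {
    if (b->free_bit.load(std::memory_order_acquire)) {
      std::size_t sz = b->size.load(std::memory_order_acquire);
      // Re-check: if the block is still free, the size read above is consistent.
      if (b->free_bit.load(std::memory_order_acquire)) return sz;
    } else if (b->klass.load(std::memory_order_acquire) != nullptr) {
      // The klass word is published only after initialization completes.
      return b->size.load(std::memory_order_acquire);
    }
    // Neither marker visible: allocated but not yet initialized, so spin.
  }
}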

     1 /*
     2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_compactibleFreeListSpace.cpp.incl"
    28 /////////////////////////////////////////////////////////////////////////
    29 //// CompactibleFreeListSpace
    30 /////////////////////////////////////////////////////////////////////////
    32 // highest ranked  free list lock rank
    33 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
    35 // Defaults are 0 so things will break badly if incorrectly initialized.
    36 int CompactibleFreeListSpace::IndexSetStart  = 0;
    37 int CompactibleFreeListSpace::IndexSetStride = 0;
    39 size_t MinChunkSize = 0;
    41 void CompactibleFreeListSpace::set_cms_values() {
    42   // Set CMS global values
    43   assert(MinChunkSize == 0, "already set");
    44   #define numQuanta(x,y) ((x+y-1)/y)
    45   MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
    47   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
    48   IndexSetStart  = MinObjAlignment;
    49   IndexSetStride = MinObjAlignment;
    50 }
    52 // Constructor
    53 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
    54   MemRegion mr, bool use_adaptive_freelists,
    55   FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
    56   _dictionaryChoice(dictionaryChoice),
    57   _adaptive_freelists(use_adaptive_freelists),
    58   _bt(bs, mr),
    59   // free list locks are in the range of values taken by _lockRank
    60   // This range currently is [_leaf+2, _leaf+3]
    61   // Note: this requires that CFLspace c'tors
     62   // are called serially in the order in which the locks
     63   // are acquired in the program text. This is true today.
    64   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
    65   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
    66                           "CompactibleFreeListSpace._dict_par_lock", true),
    67   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    68                     CMSRescanMultiple),
    69   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    70                     CMSConcMarkMultiple),
    71   _collector(NULL)
    72 {
    73   _bt.set_space(this);
    74   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    75   // We have all of "mr", all of which we place in the dictionary
    76   // as one big chunk. We'll need to decide here which of several
    77   // possible alternative dictionary implementations to use. For
    78   // now the choice is easy, since we have only one working
    79   // implementation, namely, the simple binary tree (splaying
    80   // temporarily disabled).
    81   switch (dictionaryChoice) {
    82     case FreeBlockDictionary::dictionarySplayTree:
    83     case FreeBlockDictionary::dictionarySkipList:
    84     default:
    85       warning("dictionaryChoice: selected option not understood; using"
    86               " default BinaryTreeDictionary implementation instead.");
    87     case FreeBlockDictionary::dictionaryBinaryTree:
    88       _dictionary = new BinaryTreeDictionary(mr);
    89       break;
    90   }
    91   assert(_dictionary != NULL, "CMS dictionary initialization");
    92   // The indexed free lists are initially all empty and are lazily
    93   // filled in on demand. Initialize the array elements to NULL.
    94   initializeIndexedFreeListArray();
    96   // Not using adaptive free lists assumes that allocation is first
    97   // from the linAB's.  Also a cms perm gen which can be compacted
    98   // has to have the klass's klassKlass allocated at a lower
    99   // address in the heap than the klass so that the klassKlass is
   100   // moved to its new location before the klass is moved.
   101   // Set the _refillSize for the linear allocation blocks
   102   if (!use_adaptive_freelists) {
   103     FreeChunk* fc = _dictionary->getChunk(mr.word_size());
   104     // The small linAB initially has all the space and will allocate
   105     // a chunk of any size.
   106     HeapWord* addr = (HeapWord*) fc;
   107     _smallLinearAllocBlock.set(addr, fc->size() ,
   108       1024*SmallForLinearAlloc, fc->size());
   109     // Note that _unallocated_block is not updated here.
   110     // Allocations from the linear allocation block should
   111     // update it.
   112   } else {
   113     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
   114                                SmallForLinearAlloc);
   115   }
   116   // CMSIndexedFreeListReplenish should be at least 1
   117   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
   118   _promoInfo.setSpace(this);
   119   if (UseCMSBestFit) {
   120     _fitStrategy = FreeBlockBestFitFirst;
   121   } else {
   122     _fitStrategy = FreeBlockStrategyNone;
   123   }
   124   checkFreeListConsistency();
   126   // Initialize locks for parallel case.
   127   if (ParallelGCThreads > 0) {
   128     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   129       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
   130                                               "a freelist par lock",
   131                                               true);
   132       if (_indexedFreeListParLocks[i] == NULL)
   133         vm_exit_during_initialization("Could not allocate a par lock");
   134       DEBUG_ONLY(
   135         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
   136       )
   137     }
   138     _dictionary->set_par_lock(&_parDictionaryAllocLock);
   139   }
   140 }
   142 // Like CompactibleSpace forward() but always calls cross_threshold() to
   143 // update the block offset table.  Removed initialize_threshold call because
   144 // CFLS does not use a block offset array for contiguous spaces.
   145 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
   146                                     CompactPoint* cp, HeapWord* compact_top) {
   147   // q is alive
   148   // First check if we should switch compaction space
   149   assert(this == cp->space, "'this' should be current compaction space.");
   150   size_t compaction_max_size = pointer_delta(end(), compact_top);
   151   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
   152     "virtual adjustObjectSize_v() method is not correct");
   153   size_t adjusted_size = adjustObjectSize(size);
   154   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
   155          "no small fragments allowed");
   156   assert(minimum_free_block_size() == MinChunkSize,
   157          "for de-virtualized reference below");
    158   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
   159   if (adjusted_size + MinChunkSize > compaction_max_size &&
   160       adjusted_size != compaction_max_size) {
   161     do {
   162       // switch to next compaction space
   163       cp->space->set_compaction_top(compact_top);
   164       cp->space = cp->space->next_compaction_space();
   165       if (cp->space == NULL) {
   166         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
   167         assert(cp->gen != NULL, "compaction must succeed");
   168         cp->space = cp->gen->first_compaction_space();
   169         assert(cp->space != NULL, "generation must have a first compaction space");
   170       }
   171       compact_top = cp->space->bottom();
   172       cp->space->set_compaction_top(compact_top);
    173       // The correct adjusted_size may not be the same as that computed above
    174       // (i.e., cp->space may no longer be "this"), so adjust the size again.
    175       // Use the virtual method here; the non-virtual version was used above
    176       // to save the virtual dispatch.
   177       adjusted_size = cp->space->adjust_object_size_v(size);
   178       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
   179       assert(cp->space->minimum_free_block_size() == 0, "just checking");
   180     } while (adjusted_size > compaction_max_size);
   181   }
   183   // store the forwarding pointer into the mark word
   184   if ((HeapWord*)q != compact_top) {
   185     q->forward_to(oop(compact_top));
   186     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
   187   } else {
   188     // if the object isn't moving we can just set the mark to the default
   189     // mark and handle it specially later on.
   190     q->init_mark();
   191     assert(q->forwardee() == NULL, "should be forwarded to NULL");
   192   }
   194   VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
   195   compact_top += adjusted_size;
   197   // we need to update the offset table so that the beginnings of objects can be
   198   // found during scavenge.  Note that we are updating the offset table based on
   199   // where the object will be once the compaction phase finishes.
   201   // Always call cross_threshold().  A contiguous space can only call it when
   202   // the compaction_top exceeds the current threshold but not for an
   203   // non-contiguous space.
   204   cp->threshold =
   205     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
   206   return compact_top;
   207 }
   209 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
   210 // and use of single_block instead of alloc_block.  The name here is not really
   211 // appropriate - maybe a more general name could be invented for both the
   212 // contiguous and noncontiguous spaces.
   214 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
   215   _bt.single_block(start, the_end);
   216   return end();
   217 }
   219 // Initialize them to NULL.
   220 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
   221   for (size_t i = 0; i < IndexSetSize; i++) {
   222     // Note that on platforms where objects are double word aligned,
   223     // the odd array elements are not used.  It is convenient, however,
   224     // to map directly from the object size to the array element.
   225     _indexedFreeList[i].reset(IndexSetSize);
   226     _indexedFreeList[i].set_size(i);
   227     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   228     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   229     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   230     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   231   }
   232 }
   234 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
   235   for (int i = 1; i < IndexSetSize; i++) {
   236     assert(_indexedFreeList[i].size() == (size_t) i,
   237       "Indexed free list sizes are incorrect");
   238     _indexedFreeList[i].reset(IndexSetSize);
   239     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   240     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   241     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   242     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   243   }
   244 }
   246 void CompactibleFreeListSpace::reset(MemRegion mr) {
   247   resetIndexedFreeListArray();
   248   dictionary()->reset();
   249   if (BlockOffsetArrayUseUnallocatedBlock) {
   250     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
   251     // Everything's allocated until proven otherwise.
   252     _bt.set_unallocated_block(end());
   253   }
   254   if (!mr.is_empty()) {
   255     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
   256     _bt.single_block(mr.start(), mr.word_size());
   257     FreeChunk* fc = (FreeChunk*) mr.start();
   258     fc->setSize(mr.word_size());
   259     if (mr.word_size() >= IndexSetSize ) {
   260       returnChunkToDictionary(fc);
   261     } else {
   262       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
   263       _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
   264     }
   265   }
   266   _promoInfo.reset();
   267   _smallLinearAllocBlock._ptr = NULL;
   268   _smallLinearAllocBlock._word_size = 0;
   269 }
   271 void CompactibleFreeListSpace::reset_after_compaction() {
   272   // Reset the space to the new reality - one free chunk.
   273   MemRegion mr(compaction_top(), end());
   274   reset(mr);
   275   // Now refill the linear allocation block(s) if possible.
   276   if (_adaptive_freelists) {
   277     refillLinearAllocBlocksIfNeeded();
   278   } else {
   279     // Place as much of mr in the linAB as we can get,
   280     // provided it was big enough to go into the dictionary.
   281     FreeChunk* fc = dictionary()->findLargestDict();
   282     if (fc != NULL) {
   283       assert(fc->size() == mr.word_size(),
   284              "Why was the chunk broken up?");
   285       removeChunkFromDictionary(fc);
   286       HeapWord* addr = (HeapWord*) fc;
   287       _smallLinearAllocBlock.set(addr, fc->size() ,
   288         1024*SmallForLinearAlloc, fc->size());
   289       // Note that _unallocated_block is not updated here.
   290     }
   291   }
   292 }
   294 // Walks the entire dictionary, returning a coterminal
   295 // chunk, if it exists. Use with caution since it involves
   296 // a potentially complete walk of a potentially large tree.
   297 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
   299   assert_lock_strong(&_freelistLock);
   301   return dictionary()->find_chunk_ends_at(end());
   302 }
   305 #ifndef PRODUCT
   306 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
   307   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   308     _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
   309   }
   310 }
   312 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
   313   size_t sum = 0;
   314   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   315     sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
   316   }
   317   return sum;
   318 }
   320 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
   321   size_t count = 0;
   322   for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
   323     debug_only(
   324       ssize_t total_list_count = 0;
   325       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   326          fc = fc->next()) {
   327         total_list_count++;
   328       }
   329       assert(total_list_count ==  _indexedFreeList[i].count(),
   330         "Count in list is incorrect");
   331     )
   332     count += _indexedFreeList[i].count();
   333   }
   334   return count;
   335 }
   337 size_t CompactibleFreeListSpace::totalCount() {
   338   size_t num = totalCountInIndexedFreeLists();
   339   num +=  dictionary()->totalCount();
   340   if (_smallLinearAllocBlock._word_size != 0) {
   341     num++;
   342   }
   343   return num;
   344 }
   345 #endif
   347 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
   348   FreeChunk* fc = (FreeChunk*) p;
   349   return fc->isFree();
   350 }
   352 size_t CompactibleFreeListSpace::used() const {
   353   return capacity() - free();
   354 }
   356 size_t CompactibleFreeListSpace::free() const {
   357   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   358   // if you do this while the structures are in flux you
   359   // may get an approximate answer only; for instance
   360   // because there is concurrent allocation either
   361   // directly by mutators or for promotion during a GC.
   362   // It's "MT-safe", however, in the sense that you are guaranteed
   363   // not to crash and burn, for instance, because of walking
   364   // pointers that could disappear as you were walking them.
   365   // The approximation is because the various components
   366   // that are read below are not read atomically (and
   367   // further the computation of totalSizeInIndexedFreeLists()
    368   // is itself a non-atomic computation). The normal use of
   369   // this is during a resize operation at the end of GC
   370   // and at that time you are guaranteed to get the
   371   // correct actual value. However, for instance, this is
   372   // also read completely asynchronously by the "perf-sampler"
   373   // that supports jvmstat, and you are apt to see the values
   374   // flicker in such cases.
   375   assert(_dictionary != NULL, "No _dictionary?");
   376   return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
   377           totalSizeInIndexedFreeLists() +
   378           _smallLinearAllocBlock._word_size) * HeapWordSize;
   379 }
   381 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
   382   assert(_dictionary != NULL, "No _dictionary?");
   383   assert_locked();
   384   size_t res = _dictionary->maxChunkSize();
   385   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
   386                        (size_t) SmallForLinearAlloc - 1));
   387   // XXX the following could potentially be pretty slow;
    388   // should one, pessimally for the rare cases when res
    389   // calculated above is less than IndexSetSize,
   390   // just return res calculated above? My reasoning was that
   391   // those cases will be so rare that the extra time spent doesn't
   392   // really matter....
   393   // Note: do not change the loop test i >= res + IndexSetStride
   394   // to i > res below, because i is unsigned and res may be zero.
   395   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
   396        i -= IndexSetStride) {
   397     if (_indexedFreeList[i].head() != NULL) {
   398       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   399       return i;
   400     }
   401   }
   402   return res;
   403 }
   405 void LinearAllocBlock::print_on(outputStream* st) const {
   406   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
   407             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
   408             _ptr, _word_size, _refillSize, _allocation_size_limit);
   409 }
   411 void CompactibleFreeListSpace::print_on(outputStream* st) const {
   412   st->print_cr("COMPACTIBLE FREELIST SPACE");
   413   st->print_cr(" Space:");
   414   Space::print_on(st);
   416   st->print_cr("promoInfo:");
   417   _promoInfo.print_on(st);
   419   st->print_cr("_smallLinearAllocBlock");
   420   _smallLinearAllocBlock.print_on(st);
   422   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
   424   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
   425                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
   426 }
   428 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
   429 const {
   430   reportIndexedFreeListStatistics();
   431   gclog_or_tty->print_cr("Layout of Indexed Freelists");
   432   gclog_or_tty->print_cr("---------------------------");
   433   FreeList::print_labels_on(st, "size");
   434   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   435     _indexedFreeList[i].print_on(gclog_or_tty);
   436     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   437          fc = fc->next()) {
   438       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
   439                           fc, (HeapWord*)fc + i,
   440                           fc->cantCoalesce() ? "\t CC" : "");
   441     }
   442   }
   443 }
   445 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
   446 const {
   447   _promoInfo.print_on(st);
   448 }
   450 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
   451 const {
   452   _dictionary->reportStatistics();
   453   st->print_cr("Layout of Freelists in Tree");
   454   st->print_cr("---------------------------");
   455   _dictionary->print_free_lists(st);
   456 }
   458 class BlkPrintingClosure: public BlkClosure {
   459   const CMSCollector*             _collector;
   460   const CompactibleFreeListSpace* _sp;
   461   const CMSBitMap*                _live_bit_map;
   462   const bool                      _post_remark;
   463   outputStream*                   _st;
   464 public:
   465   BlkPrintingClosure(const CMSCollector* collector,
   466                      const CompactibleFreeListSpace* sp,
   467                      const CMSBitMap* live_bit_map,
   468                      outputStream* st):
   469     _collector(collector),
   470     _sp(sp),
   471     _live_bit_map(live_bit_map),
   472     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
   473     _st(st) { }
   474   size_t do_blk(HeapWord* addr);
   475 };
   477 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
   478   size_t sz = _sp->block_size_no_stall(addr, _collector);
   479   assert(sz != 0, "Should always be able to compute a size");
   480   if (_sp->block_is_obj(addr)) {
   481     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
   482     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
   483       addr,
   484       dead ? "dead" : "live",
   485       sz,
   486       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
   487     if (CMSPrintObjectsInDump && !dead) {
   488       oop(addr)->print_on(_st);
   489       _st->print_cr("--------------------------------------");
   490     }
   491   } else { // free block
   492     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
   493       addr, sz, CMSPrintChunksInDump ? ":" : ".");
   494     if (CMSPrintChunksInDump) {
   495       ((FreeChunk*)addr)->print_on(_st);
   496       _st->print_cr("--------------------------------------");
   497     }
   498   }
   499   return sz;
   500 }
   502 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
   503   outputStream* st) {
   504   st->print_cr("\n=========================");
   505   st->print_cr("Block layout in CMS Heap:");
   506   st->print_cr("=========================");
   507   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
   508   blk_iterate(&bpcl);
   510   st->print_cr("\n=======================================");
   511   st->print_cr("Order & Layout of Promotion Info Blocks");
   512   st->print_cr("=======================================");
   513   print_promo_info_blocks(st);
   515   st->print_cr("\n===========================");
   516   st->print_cr("Order of Indexed Free Lists");
    517   st->print_cr("===========================");
   518   print_indexed_free_lists(st);
   520   st->print_cr("\n=================================");
   521   st->print_cr("Order of Free Lists in Dictionary");
   522   st->print_cr("=================================");
   523   print_dictionary_free_lists(st);
   524 }
   527 void CompactibleFreeListSpace::reportFreeListStatistics() const {
   528   assert_lock_strong(&_freelistLock);
   529   assert(PrintFLSStatistics != 0, "Reporting error");
   530   _dictionary->reportStatistics();
   531   if (PrintFLSStatistics > 1) {
   532     reportIndexedFreeListStatistics();
   533     size_t totalSize = totalSizeInIndexedFreeLists() +
   534                        _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
   535     gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
   536   }
   537 }
   539 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
   540   assert_lock_strong(&_freelistLock);
   541   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
   542                       "--------------------------------\n");
   543   size_t totalSize = totalSizeInIndexedFreeLists();
   544   size_t   freeBlocks = numFreeBlocksInIndexedFreeLists();
   545   gclog_or_tty->print("Total Free Space: %d\n", totalSize);
   546   gclog_or_tty->print("Max   Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
   547   gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
   548   if (freeBlocks != 0) {
   549     gclog_or_tty->print("Av.  Block  Size: %d\n", totalSize/freeBlocks);
   550   }
   551 }
   553 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
   554   size_t res = 0;
   555   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   556     debug_only(
   557       ssize_t recount = 0;
   558       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   559          fc = fc->next()) {
   560         recount += 1;
   561       }
   562       assert(recount == _indexedFreeList[i].count(),
   563         "Incorrect count in list");
   564     )
   565     res += _indexedFreeList[i].count();
   566   }
   567   return res;
   568 }
   570 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
   571   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
   572     if (_indexedFreeList[i].head() != NULL) {
   573       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   574       return (size_t)i;
   575     }
   576   }
   577   return 0;
   578 }
   580 void CompactibleFreeListSpace::set_end(HeapWord* value) {
   581   HeapWord* prevEnd = end();
   582   assert(prevEnd != value, "unnecessary set_end call");
   583   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   584         "New end is below unallocated block");
   585   _end = value;
   586   if (prevEnd != NULL) {
   587     // Resize the underlying block offset table.
   588     _bt.resize(pointer_delta(value, bottom()));
   589     if (value <= prevEnd) {
   590       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   591              "New end is below unallocated block");
   592     } else {
   593       // Now, take this new chunk and add it to the free blocks.
   594       // Note that the BOT has not yet been updated for this block.
   595       size_t newFcSize = pointer_delta(value, prevEnd);
   596       // XXX This is REALLY UGLY and should be fixed up. XXX
   597       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
   598         // Mark the boundary of the new block in BOT
   599         _bt.mark_block(prevEnd, value);
   600         // put it all in the linAB
   601         if (ParallelGCThreads == 0) {
   602           _smallLinearAllocBlock._ptr = prevEnd;
   603           _smallLinearAllocBlock._word_size = newFcSize;
   604           repairLinearAllocBlock(&_smallLinearAllocBlock);
   605         } else { // ParallelGCThreads > 0
   606           MutexLockerEx x(parDictionaryAllocLock(),
   607                           Mutex::_no_safepoint_check_flag);
   608           _smallLinearAllocBlock._ptr = prevEnd;
   609           _smallLinearAllocBlock._word_size = newFcSize;
   610           repairLinearAllocBlock(&_smallLinearAllocBlock);
   611         }
   612         // Births of chunks put into a LinAB are not recorded.  Births
   613         // of chunks as they are allocated out of a LinAB are.
   614       } else {
   615         // Add the block to the free lists, if possible coalescing it
   616         // with the last free block, and update the BOT and census data.
   617         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
   618       }
   619     }
   620   }
   621 }
   623 class FreeListSpace_DCTOC : public Filtering_DCTOC {
   624   CompactibleFreeListSpace* _cfls;
   625   CMSCollector* _collector;
   626 protected:
   627   // Override.
   628 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
   629   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
   630                                        HeapWord* bottom, HeapWord* top, \
   631                                        ClosureType* cl);                \
   632       void walk_mem_region_with_cl_par(MemRegion mr,                    \
   633                                        HeapWord* bottom, HeapWord* top, \
   634                                        ClosureType* cl);                \
   635     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
   636                                        HeapWord* bottom, HeapWord* top, \
   637                                        ClosureType* cl)
   638   walk_mem_region_with_cl_DECL(OopClosure);
   639   walk_mem_region_with_cl_DECL(FilteringClosure);
   641 public:
   642   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
   643                       CMSCollector* collector,
   644                       OopClosure* cl,
   645                       CardTableModRefBS::PrecisionStyle precision,
   646                       HeapWord* boundary) :
   647     Filtering_DCTOC(sp, cl, precision, boundary),
   648     _cfls(sp), _collector(collector) {}
   649 };
   651 // We de-virtualize the block-related calls below, since we know that our
   652 // space is a CompactibleFreeListSpace.
   653 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
   654 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
   655                                                  HeapWord* bottom,              \
   656                                                  HeapWord* top,                 \
   657                                                  ClosureType* cl) {             \
   658    if (SharedHeap::heap()->n_par_threads() > 0) {                               \
   659      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   660    } else {                                                                     \
   661      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   662    }                                                                            \
   663 }                                                                               \
   664 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
   665                                                       HeapWord* bottom,         \
   666                                                       HeapWord* top,            \
   667                                                       ClosureType* cl) {        \
   668   /* Skip parts that are before "mr", in case "block_start" sent us             \
   669      back too far. */                                                           \
   670   HeapWord* mr_start = mr.start();                                              \
   671   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
   672   HeapWord* next = bottom + bot_size;                                           \
   673   while (next < mr_start) {                                                     \
   674     bottom = next;                                                              \
   675     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
   676     next = bottom + bot_size;                                                   \
   677   }                                                                             \
   678                                                                                 \
   679   while (bottom < top) {                                                        \
   680     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
   681         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   682                     oop(bottom)) &&                                             \
   683         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   684       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   685       bottom += _cfls->adjustObjectSize(word_sz);                               \
   686     } else {                                                                    \
   687       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
   688     }                                                                           \
   689   }                                                                             \
   690 }                                                                               \
   691 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
   692                                                         HeapWord* bottom,       \
   693                                                         HeapWord* top,          \
   694                                                         ClosureType* cl) {      \
   695   /* Skip parts that are before "mr", in case "block_start" sent us             \
   696      back too far. */                                                           \
   697   HeapWord* mr_start = mr.start();                                              \
   698   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
   699   HeapWord* next = bottom + bot_size;                                           \
   700   while (next < mr_start) {                                                     \
   701     bottom = next;                                                              \
   702     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
   703     next = bottom + bot_size;                                                   \
   704   }                                                                             \
   705                                                                                 \
   706   while (bottom < top) {                                                        \
   707     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
   708         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   709                     oop(bottom)) &&                                             \
   710         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   711       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   712       bottom += _cfls->adjustObjectSize(word_sz);                               \
   713     } else {                                                                    \
   714       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
   715     }                                                                           \
   716   }                                                                             \
   717 }
   719 // (There are only two of these, rather than N, because the split is due
   720 // only to the introduction of the FilteringClosure, a local part of the
   721 // impl of this abstraction.)
   722 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
   723 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
   725 DirtyCardToOopClosure*
   726 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
   727                                       CardTableModRefBS::PrecisionStyle precision,
   728                                       HeapWord* boundary) {
   729   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
   730 }
   733 // Note on locking for the space iteration functions:
   734 // since the collector's iteration activities are concurrent with
   735 // allocation activities by mutators, absent a suitable mutual exclusion
    736 // mechanism the iterators may go awry. For instance a block being iterated
   737 // may suddenly be allocated or divided up and part of it allocated and
   738 // so on.
   740 // Apply the given closure to each block in the space.
   741 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
   742   assert_lock_strong(freelistLock());
   743   HeapWord *cur, *limit;
   744   for (cur = bottom(), limit = end(); cur < limit;
   745        cur += cl->do_blk_careful(cur));
   746 }
   748 // Apply the given closure to each block in the space.
   749 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
   750   assert_lock_strong(freelistLock());
   751   HeapWord *cur, *limit;
   752   for (cur = bottom(), limit = end(); cur < limit;
   753        cur += cl->do_blk(cur));
   754 }
   756 // Apply the given closure to each oop in the space.
   757 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
   758   assert_lock_strong(freelistLock());
   759   HeapWord *cur, *limit;
   760   size_t curSize;
   761   for (cur = bottom(), limit = end(); cur < limit;
   762        cur += curSize) {
   763     curSize = block_size(cur);
   764     if (block_is_obj(cur)) {
   765       oop(cur)->oop_iterate(cl);
   766     }
   767   }
   768 }
   770 // Apply the given closure to each oop in the space \intersect memory region.
   771 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
   772   assert_lock_strong(freelistLock());
   773   if (is_empty()) {
   774     return;
   775   }
   776   MemRegion cur = MemRegion(bottom(), end());
   777   mr = mr.intersection(cur);
   778   if (mr.is_empty()) {
   779     return;
   780   }
   781   if (mr.equals(cur)) {
   782     oop_iterate(cl);
   783     return;
   784   }
   785   assert(mr.end() <= end(), "just took an intersection above");
   786   HeapWord* obj_addr = block_start(mr.start());
   787   HeapWord* t = mr.end();
   789   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
   790   if (block_is_obj(obj_addr)) {
   791     // Handle first object specially.
   792     oop obj = oop(obj_addr);
   793     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
   794   } else {
   795     FreeChunk* fc = (FreeChunk*)obj_addr;
   796     obj_addr += fc->size();
   797   }
   798   while (obj_addr < t) {
   799     HeapWord* obj = obj_addr;
   800     obj_addr += block_size(obj_addr);
   801     // If "obj_addr" is not greater than top, then the
   802     // entire object "obj" is within the region.
   803     if (obj_addr <= t) {
   804       if (block_is_obj(obj)) {
   805         oop(obj)->oop_iterate(cl);
   806       }
   807     } else {
   808       // "obj" extends beyond end of region
   809       if (block_is_obj(obj)) {
   810         oop(obj)->oop_iterate(&smr_blk);
   811       }
   812       break;
   813     }
   814   }
   815 }
   817 // NOTE: In the following methods, in order to safely be able to
   818 // apply the closure to an object, we need to be sure that the
   819 // object has been initialized. We are guaranteed that an object
   820 // is initialized if we are holding the Heap_lock with the
   821 // world stopped.
   822 void CompactibleFreeListSpace::verify_objects_initialized() const {
   823   if (is_init_completed()) {
   824     assert_locked_or_safepoint(Heap_lock);
   825     if (Universe::is_fully_initialized()) {
   826       guarantee(SafepointSynchronize::is_at_safepoint(),
   827                 "Required for objects to be initialized");
   828     }
   829   } // else make a concession at vm start-up
   830 }
   832 // Apply the given closure to each object in the space
   833 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
   834   assert_lock_strong(freelistLock());
   835   NOT_PRODUCT(verify_objects_initialized());
   836   HeapWord *cur, *limit;
   837   size_t curSize;
   838   for (cur = bottom(), limit = end(); cur < limit;
   839        cur += curSize) {
   840     curSize = block_size(cur);
   841     if (block_is_obj(cur)) {
   842       blk->do_object(oop(cur));
   843     }
   844   }
   845 }
   847 // Apply the given closure to each live object in the space
   848 //   The usage of CompactibleFreeListSpace
    849 // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
    850 // objects in the space to hold references to objects that are no longer
    851 // valid.  For example, an object may reference another object
    852 // that has already been swept up (collected).  This method uses
   853 // obj_is_alive() to determine whether it is safe to apply the closure to
   854 // an object.  See obj_is_alive() for details on how liveness of an
   855 // object is decided.
   857 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
   858   assert_lock_strong(freelistLock());
   859   NOT_PRODUCT(verify_objects_initialized());
   860   HeapWord *cur, *limit;
   861   size_t curSize;
   862   for (cur = bottom(), limit = end(); cur < limit;
   863        cur += curSize) {
   864     curSize = block_size(cur);
   865     if (block_is_obj(cur) && obj_is_alive(cur)) {
   866       blk->do_object(oop(cur));
   867     }
   868   }
   869 }
   871 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
   872                                                   UpwardsObjectClosure* cl) {
   873   assert_locked(freelistLock());
   874   NOT_PRODUCT(verify_objects_initialized());
   875   Space::object_iterate_mem(mr, cl);
   876 }
   878 // Callers of this iterator beware: The closure application should
   879 // be robust in the face of uninitialized objects and should (always)
   880 // return a correct size so that the next addr + size below gives us a
   881 // valid block boundary. [See for instance,
   882 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   883 // in ConcurrentMarkSweepGeneration.cpp.]
   884 HeapWord*
   885 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
   886   assert_lock_strong(freelistLock());
   887   HeapWord *addr, *last;
   888   size_t size;
   889   for (addr = bottom(), last  = end();
   890        addr < last; addr += size) {
   891     FreeChunk* fc = (FreeChunk*)addr;
   892     if (fc->isFree()) {
   893       // Since we hold the free list lock, which protects direct
   894       // allocation in this generation by mutators, a free object
   895       // will remain free throughout this iteration code.
   896       size = fc->size();
   897     } else {
   898       // Note that the object need not necessarily be initialized,
   899       // because (for instance) the free list lock does NOT protect
   900       // object initialization. The closure application below must
   901       // therefore be correct in the face of uninitialized objects.
   902       size = cl->do_object_careful(oop(addr));
   903       if (size == 0) {
   904         // An unparsable object found. Signal early termination.
   905         return addr;
   906       }
   907     }
   908   }
   909   return NULL;
   910 }
   912 // Callers of this iterator beware: The closure application should
   913 // be robust in the face of uninitialized objects and should (always)
   914 // return a correct size so that the next addr + size below gives us a
   915 // valid block boundary. [See for instance,
   916 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   917 // in ConcurrentMarkSweepGeneration.cpp.]
   918 HeapWord*
   919 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
   920   ObjectClosureCareful* cl) {
   921   assert_lock_strong(freelistLock());
   922   // Can't use used_region() below because it may not necessarily
   923   // be the same as [bottom(),end()); although we could
   924   // use [used_region().start(),round_to(used_region().end(),CardSize)),
   925   // that appears too cumbersome, so we just do the simpler check
   926   // in the assertion below.
   927   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
   928          "mr should be non-empty and within used space");
   929   HeapWord *addr, *end;
   930   size_t size;
   931   for (addr = block_start_careful(mr.start()), end  = mr.end();
   932        addr < end; addr += size) {
   933     FreeChunk* fc = (FreeChunk*)addr;
   934     if (fc->isFree()) {
   935       // Since we hold the free list lock, which protects direct
   936       // allocation in this generation by mutators, a free object
   937       // will remain free throughout this iteration code.
   938       size = fc->size();
   939     } else {
   940       // Note that the object need not necessarily be initialized,
   941       // because (for instance) the free list lock does NOT protect
   942       // object initialization. The closure application below must
   943       // therefore be correct in the face of uninitialized objects.
   944       size = cl->do_object_careful_m(oop(addr), mr);
   945       if (size == 0) {
   946         // An unparsable object found. Signal early termination.
   947         return addr;
   948       }
   949     }
   950   }
   951   return NULL;
   952 }
   955 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
   956   NOT_PRODUCT(verify_objects_initialized());
   957   return _bt.block_start(p);
   958 }
   960 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
   961   return _bt.block_start_careful(p);
   962 }
   964 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   965   NOT_PRODUCT(verify_objects_initialized());
   966   // This must be volatile, or else there is a danger that the compiler
   967   // will compile the code below into a sometimes-infinite loop, by keeping
   968   // the value read the first time in a register.
   969   while (true) {
   970     // We must do this until we get a consistent view of the object.
   971     if (FreeChunk::indicatesFreeChunk(p)) {
   972       volatile FreeChunk* fc = (volatile FreeChunk*)p;
   973       size_t res = fc->size();
   974       // If the object is still a free chunk, return the size, else it
   975       // has been allocated so try again.
   976       if (FreeChunk::indicatesFreeChunk(p)) {
   977         assert(res != 0, "Block size should not be 0");
   978         return res;
   979       }
   980     } else {
   981       // must read from what 'p' points to in each loop.
   982       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
   983       if (k != NULL) {
   984         assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
   985         oop o = (oop)p;
   986         assert(o->is_parsable(), "Should be parsable");
   987         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
   988         size_t res = o->size_given_klass(k->klass_part());
   989         res = adjustObjectSize(res);
   990         assert(res != 0, "Block size should not be 0");
   991         return res;
   992       }
   993     }
   994   }
   995 }
   997 // A variant of the above that uses the Printezis bits for
   998 // unparsable but allocated objects. This avoids any possible
   999 // stalls waiting for mutators to initialize objects, and is
  1000 // thus potentially faster than the variant above. However,
  1001 // this variant may return a zero size for a block that is
  1002 // under mutation and for which a consistent size cannot be
  1003 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
  1004 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
  1005                                                      const CMSCollector* c)
  1006 const {
  1007   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1008   // This must be volatile, or else there is a danger that the compiler
  1009   // will compile the code below into a sometimes-infinite loop, by keeping
  1010   // the value read the first time in a register.
  1011   DEBUG_ONLY(uint loops = 0;)
  1012   while (true) {
  1013     // We must do this until we get a consistent view of the object.
  1014     if (FreeChunk::indicatesFreeChunk(p)) {
  1015       volatile FreeChunk* fc = (volatile FreeChunk*)p;
  1016       size_t res = fc->size();
  1017       if (FreeChunk::indicatesFreeChunk(p)) {
  1018         assert(res != 0, "Block size should not be 0");
  1019         assert(loops == 0, "Should be 0");
  1020         return res;
   1021       }
   1022     } else {
  1023       // must read from what 'p' points to in each loop.
  1024       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
  1025       if (k != NULL &&
  1026           ((oopDesc*)p)->is_parsable() &&
  1027           ((oopDesc*)p)->is_conc_safe()) {
  1028         assert(k->is_oop(), "Should really be klass oop.");
  1029         oop o = (oop)p;
  1030         assert(o->is_oop(), "Should be an oop");
  1031         size_t res = o->size_given_klass(k->klass_part());
  1032         res = adjustObjectSize(res);
  1033         assert(res != 0, "Block size should not be 0");
  1034         return res;
  1035       } else {
  1036         return c->block_size_if_printezis_bits(p);
   1037       }
   1038     }
   1039     assert(loops == 0, "Can loop at most once");
  1040     DEBUG_ONLY(loops++;)
   1041   }
   1042 }
   1044 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
  1045   NOT_PRODUCT(verify_objects_initialized());
  1046   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1047   FreeChunk* fc = (FreeChunk*)p;
  1048   if (fc->isFree()) {
  1049     return fc->size();
  1050   } else {
  1051     // Ignore mark word because this may be a recently promoted
  1052     // object whose mark word is used to chain together grey
  1053     // objects (the last one would have a null value).
  1054     assert(oop(p)->is_oop(true), "Should be an oop");
  1055     return adjustObjectSize(oop(p)->size());
   1056   }
   1057 }
   1059 // This implementation assumes that the property of "being an object" is
  1060 // stable.  But being a free chunk may not be (because of parallel
  1061 // promotion.)
  1062 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
  1063   FreeChunk* fc = (FreeChunk*)p;
  1064   assert(is_in_reserved(p), "Should be in space");
  1065   // When doing a mark-sweep-compact of the CMS generation, this
  1066   // assertion may fail because prepare_for_compaction() uses
  1067   // space that is garbage to maintain information on ranges of
  1068   // live objects so that these live ranges can be moved as a whole.
  1069   // Comment out this assertion until that problem can be solved
  1070   // (i.e., that the block start calculation may look at objects
   1071   // at addresses below "p" in finding the object that contains "p",
   1072   // and those objects (if garbage) may have been modified to hold
   1073   // live range information.)
  1074   // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
  1075   if (FreeChunk::indicatesFreeChunk(p)) return false;
  1076   klassOop k = oop(p)->klass_or_null();
  1077   if (k != NULL) {
  1078     // Ignore mark word because it may have been used to
  1079     // chain together promoted objects (the last one
  1080     // would have a null value).
  1081     assert(oop(p)->is_oop(true), "Should be an oop");
  1082     return true;
  1083   } else {
  1084     return false;  // Was not an object at the start of collection.
   1085   }
   1086 }
   1088 // Check if the object is alive. This fact is checked either by consulting
  1089 // the main marking bitmap in the sweeping phase or, if it's a permanent
  1090 // generation and we're not in the sweeping phase, by checking the
  1091 // perm_gen_verify_bit_map where we store the "deadness" information if
  1092 // we did not sweep the perm gen in the most recent previous GC cycle.
  1093 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
  1094   assert (block_is_obj(p), "The address should point to an object");
  1096   // If we're sweeping, we use object liveness information from the main bit map
  1097   // for both perm gen and old gen.
  1098   // We don't need to lock the bitmap (live_map or dead_map below), because
  1099   // EITHER we are in the middle of the sweeping phase, and the
  1100   // main marking bit map (live_map below) is locked,
  1101   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
  1102   // is stable, because it's mutated only in the sweeping phase.
  1103   if (_collector->abstract_state() == CMSCollector::Sweeping) {
  1104     CMSBitMap* live_map = _collector->markBitMap();
  1105     return live_map->isMarked((HeapWord*) p);
  1106   } else {
  1107     // If we're not currently sweeping and we haven't swept the perm gen in
  1108     // the previous concurrent cycle then we may have dead but unswept objects
  1109     // in the perm gen. In this case, we use the "deadness" information
  1110     // that we had saved in perm_gen_verify_bit_map at the last sweep.
  1111     if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
  1112       if (_collector->verifying()) {
  1113         CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
  1114         // Object is marked in the dead_map bitmap at the previous sweep
  1115         // when we know that it's dead; if the bitmap is not allocated then
  1116         // the object is alive.
  1117         return (dead_map->sizeInBits() == 0) // bit_map has been allocated
  1118                || !dead_map->par_isMarked((HeapWord*) p);
  1119       } else {
  1120         return false; // We can't say for sure if it's live, so we say that it's dead.
   1121       }
   1122     }
   1123   }
   1124   return true;
   1125 }
   1127 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
  1128   FreeChunk* fc = (FreeChunk*)p;
  1129   assert(is_in_reserved(p), "Should be in space");
  1130   assert(_bt.block_start(p) == p, "Should be a block boundary");
  1131   if (!fc->isFree()) {
  1132     // Ignore mark word because it may have been used to
  1133     // chain together promoted objects (the last one
  1134     // would have a null value).
  1135     assert(oop(p)->is_oop(true), "Should be an oop");
  1136     return true;
   1137   }
   1138   return false;
   1139 }
   1141 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
   1142 // approximate answer if you don't hold the freelist lock when you call this.
  1143 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
  1144   size_t size = 0;
  1145   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  1146     debug_only(
  1147       // We may be calling here without the lock in which case we
  1148       // won't do this modest sanity check.
  1149       if (freelistLock()->owned_by_self()) {
  1150         size_t total_list_size = 0;
  1151         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
  1152           fc = fc->next()) {
  1153           total_list_size += i;
  1155         assert(total_list_size == i * _indexedFreeList[i].count(),
  1156                "Count in list is incorrect");
  1159     size += i * _indexedFreeList[i].count();
  1161   return size;
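// Worked example (hypothetical counts): with 3 chunks on the 8-word list and
// 2 chunks on the 16-word list, the method reports 3*8 + 2*16 = 56 words.
// Without the free list lock the result is only an approximation, as noted above.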
  1164 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  1165   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  1166   return allocate(size);
  1169 HeapWord*
  1170 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
  1171   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
  1174 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  1175   assert_lock_strong(freelistLock());
  1176   HeapWord* res = NULL;
  1177   assert(size == adjustObjectSize(size),
  1178          "use adjustObjectSize() before calling into allocate()");
  1180   if (_adaptive_freelists) {
  1181     res = allocate_adaptive_freelists(size);
  1182   } else {  // non-adaptive free lists
  1183     res = allocate_non_adaptive_freelists(size);
  1186   if (res != NULL) {
  1187     // check that res does lie in this space!
  1188     assert(is_in_reserved(res), "Not in this space!");
  1189     assert(is_aligned((void*)res), "alignment check");
  1191     FreeChunk* fc = (FreeChunk*)res;
  1192     fc->markNotFree();
  1193     assert(!fc->isFree(), "shouldn't be marked free");
  1194     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
  1195     // Verify that the block offset table shows this to
  1196     // be a single block, but not one which is unallocated.
  1197     _bt.verify_single_block(res, size);
  1198     _bt.verify_not_unallocated(res, size);
  1199     // mangle a just allocated object with a distinct pattern.
  1200     debug_only(fc->mangleAllocated(size));
  1203   return res;
  1206 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
  1207   HeapWord* res = NULL;
  1208   // try and use linear allocation for smaller blocks
  1209   if (size < _smallLinearAllocBlock._allocation_size_limit) {
  1210     // if successful, the following also adjusts block offset table
  1211     res = getChunkFromSmallLinearAllocBlock(size);
  1213   // Else triage to indexed lists for smaller sizes
  1214   if (res == NULL) {
  1215     if (size < SmallForDictionary) {
  1216       res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1217     } else {
  1218       // else get it from the big dictionary; if even this doesn't
  1219       // work we are out of luck.
  1220       res = (HeapWord*)getChunkFromDictionaryExact(size);
  1224   return res;
  1227 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
  1228   assert_lock_strong(freelistLock());
  1229   HeapWord* res = NULL;
  1230   assert(size == adjustObjectSize(size),
  1231          "use adjustObjectSize() before calling into allocate()");
  1233   // Strategy
  1234   //   if small
  1235   //     exact size from small object indexed list if small
  1236   //     small or large linear allocation block (linAB) as appropriate
  1237   //     take from lists of greater sized chunks
  1238   //   else
  1239   //     dictionary
  1240   //     small or large linear allocation block if it has the space
  1241   // Try allocating exact size from indexTable first
  1242   if (size < IndexSetSize) {
  1243     res = (HeapWord*) getChunkFromIndexedFreeList(size);
   1244     if (res != NULL) {
  1245       assert(res != (HeapWord*)_indexedFreeList[size].head(),
  1246         "Not removed from free list");
  1247       // no block offset table adjustment is necessary on blocks in
  1248       // the indexed lists.
  1250     // Try allocating from the small LinAB
  1251     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
  1252         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
  1253         // if successful, the above also adjusts block offset table
  1254         // Note that this call will refill the LinAB to
   1255         // satisfy the request.  This is different from the
   1256         // behavior in evm.
  1257         // Don't record chunk off a LinAB?  smallSplitBirth(size);
  1258     } else {
  1259       // Raid the exact free lists larger than size, even if they are not
  1260       // overpopulated.
  1261       res = (HeapWord*) getChunkFromGreater(size);
  1263   } else {
  1264     // Big objects get allocated directly from the dictionary.
  1265     res = (HeapWord*) getChunkFromDictionaryExact(size);
  1266     if (res == NULL) {
  1267       // Try hard not to fail since an allocation failure will likely
  1268       // trigger a synchronous GC.  Try to get the space from the
  1269       // allocation blocks.
  1270       res = getChunkFromSmallLinearAllocBlockRemainder(size);
  1274   return res;
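// Illustrative walk through the strategy above (sizes are hypothetical and
// assumed already adjusted via adjustObjectSize()):
//   - a request of 24 words (< IndexSetSize) first tries _indexedFreeList[24],
//     then the small linAB, then getChunkFromGreater(24);
//   - a request of 2048 words goes straight to getChunkFromDictionaryExact(),
//     falling back to the small linAB remainder only if the dictionary fails.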
  1277 // A worst-case estimate of the space required (in HeapWords) to expand the heap
  1278 // when promoting obj.
  1279 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  1280   // Depending on the object size, expansion may require refilling either a
  1281   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
  1282   // is added because the dictionary may over-allocate to avoid fragmentation.
  1283   size_t space = obj_size;
  1284   if (!_adaptive_freelists) {
  1285     space = MAX2(space, _smallLinearAllocBlock._refillSize);
  1287   space += _promoInfo.refillSize() + 2 * MinChunkSize;
  1288   return space;
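// Worked example (all values hypothetical): for a 100-word object with
// non-adaptive free lists, a linAB refill size of 1024 words, a PromotionInfo
// refill size of 64 words and MinChunkSize of 4 words, the estimate is
//   MAX2(100, 1024) + 64 + 2*4 = 1096 words.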
  1291 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  1292   FreeChunk* ret;
  1294   assert(numWords >= MinChunkSize, "Size is less than minimum");
  1295   assert(linearAllocationWouldFail() || bestFitFirst(),
  1296     "Should not be here");
  1298   size_t i;
  1299   size_t currSize = numWords + MinChunkSize;
  1300   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  1301   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
  1302     FreeList* fl = &_indexedFreeList[i];
  1303     if (fl->head()) {
  1304       ret = getFromListGreater(fl, numWords);
  1305       assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
  1306       return ret;
  1310   currSize = MAX2((size_t)SmallForDictionary,
  1311                   (size_t)(numWords + MinChunkSize));
  1313   /* Try to get a chunk that satisfies request, while avoiding
  1314      fragmentation that can't be handled. */
  1316     ret =  dictionary()->getChunk(currSize);
  1317     if (ret != NULL) {
  1318       assert(ret->size() - numWords >= MinChunkSize,
  1319              "Chunk is too small");
  1320       _bt.allocated((HeapWord*)ret, ret->size());
  1321       /* Carve returned chunk. */
  1322       (void) splitChunkAndReturnRemainder(ret, numWords);
  1323       /* Label this as no longer a free chunk. */
  1324       assert(ret->isFree(), "This chunk should be free");
  1325       ret->linkPrev(NULL);
  1327     assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
  1328     return ret;
  1330   ShouldNotReachHere();
  1333 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
  1334   const {
  1335   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  1336   return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
  1339 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
  1340   if (fc->size() >= IndexSetSize) {
  1341     return dictionary()->verifyChunkInFreeLists(fc);
  1342   } else {
  1343     return verifyChunkInIndexedFreeLists(fc);
  1347 #ifndef PRODUCT
  1348 void CompactibleFreeListSpace::assert_locked() const {
  1349   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
  1352 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
  1353   CMSLockVerifier::assert_locked(lock);
  1355 #endif
  1357 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
  1358   // In the parallel case, the main thread holds the free list lock
   1359   // on behalf of the parallel threads.
  1360   FreeChunk* fc;
  1362     // If GC is parallel, this might be called by several threads.
  1363     // This should be rare enough that the locking overhead won't affect
  1364     // the sequential code.
  1365     MutexLockerEx x(parDictionaryAllocLock(),
  1366                     Mutex::_no_safepoint_check_flag);
  1367     fc = getChunkFromDictionary(size);
  1369   if (fc != NULL) {
  1370     fc->dontCoalesce();
  1371     assert(fc->isFree(), "Should be free, but not coalescable");
  1372     // Verify that the block offset table shows this to
  1373     // be a single block, but not one which is unallocated.
  1374     _bt.verify_single_block((HeapWord*)fc, fc->size());
  1375     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  1377   return fc;
  1380 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
  1381   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  1382   assert_locked();
  1384   // if we are tracking promotions, then first ensure space for
  1385   // promotion (including spooling space for saving header if necessary).
  1386   // then allocate and copy, then track promoted info if needed.
  1387   // When tracking (see PromotionInfo::track()), the mark word may
  1388   // be displaced and in this case restoration of the mark word
  1389   // occurs in the (oop_since_save_marks_)iterate phase.
  1390   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
  1391     return NULL;
  1393   // Call the allocate(size_t, bool) form directly to avoid the
  1394   // additional call through the allocate(size_t) form.  Having
   1395   // the compiler inline the call is problematic because allocate(size_t)
  1396   // is a virtual method.
  1397   HeapWord* res = allocate(adjustObjectSize(obj_size));
  1398   if (res != NULL) {
  1399     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
  1400     // if we should be tracking promotions, do so.
  1401     if (_promoInfo.tracking()) {
  1402         _promoInfo.track((PromotedObject*)res);
  1405   return oop(res);
  1408 HeapWord*
  1409 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
  1410   assert_locked();
  1411   assert(size >= MinChunkSize, "minimum chunk size");
  1412   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
  1413     "maximum from smallLinearAllocBlock");
  1414   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
  1417 HeapWord*
  1418 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
  1419                                                        size_t size) {
  1420   assert_locked();
  1421   assert(size >= MinChunkSize, "too small");
  1422   HeapWord* res = NULL;
   1423   // Try to do linear allocation from blk, making sure that it has not been exhausted.
  1424   if (blk->_word_size == 0) {
  1425     // We have probably been unable to fill this either in the prologue or
  1426     // when it was exhausted at the last linear allocation. Bail out until
  1427     // next time.
  1428     assert(blk->_ptr == NULL, "consistency check");
  1429     return NULL;
  1431   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
  1432   res = getChunkFromLinearAllocBlockRemainder(blk, size);
  1433   if (res != NULL) return res;
  1435   // about to exhaust this linear allocation block
  1436   if (blk->_word_size == size) { // exactly satisfied
  1437     res = blk->_ptr;
  1438     _bt.allocated(res, blk->_word_size);
  1439   } else if (size + MinChunkSize <= blk->_refillSize) {
  1440     size_t sz = blk->_word_size;
  1441     // Update _unallocated_block if the size is such that chunk would be
  1442     // returned to the indexed free list.  All other chunks in the indexed
  1443     // free lists are allocated from the dictionary so that _unallocated_block
   1444     // has already been adjusted for them.  Do it here so that the cost
   1445     // is paid once for all chunks added back to the indexed free lists.
  1446     if (sz < SmallForDictionary) {
  1447       _bt.allocated(blk->_ptr, sz);
  1449     // Return the chunk that isn't big enough, and then refill below.
  1450     addChunkToFreeLists(blk->_ptr, sz);
  1451     splitBirth(sz);
  1452     // Don't keep statistics on adding back chunk from a LinAB.
  1453   } else {
  1454     // A refilled block would not satisfy the request.
  1455     return NULL;
  1458   blk->_ptr = NULL; blk->_word_size = 0;
  1459   refillLinearAllocBlock(blk);
  1460   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
  1461          "block was replenished");
  1462   if (res != NULL) {
  1463     splitBirth(size);
  1464     repairLinearAllocBlock(blk);
  1465   } else if (blk->_ptr != NULL) {
  1466     res = blk->_ptr;
  1467     size_t blk_size = blk->_word_size;
  1468     blk->_word_size -= size;
  1469     blk->_ptr  += size;
  1470     splitBirth(size);
  1471     repairLinearAllocBlock(blk);
  1472     // Update BOT last so that other (parallel) GC threads see a consistent
  1473     // view of the BOT and free blocks.
  1474     // Above must occur before BOT is updated below.
  1475     OrderAccess::storestore();
  1476     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1478   return res;
  1481 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
  1482                                         LinearAllocBlock* blk,
  1483                                         size_t size) {
  1484   assert_locked();
  1485   assert(size >= MinChunkSize, "too small");
  1487   HeapWord* res = NULL;
  1488   // This is the common case.  Keep it simple.
  1489   if (blk->_word_size >= size + MinChunkSize) {
  1490     assert(blk->_ptr != NULL, "consistency check");
  1491     res = blk->_ptr;
  1492     // Note that the BOT is up-to-date for the linAB before allocation.  It
  1493     // indicates the start of the linAB.  The split_block() updates the
  1494     // BOT for the linAB after the allocation (indicates the start of the
  1495     // next chunk to be allocated).
  1496     size_t blk_size = blk->_word_size;
  1497     blk->_word_size -= size;
  1498     blk->_ptr  += size;
  1499     splitBirth(size);
  1500     repairLinearAllocBlock(blk);
  1501     // Update BOT last so that other (parallel) GC threads see a consistent
  1502     // view of the BOT and free blocks.
  1503     // Above must occur before BOT is updated below.
  1504     OrderAccess::storestore();
  1505     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1506     _bt.allocated(res, size);
  1508   return res;
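// Illustrative carve of the linAB remainder (hypothetical numbers): with
// blk->_ptr == P, blk->_word_size == 100 and a request of 24 words, the caller
// gets P, the block becomes (P + 24, 76 words), and only then -- after the
// storestore barrier -- is the BOT told about the split, so that other
// (parallel) GC threads see a consistent view of the BOT and the free blocks,
// as the comment above notes.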
  1511 FreeChunk*
  1512 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
  1513   assert_locked();
  1514   assert(size < SmallForDictionary, "just checking");
  1515   FreeChunk* res;
  1516   res = _indexedFreeList[size].getChunkAtHead();
  1517   if (res == NULL) {
  1518     res = getChunkFromIndexedFreeListHelper(size);
  1520   _bt.verify_not_unallocated((HeapWord*) res, size);
  1521   assert(res == NULL || res->size() == size, "Incorrect block size");
  1522   return res;
  1525 FreeChunk*
  1526 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
  1527   bool replenish) {
  1528   assert_locked();
  1529   FreeChunk* fc = NULL;
  1530   if (size < SmallForDictionary) {
  1531     assert(_indexedFreeList[size].head() == NULL ||
  1532       _indexedFreeList[size].surplus() <= 0,
  1533       "List for this size should be empty or under populated");
  1534     // Try best fit in exact lists before replenishing the list
  1535     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
  1536       // Replenish list.
  1537       //
  1538       // Things tried that failed.
  1539       //   Tried allocating out of the two LinAB's first before
  1540       // replenishing lists.
  1541       //   Tried small linAB of size 256 (size in indexed list)
  1542       // and replenishing indexed lists from the small linAB.
  1543       //
  1544       FreeChunk* newFc = NULL;
  1545       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
  1546       if (replenish_size < SmallForDictionary) {
  1547         // Do not replenish from an underpopulated size.
  1548         if (_indexedFreeList[replenish_size].surplus() > 0 &&
  1549             _indexedFreeList[replenish_size].head() != NULL) {
  1550           newFc = _indexedFreeList[replenish_size].getChunkAtHead();
  1551         } else if (bestFitFirst()) {
  1552           newFc = bestFitSmall(replenish_size);
  1555       if (newFc == NULL && replenish_size > size) {
  1556         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
  1557         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
   1559       // Note: The split-death stats update for the block obtained above
   1560       // is recorded below, precisely when we know we are actually going
   1561       // to split it into more than one piece.
  1562       if (newFc != NULL) {
  1563         if  (replenish || CMSReplenishIntermediate) {
  1564           // Replenish this list and return one block to caller.
  1565           size_t i;
  1566           FreeChunk *curFc, *nextFc;
  1567           size_t num_blk = newFc->size() / size;
  1568           assert(num_blk >= 1, "Smaller than requested?");
  1569           assert(newFc->size() % size == 0, "Should be integral multiple of request");
  1570           if (num_blk > 1) {
  1571             // we are sure we will be splitting the block just obtained
  1572             // into multiple pieces; record the split-death of the original
  1573             splitDeath(replenish_size);
  1575           // carve up and link blocks 0, ..., num_blk - 2
  1576           // The last chunk is not added to the lists but is returned as the
  1577           // free chunk.
  1578           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
  1579                i = 0;
  1580                i < (num_blk - 1);
  1581                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
  1582                i++) {
  1583             curFc->setSize(size);
  1584             // Don't record this as a return in order to try and
  1585             // determine the "returns" from a GC.
  1586             _bt.verify_not_unallocated((HeapWord*) fc, size);
  1587             _indexedFreeList[size].returnChunkAtTail(curFc, false);
  1588             _bt.mark_block((HeapWord*)curFc, size);
  1589             splitBirth(size);
  1590             // Don't record the initial population of the indexed list
  1591             // as a split birth.
  1594           // check that the arithmetic was OK above
  1595           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
  1596             "inconsistency in carving newFc");
  1597           curFc->setSize(size);
  1598           _bt.mark_block((HeapWord*)curFc, size);
  1599           splitBirth(size);
  1600           fc = curFc;
  1601         } else {
  1602           // Return entire block to caller
  1603           fc = newFc;
  1607   } else {
  1608     // Get a free chunk from the free chunk dictionary to be returned to
  1609     // replenish the indexed free list.
  1610     fc = getChunkFromDictionaryExact(size);
  1612   // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
  1613   return fc;
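// Worked replenish example (hypothetical; assumes CMSIndexedFreeListReplenish
// is 4): a request for 8 words with an empty 8-word list tries an oversized
// donor of 32 words.  If one is found, it is carved into four 8-word chunks:
// three are appended to _indexedFreeList[8] and the fourth is returned to the
// caller; each carved piece is counted as a split birth.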
  1616 FreeChunk*
  1617 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
  1618   assert_locked();
  1619   FreeChunk* fc = _dictionary->getChunk(size);
  1620   if (fc == NULL) {
  1621     return NULL;
  1623   _bt.allocated((HeapWord*)fc, fc->size());
  1624   if (fc->size() >= size + MinChunkSize) {
  1625     fc = splitChunkAndReturnRemainder(fc, size);
  1627   assert(fc->size() >= size, "chunk too small");
  1628   assert(fc->size() < size + MinChunkSize, "chunk too big");
  1629   _bt.verify_single_block((HeapWord*)fc, fc->size());
  1630   return fc;
  1633 FreeChunk*
  1634 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
  1635   assert_locked();
  1636   FreeChunk* fc = _dictionary->getChunk(size);
  1637   if (fc == NULL) {
  1638     return fc;
  1640   _bt.allocated((HeapWord*)fc, fc->size());
  1641   if (fc->size() == size) {
  1642     _bt.verify_single_block((HeapWord*)fc, size);
  1643     return fc;
  1645   assert(fc->size() > size, "getChunk() guarantee");
  1646   if (fc->size() < size + MinChunkSize) {
  1647     // Return the chunk to the dictionary and go get a bigger one.
  1648     returnChunkToDictionary(fc);
  1649     fc = _dictionary->getChunk(size + MinChunkSize);
  1650     if (fc == NULL) {
  1651       return NULL;
  1653     _bt.allocated((HeapWord*)fc, fc->size());
  1655   assert(fc->size() >= size + MinChunkSize, "tautology");
  1656   fc = splitChunkAndReturnRemainder(fc, size);
  1657   assert(fc->size() == size, "chunk is wrong size");
  1658   _bt.verify_single_block((HeapWord*)fc, size);
  1659   return fc;
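// Why the retry with size + MinChunkSize above: if the dictionary hands back a
// chunk whose excess over the request is positive but smaller than
// MinChunkSize, the remainder could not stand alone as a free chunk, so the
// chunk is returned and a strictly larger one (guaranteeing a viable
// remainder) is requested instead.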
  1662 void
  1663 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
  1664   assert_locked();
  1666   size_t size = chunk->size();
  1667   _bt.verify_single_block((HeapWord*)chunk, size);
  1668   // adjust _unallocated_block downward, as necessary
  1669   _bt.freed((HeapWord*)chunk, size);
  1670   _dictionary->returnChunk(chunk);
  1671 #ifndef PRODUCT
  1672   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1673     TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
  1675 #endif // PRODUCT
  1678 void
  1679 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
  1680   assert_locked();
  1681   size_t size = fc->size();
  1682   _bt.verify_single_block((HeapWord*) fc, size);
  1683   _bt.verify_not_unallocated((HeapWord*) fc, size);
  1684   if (_adaptive_freelists) {
  1685     _indexedFreeList[size].returnChunkAtTail(fc);
  1686   } else {
  1687     _indexedFreeList[size].returnChunkAtHead(fc);
  1689 #ifndef PRODUCT
  1690   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1691      _indexedFreeList[size].verify_stats();
  1693 #endif // PRODUCT
  1696 // Add chunk to end of last block -- if it's the largest
  1697 // block -- and update BOT and census data. We would
  1698 // of course have preferred to coalesce it with the
  1699 // last block, but it's currently less expensive to find the
  1700 // largest block than it is to find the last.
  1701 void
  1702 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
  1703   HeapWord* chunk, size_t     size) {
  1704   // check that the chunk does lie in this space!
  1705   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1706   // One of the parallel gc task threads may be here
  1707   // whilst others are allocating.
  1708   Mutex* lock = NULL;
  1709   if (ParallelGCThreads != 0) {
  1710     lock = &_parDictionaryAllocLock;
  1712   FreeChunk* ec;
  1714     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1715     ec = dictionary()->findLargestDict();  // get largest block
  1716     if (ec != NULL && ec->end() == chunk) {
  1717       // It's a coterminal block - we can coalesce.
  1718       size_t old_size = ec->size();
  1719       coalDeath(old_size);
  1720       removeChunkFromDictionary(ec);
  1721       size += old_size;
  1722     } else {
  1723       ec = (FreeChunk*)chunk;
  1726   ec->setSize(size);
  1727   debug_only(ec->mangleFreed(size));
  1728   if (size < SmallForDictionary) {
  1729     lock = _indexedFreeListParLocks[size];
  1731   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1732   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  1733   // record the birth under the lock since the recording involves
  1734   // manipulation of the list on which the chunk lives and
  1735   // if the chunk is allocated and is the last on the list,
  1736   // the list can go away.
  1737   coalBirth(size);
  1740 void
  1741 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
  1742                                               size_t     size) {
  1743   // check that the chunk does lie in this space!
  1744   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1745   assert_locked();
  1746   _bt.verify_single_block(chunk, size);
  1748   FreeChunk* fc = (FreeChunk*) chunk;
  1749   fc->setSize(size);
  1750   debug_only(fc->mangleFreed(size));
  1751   if (size < SmallForDictionary) {
  1752     returnChunkToFreeList(fc);
  1753   } else {
  1754     returnChunkToDictionary(fc);
  1758 void
  1759 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
  1760   size_t size, bool coalesced) {
  1761   assert_locked();
  1762   assert(chunk != NULL, "null chunk");
  1763   if (coalesced) {
  1764     // repair BOT
  1765     _bt.single_block(chunk, size);
  1767   addChunkToFreeLists(chunk, size);
  1770 // We _must_ find the purported chunk on our free lists;
  1771 // we assert if we don't.
  1772 void
  1773 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
  1774   size_t size = fc->size();
  1775   assert_locked();
  1776   debug_only(verifyFreeLists());
  1777   if (size < SmallForDictionary) {
  1778     removeChunkFromIndexedFreeList(fc);
  1779   } else {
  1780     removeChunkFromDictionary(fc);
  1782   _bt.verify_single_block((HeapWord*)fc, size);
  1783   debug_only(verifyFreeLists());
  1786 void
  1787 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
  1788   size_t size = fc->size();
  1789   assert_locked();
  1790   assert(fc != NULL, "null chunk");
  1791   _bt.verify_single_block((HeapWord*)fc, size);
  1792   _dictionary->removeChunk(fc);
  1793   // adjust _unallocated_block upward, as necessary
  1794   _bt.allocated((HeapWord*)fc, size);
  1797 void
  1798 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
  1799   assert_locked();
  1800   size_t size = fc->size();
  1801   _bt.verify_single_block((HeapWord*)fc, size);
  1802   NOT_PRODUCT(
  1803     if (FLSVerifyIndexTable) {
  1804       verifyIndexedFreeList(size);
  1807   _indexedFreeList[size].removeChunk(fc);
  1808   debug_only(fc->clearNext());
  1809   debug_only(fc->clearPrev());
  1810   NOT_PRODUCT(
  1811     if (FLSVerifyIndexTable) {
  1812       verifyIndexedFreeList(size);
  1817 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
  1818   /* A hint is the next larger size that has a surplus.
  1819      Start search at a size large enough to guarantee that
   1820      the excess is >= MinChunkSize. */
  1821   size_t start = align_object_size(numWords + MinChunkSize);
  1822   if (start < IndexSetSize) {
  1823     FreeList* it   = _indexedFreeList;
  1824     size_t    hint = _indexedFreeList[start].hint();
  1825     while (hint < IndexSetSize) {
  1826       assert(hint % MinObjAlignment == 0, "hint should be aligned");
  1827       FreeList *fl = &_indexedFreeList[hint];
  1828       if (fl->surplus() > 0 && fl->head() != NULL) {
  1829         // Found a list with surplus, reset original hint
  1830         // and split out a free chunk which is returned.
  1831         _indexedFreeList[start].set_hint(hint);
  1832         FreeChunk* res = getFromListGreater(fl, numWords);
  1833         assert(res == NULL || res->isFree(),
  1834           "Should be returning a free chunk");
  1835         return res;
  1837       hint = fl->hint(); /* keep looking */
  1839     /* None found. */
  1840     it[start].set_hint(IndexSetSize);
  1842   return NULL;
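// Hint-walk example (hypothetical sizes): for a 20-word request with
// MinChunkSize of 4, the search starts at size 24.  Suppose list 24's hint is
// 32, list 32 has no surplus and hints at 40, and list 40 has a surplus chunk:
// list 24's hint is reset to 40 and a 20-word piece is split off via
// getFromListGreater().  If no list in the hint chain has surplus, list 24's
// hint is parked at IndexSetSize and NULL is returned.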
  1845 /* Requires fl->size >= numWords + MinChunkSize */
  1846 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
  1847   size_t numWords) {
  1848   FreeChunk *curr = fl->head();
  1849   size_t oldNumWords = curr->size();
  1850   assert(numWords >= MinChunkSize, "Word size is too small");
  1851   assert(curr != NULL, "List is empty");
  1852   assert(oldNumWords >= numWords + MinChunkSize,
  1853         "Size of chunks in the list is too small");
  1855   fl->removeChunk(curr);
  1856   // recorded indirectly by splitChunkAndReturnRemainder -
  1857   // smallSplit(oldNumWords, numWords);
  1858   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
  1859   // Does anything have to be done for the remainder in terms of
  1860   // fixing the card table?
  1861   assert(new_chunk == NULL || new_chunk->isFree(),
  1862     "Should be returning a free chunk");
  1863   return new_chunk;
  1866 FreeChunk*
  1867 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
  1868   size_t new_size) {
  1869   assert_locked();
  1870   size_t size = chunk->size();
  1871   assert(size > new_size, "Split from a smaller block?");
  1872   assert(is_aligned(chunk), "alignment problem");
  1873   assert(size == adjustObjectSize(size), "alignment problem");
  1874   size_t rem_size = size - new_size;
  1875   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
  1876   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
  1877   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
  1878   assert(is_aligned(ffc), "alignment problem");
  1879   ffc->setSize(rem_size);
  1880   ffc->linkNext(NULL);
  1881   ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  1882   // Above must occur before BOT is updated below.
  1883   // adjust block offset table
  1884   OrderAccess::storestore();
  1885   assert(chunk->isFree() && ffc->isFree(), "Error");
  1886   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
  1887   if (rem_size < SmallForDictionary) {
  1888     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  1889     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
  1890     returnChunkToFreeList(ffc);
  1891     split(size, rem_size);
  1892     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
  1893   } else {
  1894     returnChunkToDictionary(ffc);
   1895     split(size, rem_size);
  1897   chunk->setSize(new_size);
  1898   return chunk;
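// Split example (hypothetical, already-adjusted sizes): splitting a 40-word
// free chunk at new_size == 24 places the 16-word remainder at chunk + 24,
// links it as free *before* the storestore barrier and the BOT update, and
// only then shrinks the original chunk to 24 words.  The remainder goes back
// to the indexed lists (16 < SmallForDictionary) under the per-size lock when
// running in parallel.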
  1901 void
  1902 CompactibleFreeListSpace::sweep_completed() {
  1903   // Now that space is probably plentiful, refill linear
  1904   // allocation blocks as needed.
  1905   refillLinearAllocBlocksIfNeeded();
  1908 void
  1909 CompactibleFreeListSpace::gc_prologue() {
  1910   assert_locked();
  1911   if (PrintFLSStatistics != 0) {
  1912     gclog_or_tty->print("Before GC:\n");
  1913     reportFreeListStatistics();
  1915   refillLinearAllocBlocksIfNeeded();
  1918 void
  1919 CompactibleFreeListSpace::gc_epilogue() {
  1920   assert_locked();
  1921   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
  1922     if (_smallLinearAllocBlock._word_size == 0)
  1923       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
  1925   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1926   _promoInfo.stopTrackingPromotions();
  1927   repairLinearAllocationBlocks();
  1928   // Print Space's stats
  1929   if (PrintFLSStatistics != 0) {
  1930     gclog_or_tty->print("After GC:\n");
  1931     reportFreeListStatistics();
  1935 // Iteration support, mostly delegated from a CMS generation
  1937 void CompactibleFreeListSpace::save_marks() {
  1938   // mark the "end" of the used space at the time of this call;
  1939   // note, however, that promoted objects from this point
  1940   // on are tracked in the _promoInfo below.
  1941   set_saved_mark_word(unallocated_block());
  1942   // inform allocator that promotions should be tracked.
  1943   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1944   _promoInfo.startTrackingPromotions();
  1947 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
  1948   assert(_promoInfo.tracking(), "No preceding save_marks?");
  1949   guarantee(SharedHeap::heap()->n_par_threads() == 0,
  1950             "Shouldn't be called (yet) during parallel part of gc.");
  1951   return _promoInfo.noPromotions();
  1954 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
  1956 void CompactibleFreeListSpace::                                             \
  1957 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  1958   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
  1959          "Shouldn't be called (yet) during parallel part of gc.");          \
  1960   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
  1961   /*                                                                        \
  1962    * This also restores any displaced headers and removes the elements from \
  1963    * the iteration set as they are processed, so that we have a clean slate \
  1964    * at the end of the iteration. Note, thus, that if new objects are       \
  1965    * promoted as a result of the iteration they are iterated over as well.  \
  1966    */                                                                       \
  1967   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
  1970 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
  1973 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
  1974   // ugghh... how would one do this efficiently for a non-contiguous space?
  1975   guarantee(false, "NYI");
  1978 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  1979   return _smallLinearAllocBlock._word_size == 0;
  1982 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
  1983   // Fix up linear allocation blocks to look like free blocks
  1984   repairLinearAllocBlock(&_smallLinearAllocBlock);
  1987 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
  1988   assert_locked();
  1989   if (blk->_ptr != NULL) {
  1990     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
  1991            "Minimum block size requirement");
  1992     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
  1993     fc->setSize(blk->_word_size);
  1994     fc->linkPrev(NULL);   // mark as free
  1995     fc->dontCoalesce();
  1996     assert(fc->isFree(), "just marked it free");
  1997     assert(fc->cantCoalesce(), "just marked it uncoalescable");
  2001 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
  2002   assert_locked();
  2003   if (_smallLinearAllocBlock._ptr == NULL) {
  2004     assert(_smallLinearAllocBlock._word_size == 0,
  2005       "Size of linAB should be zero if the ptr is NULL");
  2006     // Reset the linAB refill and allocation size limit.
  2007     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
  2009   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
  2012 void
  2013 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
  2014   assert_locked();
  2015   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
  2016          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
  2017          "blk invariant");
  2018   if (blk->_ptr == NULL) {
  2019     refillLinearAllocBlock(blk);
  2021   if (PrintMiscellaneous && Verbose) {
  2022     if (blk->_word_size == 0) {
  2023       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
  2028 void
  2029 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  2030   assert_locked();
  2031   assert(blk->_word_size == 0 && blk->_ptr == NULL,
  2032          "linear allocation block should be empty");
  2033   FreeChunk* fc;
  2034   if (blk->_refillSize < SmallForDictionary &&
  2035       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
  2036     // A linAB's strategy might be to use small sizes to reduce
  2037     // fragmentation but still get the benefits of allocation from a
  2038     // linAB.
  2039   } else {
  2040     fc = getChunkFromDictionary(blk->_refillSize);
  2042   if (fc != NULL) {
  2043     blk->_ptr  = (HeapWord*)fc;
  2044     blk->_word_size = fc->size();
  2045     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
  2049 // Support for concurrent collection policy decisions.
  2050 bool CompactibleFreeListSpace::should_concurrent_collect() const {
   2051   // In the future we might want to add in fragmentation stats --
  2052   // including erosion of the "mountain" into this decision as well.
  2053   return !adaptive_freelists() && linearAllocationWouldFail();
  2056 // Support for compaction
  2058 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  2059   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  2060   // prepare_for_compaction() uses the space between live objects
  2061   // so that later phase can skip dead space quickly.  So verification
  2062   // of the free lists doesn't work after.
  2065 #define obj_size(q) adjustObjectSize(oop(q)->size())
  2066 #define adjust_obj_size(s) adjustObjectSize(s)
  2068 void CompactibleFreeListSpace::adjust_pointers() {
  2069   // In other versions of adjust_pointers(), a bail out
  2070   // based on the amount of live data in the generation
  2071   // (i.e., if 0, bail out) may be used.
  2072   // Cannot test used() == 0 here because the free lists have already
  2073   // been mangled by the compaction.
  2075   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
  2076   // See note about verification in prepare_for_compaction().
  2079 void CompactibleFreeListSpace::compact() {
  2080   SCAN_AND_COMPACT(obj_size);
  2083 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
  2084 // where fbs is free block sizes
  2085 double CompactibleFreeListSpace::flsFrag() const {
  2086   size_t itabFree = totalSizeInIndexedFreeLists();
  2087   double frag = 0.0;
  2088   size_t i;
  2090   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2091     double sz  = i;
  2092     frag      += _indexedFreeList[i].count() * (sz * sz);
  2095   double totFree = itabFree +
  2096                    _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
  2097   if (totFree > 0) {
  2098     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
  2099             (totFree * totFree));
  2100     frag = (double)1.0  - frag;
  2101   } else {
  2102     assert(frag == 0.0, "Follows from totFree == 0");
  2104   return frag;
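// Worked example of the metric (hypothetical free-block population): with a
// single free block of 100 words, frag = 1 - 100^2/100^2 = 0 (no
// fragmentation); with four free blocks of 25 words each,
// frag = 1 - (4*25^2)/(100^2) = 0.75.  More, smaller blocks for the same
// total free space push the metric toward 1.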
  2107 void CompactibleFreeListSpace::beginSweepFLCensus(
  2108   float inter_sweep_current,
  2109   float inter_sweep_estimate,
  2110   float intra_sweep_estimate) {
  2111   assert_locked();
  2112   size_t i;
  2113   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2114     FreeList* fl    = &_indexedFreeList[i];
  2115     if (PrintFLSStatistics > 1) {
   2116       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
  2118     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
  2119     fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
  2120     fl->set_beforeSweep(fl->count());
  2121     fl->set_bfrSurp(fl->surplus());
  2123   _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
  2124                                     inter_sweep_current,
  2125                                     inter_sweep_estimate,
  2126                                     intra_sweep_estimate);
  2129 void CompactibleFreeListSpace::setFLSurplus() {
  2130   assert_locked();
  2131   size_t i;
  2132   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2133     FreeList *fl = &_indexedFreeList[i];
  2134     fl->set_surplus(fl->count() -
  2135                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
  2139 void CompactibleFreeListSpace::setFLHints() {
  2140   assert_locked();
  2141   size_t i;
  2142   size_t h = IndexSetSize;
  2143   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
  2144     FreeList *fl = &_indexedFreeList[i];
  2145     fl->set_hint(h);
  2146     if (fl->surplus() > 0) {
  2147       h = i;
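// Hint propagation example (hypothetical surpluses): scanning from the largest
// size down, each list's hint is set to the nearest strictly larger size that
// had a surplus at the time of the scan.  If only sizes 24 and 40 have
// surpluses, sizes 24 through 39 get hint 40, sizes below 24 get hint 24, and
// sizes 40 and above get IndexSetSize (no larger surplus exists).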
  2152 void CompactibleFreeListSpace::clearFLCensus() {
  2153   assert_locked();
   2154   size_t i;
  2155   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2156     FreeList *fl = &_indexedFreeList[i];
  2157     fl->set_prevSweep(fl->count());
  2158     fl->set_coalBirths(0);
  2159     fl->set_coalDeaths(0);
  2160     fl->set_splitBirths(0);
  2161     fl->set_splitDeaths(0);
  2165 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  2166   if (PrintFLSStatistics > 0) {
  2167     HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
  2168     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
  2169                            largestAddr);
  2171   setFLSurplus();
  2172   setFLHints();
  2173   if (PrintGC && PrintFLSCensus > 0) {
  2174     printFLCensus(sweep_count);
  2176   clearFLCensus();
  2177   assert_locked();
  2178   _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
  2181 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
  2182   if (size < SmallForDictionary) {
  2183     FreeList *fl = &_indexedFreeList[size];
  2184     return (fl->coalDesired() < 0) ||
  2185            ((int)fl->count() > fl->coalDesired());
  2186   } else {
  2187     return dictionary()->coalDictOverPopulated(size);
  2191 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
  2192   assert(size < SmallForDictionary, "Size too large for indexed list");
  2193   FreeList *fl = &_indexedFreeList[size];
  2194   fl->increment_coalBirths();
  2195   fl->increment_surplus();
  2198 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
  2199   assert(size < SmallForDictionary, "Size too large for indexed list");
  2200   FreeList *fl = &_indexedFreeList[size];
  2201   fl->increment_coalDeaths();
  2202   fl->decrement_surplus();
  2205 void CompactibleFreeListSpace::coalBirth(size_t size) {
  2206   if (size  < SmallForDictionary) {
  2207     smallCoalBirth(size);
  2208   } else {
  2209     dictionary()->dictCensusUpdate(size,
  2210                                    false /* split */,
  2211                                    true /* birth */);
  2215 void CompactibleFreeListSpace::coalDeath(size_t size) {
   2216   if (size  < SmallForDictionary) {
  2217     smallCoalDeath(size);
  2218   } else {
  2219     dictionary()->dictCensusUpdate(size,
  2220                                    false /* split */,
  2221                                    false /* birth */);
  2225 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
  2226   assert(size < SmallForDictionary, "Size too large for indexed list");
  2227   FreeList *fl = &_indexedFreeList[size];
  2228   fl->increment_splitBirths();
  2229   fl->increment_surplus();
  2232 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
  2233   assert(size < SmallForDictionary, "Size too large for indexed list");
  2234   FreeList *fl = &_indexedFreeList[size];
  2235   fl->increment_splitDeaths();
  2236   fl->decrement_surplus();
  2239 void CompactibleFreeListSpace::splitBirth(size_t size) {
  2240   if (size  < SmallForDictionary) {
  2241     smallSplitBirth(size);
  2242   } else {
  2243     dictionary()->dictCensusUpdate(size,
  2244                                    true /* split */,
  2245                                    true /* birth */);
  2249 void CompactibleFreeListSpace::splitDeath(size_t size) {
  2250   if (size  < SmallForDictionary) {
  2251     smallSplitDeath(size);
  2252   } else {
  2253     dictionary()->dictCensusUpdate(size,
  2254                                    true /* split */,
  2255                                    false /* birth */);
  2259 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
  2260   size_t to2 = from - to1;
  2261   splitDeath(from);
  2262   splitBirth(to1);
  2263   splitBirth(to2);
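// Census bookkeeping example: split(40, 24) records a split death for size 40
// and split births for sizes 24 and 16 (to2 == 40 - 24), routed either to the
// indexed-list counters or to the dictionary census depending on which side of
// SmallForDictionary each size falls.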
  2266 void CompactibleFreeListSpace::print() const {
  2267   Space::print_on(tty);
  2270 void CompactibleFreeListSpace::prepare_for_verify() {
  2271   assert_locked();
  2272   repairLinearAllocationBlocks();
  2273   // Verify that the SpoolBlocks look like free blocks of
  2274   // appropriate sizes... To be done ...
  2277 class VerifyAllBlksClosure: public BlkClosure {
  2278  private:
  2279   const CompactibleFreeListSpace* _sp;
  2280   const MemRegion                 _span;
  2281   HeapWord*                       _last_addr;
  2282   size_t                          _last_size;
  2283   bool                            _last_was_obj;
  2284   bool                            _last_was_live;
  2286  public:
  2287   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
  2288     MemRegion span) :  _sp(sp), _span(span),
  2289                        _last_addr(NULL), _last_size(0),
  2290                        _last_was_obj(false), _last_was_live(false) { }
  2292   virtual size_t do_blk(HeapWord* addr) {
  2293     size_t res;
  2294     bool   was_obj  = false;
  2295     bool   was_live = false;
  2296     if (_sp->block_is_obj(addr)) {
  2297       was_obj = true;
  2298       oop p = oop(addr);
  2299       guarantee(p->is_oop(), "Should be an oop");
  2300       res = _sp->adjustObjectSize(p->size());
  2301       if (_sp->obj_is_alive(addr)) {
  2302         was_live = true;
  2303         p->verify();
  2305     } else {
  2306       FreeChunk* fc = (FreeChunk*)addr;
  2307       res = fc->size();
  2308       if (FLSVerifyLists && !fc->cantCoalesce()) {
  2309         guarantee(_sp->verifyChunkInFreeLists(fc),
  2310                   "Chunk should be on a free list");
  2313     if (res == 0) {
  2314       gclog_or_tty->print_cr("Livelock: no rank reduction!");
  2315       gclog_or_tty->print_cr(
  2316         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
  2317         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
  2318         addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
  2319         _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
  2320       _sp->print_on(gclog_or_tty);
  2321       guarantee(false, "Seppuku!");
  2323     _last_addr = addr;
  2324     _last_size = res;
  2325     _last_was_obj  = was_obj;
  2326     _last_was_live = was_live;
  2327     return res;
  2329 };
  2331 class VerifyAllOopsClosure: public OopClosure {
  2332  private:
  2333   const CMSCollector*             _collector;
  2334   const CompactibleFreeListSpace* _sp;
  2335   const MemRegion                 _span;
  2336   const bool                      _past_remark;
  2337   const CMSBitMap*                _bit_map;
  2339  protected:
  2340   void do_oop(void* p, oop obj) {
  2341     if (_span.contains(obj)) { // the interior oop points into CMS heap
  2342       if (!_span.contains(p)) { // reference from outside CMS heap
  2343         // Should be a valid object; the first disjunct below allows
  2344         // us to sidestep an assertion in block_is_obj() that insists
  2345         // that p be in _sp. Note that several generations (and spaces)
  2346         // are spanned by _span (CMS heap) above.
  2347         guarantee(!_sp->is_in_reserved(obj) ||
  2348                   _sp->block_is_obj((HeapWord*)obj),
  2349                   "Should be an object");
  2350         guarantee(obj->is_oop(), "Should be an oop");
  2351         obj->verify();
  2352         if (_past_remark) {
  2353           // Remark has been completed, the object should be marked
  2354           _bit_map->isMarked((HeapWord*)obj);
  2356       } else { // reference within CMS heap
  2357         if (_past_remark) {
  2358           // Remark has been completed -- so the referent should have
  2359           // been marked, if referring object is.
  2360           if (_bit_map->isMarked(_collector->block_start(p))) {
  2361             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
  2365     } else if (_sp->is_in_reserved(p)) {
  2366       // the reference is from FLS, and points out of FLS
  2367       guarantee(obj->is_oop(), "Should be an oop");
  2368       obj->verify();
  2372   template <class T> void do_oop_work(T* p) {
  2373     T heap_oop = oopDesc::load_heap_oop(p);
  2374     if (!oopDesc::is_null(heap_oop)) {
  2375       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2376       do_oop(p, obj);
  2380  public:
  2381   VerifyAllOopsClosure(const CMSCollector* collector,
  2382     const CompactibleFreeListSpace* sp, MemRegion span,
  2383     bool past_remark, CMSBitMap* bit_map) :
  2384     OopClosure(), _collector(collector), _sp(sp), _span(span),
  2385     _past_remark(past_remark), _bit_map(bit_map) { }
  2387   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
  2388   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
  2389 };
  2391 void CompactibleFreeListSpace::verify(bool ignored) const {
  2392   assert_lock_strong(&_freelistLock);
  2393   verify_objects_initialized();
  2394   MemRegion span = _collector->_span;
  2395   bool past_remark = (_collector->abstract_state() ==
  2396                       CMSCollector::Sweeping);
  2398   ResourceMark rm;
  2399   HandleMark  hm;
  2401   // Check integrity of CFL data structures
  2402   _promoInfo.verify();
  2403   _dictionary->verify();
  2404   if (FLSVerifyIndexTable) {
  2405     verifyIndexedFreeLists();
  2407   // Check integrity of all objects and free blocks in space
  2409     VerifyAllBlksClosure cl(this, span);
  2410     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
  2412   // Check that all references in the heap to FLS
  2413   // are to valid objects in FLS or that references in
  2414   // FLS are to valid objects elsewhere in the heap
  2415   if (FLSVerifyAllHeapReferences)
  2417     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
  2418       _collector->markBitMap());
  2419     CollectedHeap* ch = Universe::heap();
  2420     ch->oop_iterate(&cl);              // all oops in generations
  2421     ch->permanent_oop_iterate(&cl);    // all oops in perm gen
  2424   if (VerifyObjectStartArray) {
  2425     // Verify the block offset table
  2426     _bt.verify();
  2430 #ifndef PRODUCT
  2431 void CompactibleFreeListSpace::verifyFreeLists() const {
  2432   if (FLSVerifyLists) {
  2433     _dictionary->verify();
  2434     verifyIndexedFreeLists();
  2435   } else {
  2436     if (FLSVerifyDictionary) {
  2437       _dictionary->verify();
  2439     if (FLSVerifyIndexTable) {
  2440       verifyIndexedFreeLists();
  2444 #endif
  2446 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
  2447   size_t i = 0;
  2448   for (; i < MinChunkSize; i++) {
  2449     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
  2451   for (; i < IndexSetSize; i++) {
  2452     verifyIndexedFreeList(i);
  2456 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
  2457   FreeChunk* fc   =  _indexedFreeList[size].head();
  2458   FreeChunk* tail =  _indexedFreeList[size].tail();
  2459   size_t    num = _indexedFreeList[size].count();
  2460   size_t      n = 0;
  2461   guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
  2462   for (; fc != NULL; fc = fc->next(), n++) {
  2463     guarantee(fc->size() == size, "Size inconsistency");
  2464     guarantee(fc->isFree(), "!free?");
  2465     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
  2466     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
  2468   guarantee(n == num, "Incorrect count");
  2471 #ifndef PRODUCT
  2472 void CompactibleFreeListSpace::checkFreeListConsistency() const {
  2473   assert(_dictionary->minSize() <= IndexSetSize,
  2474     "Some sizes can't be allocated without recourse to"
  2475     " linear allocation buffers");
  2476   assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
  2477     "else MIN_TREE_CHUNK_SIZE is wrong");
  2478   assert((IndexSetStride == 2 && IndexSetStart == 2) ||
  2479          (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
  2480   assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
  2481       "Some for-loops may be incorrectly initialized");
  2482   assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
  2483       "For-loops that iterate over IndexSet with stride 2 may be wrong");
  2485 #endif
  2487 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  2488   assert_lock_strong(&_freelistLock);
  2489   FreeList total;
  2490   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  2491   FreeList::print_labels_on(gclog_or_tty, "size");
  2492   size_t totalFree = 0;
  2493   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2494     const FreeList *fl = &_indexedFreeList[i];
  2495     totalFree += fl->count() * fl->size();
  2496     if (i % (40*IndexSetStride) == 0) {
  2497       FreeList::print_labels_on(gclog_or_tty, "size");
  2499     fl->print_on(gclog_or_tty);
  2500     total.set_bfrSurp(    total.bfrSurp()     + fl->bfrSurp()    );
  2501     total.set_surplus(    total.surplus()     + fl->surplus()    );
  2502     total.set_desired(    total.desired()     + fl->desired()    );
  2503     total.set_prevSweep(  total.prevSweep()   + fl->prevSweep()  );
  2504     total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
  2505     total.set_count(      total.count()       + fl->count()      );
  2506     total.set_coalBirths( total.coalBirths()  + fl->coalBirths() );
  2507     total.set_coalDeaths( total.coalDeaths()  + fl->coalDeaths() );
  2508     total.set_splitBirths(total.splitBirths() + fl->splitBirths());
  2509     total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
  2511   total.print_on(gclog_or_tty, "TOTAL");
  2512   gclog_or_tty->print_cr("Total free in indexed lists "
  2513                          SIZE_FORMAT " words", totalFree);
  2514   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
  2515     (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
  2516             (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
  2517     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
  2518   _dictionary->printDictCensus();
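// Reading the growth/deficit line above (hypothetical totals): with 120 births
// (split + coalesced), 80 deaths, a prev-sweep count of 400, a desired total
// of 500 and a current count of 450,
//   growth  = (120 - 80) / 400 = 0.1
//   deficit = (500 - 450) / 500 = 0.1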
  2521 ///////////////////////////////////////////////////////////////////////////
  2522 // CFLS_LAB
  2523 ///////////////////////////////////////////////////////////////////////////
  2525 #define VECTOR_257(x)                                                                                  \
  2526   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
  2527   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2528      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2529      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2530      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2531      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2532      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2533      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2534      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2535      x }
// Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
// OldPLABSize, whose static default is different; if overridden at the
// command-line, this will get reinitialized via a call to
// modify_initialization() below.
AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
  VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
int    CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
  _cfls(cfls)
{
  assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    _indexedFreeList[i].set_size(i);
    _num_blocks[i] = 0;
  }
}
static bool _CFLS_LAB_modified = false;

void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
  assert(!_CFLS_LAB_modified, "Call only once");
  _CFLS_LAB_modified = true;
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    _blocks_to_claim[i].modify(n, wt, true /* force */);
  }
}
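// Allocate a block of word_sz words for promotion. As the code below shows,
// requests of IndexSetSize words or more go to the shared dictionary under
// parDictionaryAllocLock(), while smaller requests are satisfied from this
// LAB's local indexed free list, which is refilled from the global pool
// (see get_from_global_pool()) when it runs dry.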
HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  FreeChunk* res;
  guarantee(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
  if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
    // This locking manages sync with other large object allocations.
    MutexLockerEx x(_cfls->parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    res = _cfls->getChunkFromDictionaryExact(word_sz);
    if (res == NULL) return NULL;
  } else {
    FreeList* fl = &_indexedFreeList[word_sz];
    if (fl->count() == 0) {
      // Attempt to refill this local free list.
      get_from_global_pool(word_sz, fl);
      // If it didn't work, give up.
      if (fl->count() == 0) return NULL;
    }
    res = fl->getChunkAtHead();
    assert(res != NULL, "Why was count non-zero?");
  }
  res->markNotFree();
  assert(!res->isFree(), "shouldn't be marked free");
  assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  // Mangle a just-allocated object with a distinct pattern.
  debug_only(res->mangleAllocated(word_sz));
  return (HeapWord*)res;
}
// Get a chunk of blocks of the right size and update related
// book-keeping stats
void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
  // Get the #blocks we want to claim
  size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
  assert(n_blks > 0, "Error");
  assert(ResizePLAB || n_blks == OldPLABSize, "Error");
  // In some cases, when the application has a phase change,
  // there may be a sudden and sharp shift in the object survival
  // profile, and updating the counts at the end of a scavenge
  // may not be quick enough, giving rise to large scavenge pauses
  // during these phase changes. It is beneficial to detect such
  // changes on-the-fly during a scavenge and avoid such a phase-change
  // pothole. The following code is a heuristic attempt to do that.
  // It is protected by a product flag until we have gained
  // enough experience with this heuristic and fine-tuned its behaviour.
  // WARNING: This might increase fragmentation if we overreact to
  // small spikes, so some kind of historical smoothing based on
  // previous experience with the greater reactivity might be useful.
  // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
  // default.
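  // Illustration (hypothetical numbers, chosen only for exposition): with
  // CMSOldPLABToleranceFactor == 4, CMSOldPLABNumRefills == 4 and a current
  // claim of n_blks == 25, a thread that has already taken 500 blocks of this
  // size during the scavenge computes multiple == 500/(4*4*25) == 1; with
  // CMSOldPLABReactivityFactor == 2 it would then claim 25 + 2*1*25 == 75
  // blocks on this refill, capped at CMSOldPLABMax.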
  if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
    size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
    n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
    n_blks = MIN2(n_blks, CMSOldPLABMax);
  }
  assert(n_blks > 0, "Error");
  _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
  // Update stats table entry for this block size
  _num_blocks[word_sz] += fl->count();
}
void CFLS_LAB::compute_desired_plab_size() {
  for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
           "Counter inconsistency");
    if (_global_num_workers[i] > 0) {
      // Need to smooth wrt historical average
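      // Illustration (hypothetical numbers, chosen only for exposition): if
      // 8 workers together kept 6400 blocks of this size during the scavenge
      // and CMSOldPLABNumRefills is 4, the value sampled below is
      // 6400/(8*4) == 200 blocks per refill, clamped to the range
      // [CMSOldPLABMin, CMSOldPLABMax].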
      if (ResizeOldPLAB) {
        _blocks_to_claim[i].sample(
          MAX2((size_t)CMSOldPLABMin,
          MIN2((size_t)CMSOldPLABMax,
               _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
      }
      // Reset counters for next round
      _global_num_workers[i] = 0;
      _global_num_blocks[i] = 0;
      if (PrintOldPLAB) {
        gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT,
                               i, (size_t)_blocks_to_claim[i].average());
      }
    }
  }
}
void CFLS_LAB::retire(int tid) {
  // We run this single threaded with the world stopped;
  // so no need for locks and such.
#define CFLS_LAB_PARALLEL_ACCESS 0
  NOT_PRODUCT(Thread* t = Thread::current();)
  assert(Thread::current()->is_VM_thread(), "Error");
  assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
         "Will access uninitialized slot below");
#if CFLS_LAB_PARALLEL_ACCESS
  for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
       i > 0;
       i -= CompactibleFreeListSpace::IndexSetStride) {
#else // CFLS_LAB_PARALLEL_ACCESS
  for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
#endif // !CFLS_LAB_PARALLEL_ACCESS
    assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
           "Can't retire more than what we obtained");
    if (_num_blocks[i] > 0) {
      size_t num_retire =  _indexedFreeList[i].count();
      assert(_num_blocks[i] > num_retire, "Should have used at least one");
      {
#if CFLS_LAB_PARALLEL_ACCESS
        MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
                        Mutex::_no_safepoint_check_flag);
#endif // CFLS_LAB_PARALLEL_ACCESS
        // Update global stats for num_blocks used
        _global_num_blocks[i] += (_num_blocks[i] - num_retire);
        _global_num_workers[i]++;
        assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
        if (num_retire > 0) {
          _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
          // Reset this list.
          _indexedFreeList[i] = FreeList();
          _indexedFreeList[i].set_size(i);
        }
      }
      if (PrintOldPLAB) {
        gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
                               tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
      }
      // Reset stats for next round
      _num_blocks[i]         = 0;
    }
  }
}
void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
  assert(fl->count() == 0, "Precondition.");
  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
         "Precondition");

  // We'll try all multiples of word_sz in the indexed set, starting with
  // word_sz itself and, if CMSSplitIndexedFreeListBlocks is set, larger
  // multiples as well; failing that, we try getting a big chunk from the
  // dictionary and splitting it.
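  // Illustration (hypothetical sizes, chosen only for exposition): for
  // word_sz == 4 and n == 100 we would scan the indexed lists of sizes
  // 4, 8, 12, ... (while the size stays below IndexSetSize); if, say, the
  // size-8 list is the first non-empty one (k == 2), each block taken from
  // it is split two ways into size-4 blocks below.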
  {
    bool found;
    int  k;
    size_t cur_sz;
    for (k = 1, cur_sz = k * word_sz, found = false;
         (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
         (CMSSplitIndexedFreeListBlocks || k <= 1);
         k++, cur_sz = k * word_sz) {
      FreeList fl_for_cur_sz;  // Empty.
      fl_for_cur_sz.set_size(cur_sz);
      {
        MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
                        Mutex::_no_safepoint_check_flag);
        FreeList* gfl = &_indexedFreeList[cur_sz];
        if (gfl->count() != 0) {
          // nn is the number of chunks of size cur_sz that
          // we'd need to split k-ways each, in order to create
          // "n" chunks of size word_sz each.
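          // For example, with n == 100 and k == 2 we take
          // nn == MAX2(100/2, 1) == 50 blocks of size 2*word_sz,
          // which split two ways yield the 100 word_sz-sized blocks
          // we were asked for.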
          const size_t nn = MAX2(n/k, (size_t)1);
          gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
          found = true;
          if (k > 1) {
            // Update split death stats for the cur_sz-size blocks list:
            // we increment the split death count by the number of blocks
            // we just took from the cur_sz-size blocks list and which
            // we will be splitting below.
            ssize_t deaths = gfl->splitDeaths() +
                             fl_for_cur_sz.count();
            gfl->set_splitDeaths(deaths);
          }
        }
      }
      // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
      if (found) {
        if (k == 1) {
          fl->prepend(&fl_for_cur_sz);
        } else {
          // Divide each block on fl_for_cur_sz up k ways.
          FreeChunk* fc;
          while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
            // Must do this in reverse order, so that anybody attempting to
            // access the main chunk sees it as a single free block until we
            // change it.
            size_t fc_size = fc->size();
            assert(fc->isFree(), "Error");
            for (int i = k-1; i >= 0; i--) {
              FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
              assert((i != 0) ||
                        ((fc == ffc) && ffc->isFree() &&
                         (ffc->size() == k*word_sz) && (fc_size == word_sz)),
                        "Counting error");
              ffc->setSize(word_sz);
              ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
              ffc->linkNext(NULL);
              // Above must occur before BOT is updated below.
              OrderAccess::storestore();
              // splitting from the right, fc_size == i * word_sz
              _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
              fc_size -= word_sz;
              assert(fc_size == i*word_sz, "Error");
              _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
              _bt.verify_single_block((HeapWord*)fc, fc_size);
              _bt.verify_single_block((HeapWord*)ffc, word_sz);
              // Push this on "fl".
              fl->returnChunkAtHead(ffc);
            }
            // TRAP
            assert(fl->tail()->next() == NULL, "List invariant.");
          }
        }
        // Update birth stats for this block size.
        size_t num = fl->count();
        MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                        Mutex::_no_safepoint_check_flag);
        ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
        _indexedFreeList[word_sz].set_splitBirths(births);
        return;
      }
    }
  }
  // Otherwise, we'll split a block from the dictionary.
  FreeChunk* fc = NULL;
  FreeChunk* rem_fc = NULL;
  size_t rem;
  {
    MutexLockerEx x(parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    while (n > 0) {
      fc = dictionary()->getChunk(MAX2(n * word_sz,
                                  _dictionary->minSize()),
                                  FreeBlockDictionary::atLeast);
      if (fc != NULL) {
        _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
        dictionary()->dictCensusUpdate(fc->size(),
                                       true /*split*/,
                                       false /*birth*/);
        break;
      } else {
        n--;
      }
    }
    if (fc == NULL) return;
    // Otherwise, split up that block.
    assert((ssize_t)n >= 1, "Control point invariant");
    assert(fc->isFree(), "Error: should be a free block");
    _bt.verify_single_block((HeapWord*)fc, fc->size());
    const size_t nn = fc->size() / word_sz;
    n = MIN2(nn, n);
    assert((ssize_t)n >= 1, "Control point invariant");
    rem = fc->size() - n * word_sz;
    // If there is a remainder, and it's too small, allocate one fewer.
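    // For instance (hypothetical sizes, chosen only for exposition): with
    // word_sz == 8, a 100-word chunk and n == 12, the remainder would be
    // 100 - 12*8 == 4 words; if that is below MinChunkSize we back off to
    // n == 11, leaving a 12-word remainder instead.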
    if (rem > 0 && rem < MinChunkSize) {
      n--; rem += word_sz;
    }
    // Note that at this point we may have n == 0.
    assert((ssize_t)n >= 0, "Control point invariant");

    // If n is 0, the chunk fc that was found is not large
    // enough to leave a viable remainder.  We are unable to
    // allocate even one block.  Return fc to the
    // dictionary and return, leaving "fl" empty.
    if (n == 0) {
      returnChunkToDictionary(fc);
      assert(fl->count() == 0, "We never allocated any blocks");
      return;
    }

    // First return the remainder, if any.
    // Note that we hold the lock until we decide if we're going to give
    // back the remainder to the dictionary, since a concurrent allocation
    // may otherwise see the heap as empty.  (We're willing to take that
    // hit if the block is a small block.)
    if (rem > 0) {
      size_t prefix_size = n * word_sz;
      rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
      rem_fc->setSize(rem);
      rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
      rem_fc->linkNext(NULL);
      // Above must occur before BOT is updated below.
      assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
      OrderAccess::storestore();
      _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
      assert(fc->isFree(), "Error");
      fc->setSize(prefix_size);
      if (rem >= IndexSetSize) {
        returnChunkToDictionary(rem_fc);
        dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
        rem_fc = NULL;
      }
      // Otherwise, return it to the small list below.
    }
  }
  if (rem_fc != NULL) {
    MutexLockerEx x(_indexedFreeListParLocks[rem],
                    Mutex::_no_safepoint_check_flag);
    _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
    _indexedFreeList[rem].returnChunkAtHead(rem_fc);
    smallSplitBirth(rem);
  }
  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  // Now do the splitting up.
  // Must do this in reverse order, so that anybody attempting to
  // access the main chunk sees it as a single free block until we
  // change it.
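  // For example, with n == 4 the carve-out below proceeds over the chunks at
  // offsets 3*word_sz, 2*word_sz and word_sz from fc, and only then is fc
  // itself shrunk to word_sz, so the original block stays intact the longest.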
  size_t fc_size = n * word_sz;
  // All but first chunk in this loop
  for (ssize_t i = n-1; i > 0; i--) {
    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
    ffc->setSize(word_sz);
    ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
    ffc->linkNext(NULL);
    // Above must occur before BOT is updated below.
    OrderAccess::storestore();
    // splitting from the right, fc_size == i * word_sz
    _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
    fc_size -= word_sz;
    _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)fc, fc_size);
    // Push this on "fl".
    fl->returnChunkAtHead(ffc);
  }
  // First chunk
  assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
  // The blocks above should show their new sizes before the first block below
  fc->setSize(word_sz);
  fc->linkPrev(NULL);    // idempotent wrt free-ness, see assert above
  fc->linkNext(NULL);
  _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  _bt.verify_single_block((HeapWord*)fc, fc->size());
  fl->returnChunkAtHead(fc);

  assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  {
    // Update the stats for this block size.
    MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                    Mutex::_no_safepoint_check_flag);
    const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
    _indexedFreeList[word_sz].set_splitBirths(births);
    // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
    // _indexedFreeList[word_sz].set_surplus(new_surplus);
  }

  // TRAP
  assert(fl->tail()->next() == NULL, "List invariant.");
}
// Set up the space's par_seq_tasks structure for work claiming
// for parallel rescan. See CMSParRemarkTask where this is currently used.
// XXX Need to suitably abstract and generalize this and the next
// method into one.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_rescan(int n_threads) {
  // The "size" of each task is fixed according to rescan_task_size.
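  // For example, a used region of 1,000,000 words with a task_size of
  // 300,000 words yields (1000000 + 299999)/300000 == 4 tasks, the last
  // of which covers the 100,000-word tail.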
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = rescan_task_size();
  size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
  assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  assert(n_tasks == 0 ||
         ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
          (used_region().start() + n_tasks*task_size >= used_region().end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  pst->set_par_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}
// Set up the space's par_seq_tasks structure for work claiming
// for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_marking(int n_threads,
                                           HeapWord* low) {
  // The "size" of each task is fixed according to marking_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = marking_task_size();
  assert(task_size > CardTableModRefBS::card_size_in_words &&
         (task_size %  CardTableModRefBS::card_size_in_words == 0),
         "Otherwise arithmetic below would be incorrect");
  MemRegion span = _gen->reserved();
  if (low != NULL) {
    if (span.contains(low)) {
      // Align low down to a card boundary so that
      // we can use block_offset_careful() on span boundaries.
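      // For example, with the usual 512-byte card size an unaligned low at
      // address 0x1234 would be rounded down to 0x1200 here.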
      HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
                                 CardTableModRefBS::card_size);
      // Clip span prefix at aligned_low
      span = span.intersection(MemRegion(aligned_low, span.end()));
    } else if (low > span.end()) {
      span = MemRegion(low, low);  // Null region
    } // else use entire span
  }
  assert(span.is_empty() ||
         ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
        "span should start at a card boundary");
  size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  assert(n_tasks == 0 ||
         ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
          (span.start() + n_tasks*task_size >= span.end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  pst->set_par_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}
