src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

changeset:   5163:28e53b8db94f
parent:      4489:ef1e11845e18
child:       5166:7c5a1b62f53d
author:      brutisso
date:        Tue, 21 May 2013 08:50:20 +0200
permissions: -rw-r--r--

7066063: CMS: "Conservation Principle" assert failed
Summary: Add call to coalBirth() in CompactibleFreeListSpace::reset()
Reviewed-by: ysr, jmasa
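
The one-line summary above is the whole change: reset() now records a coalescing birth for the single large chunk it hands back to the free-list structures (see the coalBirth(mr.word_size()) call inside CompactibleFreeListSpace::reset() further down in this file). As a rough, hypothetical illustration of why that call matters, the sketch below models the kind of census conservation check the failing assert enforces; the names (ChunkCensus, record_birth, record_death) are invented for this example and are not HotSpot code.

// Minimal sketch, assuming a census of the form used by the CMS allocation
// statistics: prev_count + births - deaths must equal the current free-chunk count.
#include <cassert>
#include <cstddef>

struct ChunkCensus {
  std::size_t prev_count;  // count at the previous census
  std::size_t births;      // chunks created since then (e.g. by coalescing)
  std::size_t deaths;      // chunks consumed since then
  std::size_t count;       // chunks currently on the free lists

  ChunkCensus() : prev_count(0), births(0), deaths(0), count(0) {}

  void record_birth() { ++births; ++count; }   // analogous to coalBirth()
  void record_death() { ++deaths; --count; }   // analogous to coalDeath()

  // The "conservation principle": the book-keeping must explain the count.
  void verify() const { assert(prev_count + births - deaths == count); }
};

int main() {
  ChunkCensus census;
  census.record_birth();  // a reset()-like step that adds one big free chunk
  census.verify();        // holds; omitting record_birth() would trip the assert
  return 0;
}

This is only an analogy for the failure mode named in the bug title; the real accounting lives in the free-list/dictionary statistics, and the actual fix is the single coalBirth() call visible in reset() below.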

     1 /*
     2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
    27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
    28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
    29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
    30 #include "gc_implementation/shared/liveRange.hpp"
    31 #include "gc_implementation/shared/spaceDecorator.hpp"
    32 #include "gc_interface/collectedHeap.inline.hpp"
    33 #include "memory/allocation.inline.hpp"
    34 #include "memory/blockOffsetTable.inline.hpp"
    35 #include "memory/resourceArea.hpp"
    36 #include "memory/universe.inline.hpp"
    37 #include "oops/oop.inline.hpp"
    38 #include "runtime/globals.hpp"
    39 #include "runtime/handles.inline.hpp"
    40 #include "runtime/init.hpp"
    41 #include "runtime/java.hpp"
    42 #include "runtime/vmThread.hpp"
    43 #include "utilities/copy.hpp"
    45 /////////////////////////////////////////////////////////////////////////
    46 //// CompactibleFreeListSpace
    47 /////////////////////////////////////////////////////////////////////////
    49 // highest ranked  free list lock rank
    50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
    52 // Defaults are 0 so things will break badly if incorrectly initialized.
    53 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
    54 size_t CompactibleFreeListSpace::IndexSetStride = 0;
    56 size_t MinChunkSize = 0;
    58 void CompactibleFreeListSpace::set_cms_values() {
    59   // Set CMS global values
    60   assert(MinChunkSize == 0, "already set");
    62   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
    63   // for chunks to contain a FreeChunk.
    64   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
    65   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
    67   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
    68   IndexSetStart  = MinChunkSize;
    69   IndexSetStride = MinObjAlignment;
    70 }
    72 // Constructor
    73 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
    74   MemRegion mr, bool use_adaptive_freelists,
    75   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
    76   _dictionaryChoice(dictionaryChoice),
    77   _adaptive_freelists(use_adaptive_freelists),
    78   _bt(bs, mr),
    79   // free list locks are in the range of values taken by _lockRank
    80   // This range currently is [_leaf+2, _leaf+3]
    81   // Note: this requires that CFLspace c'tors
     82   // are called serially in the order in which the locks are
     83   // acquired in the program text. This is true today.
    84   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
    85   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
    86                           "CompactibleFreeListSpace._dict_par_lock", true),
    87   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    88                     CMSRescanMultiple),
    89   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    90                     CMSConcMarkMultiple),
    91   _collector(NULL)
    92 {
    93   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
    94          "FreeChunk is larger than expected");
    95   _bt.set_space(this);
    96   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    97   // We have all of "mr", all of which we place in the dictionary
    98   // as one big chunk. We'll need to decide here which of several
    99   // possible alternative dictionary implementations to use. For
   100   // now the choice is easy, since we have only one working
   101   // implementation, namely, the simple binary tree (splaying
   102   // temporarily disabled).
   103   switch (dictionaryChoice) {
   104     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
   105       _dictionary = new AFLBinaryTreeDictionary(mr);
   106       break;
   107     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
   108     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
   109     default:
   110       warning("dictionaryChoice: selected option not understood; using"
   111               " default BinaryTreeDictionary implementation instead.");
   112   }
   113   assert(_dictionary != NULL, "CMS dictionary initialization");
   114   // The indexed free lists are initially all empty and are lazily
   115   // filled in on demand. Initialize the array elements to NULL.
   116   initializeIndexedFreeListArray();
   118   // Not using adaptive free lists assumes that allocation is first
   119   // from the linAB's.  Also a cms perm gen which can be compacted
   120   // has to have the klass's klassKlass allocated at a lower
   121   // address in the heap than the klass so that the klassKlass is
   122   // moved to its new location before the klass is moved.
   123   // Set the _refillSize for the linear allocation blocks
   124   if (!use_adaptive_freelists) {
   125     FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
   126                                            FreeBlockDictionary<FreeChunk>::atLeast);
   127     // The small linAB initially has all the space and will allocate
   128     // a chunk of any size.
   129     HeapWord* addr = (HeapWord*) fc;
   130     _smallLinearAllocBlock.set(addr, fc->size() ,
   131       1024*SmallForLinearAlloc, fc->size());
   132     // Note that _unallocated_block is not updated here.
   133     // Allocations from the linear allocation block should
   134     // update it.
   135   } else {
   136     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
   137                                SmallForLinearAlloc);
   138   }
   139   // CMSIndexedFreeListReplenish should be at least 1
   140   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
   141   _promoInfo.setSpace(this);
   142   if (UseCMSBestFit) {
   143     _fitStrategy = FreeBlockBestFitFirst;
   144   } else {
   145     _fitStrategy = FreeBlockStrategyNone;
   146   }
   147   check_free_list_consistency();
   149   // Initialize locks for parallel case.
   151   if (CollectedHeap::use_parallel_gc_threads()) {
   152     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   153       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
   154                                               "a freelist par lock",
   155                                               true);
   156       if (_indexedFreeListParLocks[i] == NULL)
   157         vm_exit_during_initialization("Could not allocate a par lock");
   158       DEBUG_ONLY(
   159         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
   160       )
   161     }
   162     _dictionary->set_par_lock(&_parDictionaryAllocLock);
   163   }
   164 }
   166 // Like CompactibleSpace forward() but always calls cross_threshold() to
   167 // update the block offset table.  Removed initialize_threshold call because
   168 // CFLS does not use a block offset array for contiguous spaces.
   169 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
   170                                     CompactPoint* cp, HeapWord* compact_top) {
   171   // q is alive
   172   // First check if we should switch compaction space
   173   assert(this == cp->space, "'this' should be current compaction space.");
   174   size_t compaction_max_size = pointer_delta(end(), compact_top);
   175   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
   176     "virtual adjustObjectSize_v() method is not correct");
   177   size_t adjusted_size = adjustObjectSize(size);
   178   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
   179          "no small fragments allowed");
   180   assert(minimum_free_block_size() == MinChunkSize,
   181          "for de-virtualized reference below");
    182   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
   183   if (adjusted_size + MinChunkSize > compaction_max_size &&
   184       adjusted_size != compaction_max_size) {
   185     do {
   186       // switch to next compaction space
   187       cp->space->set_compaction_top(compact_top);
   188       cp->space = cp->space->next_compaction_space();
   189       if (cp->space == NULL) {
   190         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
   191         assert(cp->gen != NULL, "compaction must succeed");
   192         cp->space = cp->gen->first_compaction_space();
   193         assert(cp->space != NULL, "generation must have a first compaction space");
   194       }
   195       compact_top = cp->space->bottom();
   196       cp->space->set_compaction_top(compact_top);
   197       // The correct adjusted_size may not be the same as that for this method
    198   // (i.e., cp->space may no longer be "this"), so adjust the size again.
   199       // Use the virtual method which is not used above to save the virtual
   200       // dispatch.
   201       adjusted_size = cp->space->adjust_object_size_v(size);
   202       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
   203       assert(cp->space->minimum_free_block_size() == 0, "just checking");
   204     } while (adjusted_size > compaction_max_size);
   205   }
   207   // store the forwarding pointer into the mark word
   208   if ((HeapWord*)q != compact_top) {
   209     q->forward_to(oop(compact_top));
   210     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
   211   } else {
   212     // if the object isn't moving we can just set the mark to the default
   213     // mark and handle it specially later on.
   214     q->init_mark();
   215     assert(q->forwardee() == NULL, "should be forwarded to NULL");
   216   }
   218   compact_top += adjusted_size;
   220   // we need to update the offset table so that the beginnings of objects can be
   221   // found during scavenge.  Note that we are updating the offset table based on
   222   // where the object will be once the compaction phase finishes.
   224   // Always call cross_threshold().  A contiguous space can only call it when
    225   // the compaction_top exceeds the current threshold but not for a
   226   // non-contiguous space.
   227   cp->threshold =
   228     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
   229   return compact_top;
   230 }
   232 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
   233 // and use of single_block instead of alloc_block.  The name here is not really
   234 // appropriate - maybe a more general name could be invented for both the
   235 // contiguous and noncontiguous spaces.
   237 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
   238   _bt.single_block(start, the_end);
   239   return end();
   240 }
   242 // Initialize them to NULL.
   243 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
   244   for (size_t i = 0; i < IndexSetSize; i++) {
   245     // Note that on platforms where objects are double word aligned,
   246     // the odd array elements are not used.  It is convenient, however,
   247     // to map directly from the object size to the array element.
   248     _indexedFreeList[i].reset(IndexSetSize);
   249     _indexedFreeList[i].set_size(i);
   250     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   251     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   252     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   253     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   254   }
   255 }
   257 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
   258   for (size_t i = 1; i < IndexSetSize; i++) {
   259     assert(_indexedFreeList[i].size() == (size_t) i,
   260       "Indexed free list sizes are incorrect");
   261     _indexedFreeList[i].reset(IndexSetSize);
   262     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   263     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   264     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   265     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   266   }
   267 }
   269 void CompactibleFreeListSpace::reset(MemRegion mr) {
   270   resetIndexedFreeListArray();
   271   dictionary()->reset();
   272   if (BlockOffsetArrayUseUnallocatedBlock) {
   273     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
   274     // Everything's allocated until proven otherwise.
   275     _bt.set_unallocated_block(end());
   276   }
   277   if (!mr.is_empty()) {
   278     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
   279     _bt.single_block(mr.start(), mr.word_size());
   280     FreeChunk* fc = (FreeChunk*) mr.start();
   281     fc->set_size(mr.word_size());
   282     if (mr.word_size() >= IndexSetSize ) {
   283       returnChunkToDictionary(fc);
   284     } else {
   285       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
   286       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
   287     }
   288     coalBirth(mr.word_size());
   289   }
   290   _promoInfo.reset();
   291   _smallLinearAllocBlock._ptr = NULL;
   292   _smallLinearAllocBlock._word_size = 0;
   293 }
   295 void CompactibleFreeListSpace::reset_after_compaction() {
   296   // Reset the space to the new reality - one free chunk.
   297   MemRegion mr(compaction_top(), end());
   298   reset(mr);
   299   // Now refill the linear allocation block(s) if possible.
   300   if (_adaptive_freelists) {
   301     refillLinearAllocBlocksIfNeeded();
   302   } else {
   303     // Place as much of mr in the linAB as we can get,
   304     // provided it was big enough to go into the dictionary.
   305     FreeChunk* fc = dictionary()->find_largest_dict();
   306     if (fc != NULL) {
   307       assert(fc->size() == mr.word_size(),
   308              "Why was the chunk broken up?");
   309       removeChunkFromDictionary(fc);
   310       HeapWord* addr = (HeapWord*) fc;
   311       _smallLinearAllocBlock.set(addr, fc->size() ,
   312         1024*SmallForLinearAlloc, fc->size());
   313       // Note that _unallocated_block is not updated here.
   314     }
   315   }
   316 }
   318 // Walks the entire dictionary, returning a coterminal
   319 // chunk, if it exists. Use with caution since it involves
   320 // a potentially complete walk of a potentially large tree.
   321 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
   323   assert_lock_strong(&_freelistLock);
   325   return dictionary()->find_chunk_ends_at(end());
   326 }
   329 #ifndef PRODUCT
   330 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
   331   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   332     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
   333   }
   334 }
   336 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
   337   size_t sum = 0;
   338   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   339     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
   340   }
   341   return sum;
   342 }
   344 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
   345   size_t count = 0;
   346   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
   347     debug_only(
   348       ssize_t total_list_count = 0;
   349       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   350          fc = fc->next()) {
   351         total_list_count++;
   352       }
   353       assert(total_list_count ==  _indexedFreeList[i].count(),
   354         "Count in list is incorrect");
   355     )
   356     count += _indexedFreeList[i].count();
   357   }
   358   return count;
   359 }
   361 size_t CompactibleFreeListSpace::totalCount() {
   362   size_t num = totalCountInIndexedFreeLists();
   363   num +=  dictionary()->total_count();
   364   if (_smallLinearAllocBlock._word_size != 0) {
   365     num++;
   366   }
   367   return num;
   368 }
   369 #endif
   371 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
   372   FreeChunk* fc = (FreeChunk*) p;
   373   return fc->is_free();
   374 }
   376 size_t CompactibleFreeListSpace::used() const {
   377   return capacity() - free();
   378 }
   380 size_t CompactibleFreeListSpace::free() const {
   381   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   382   // if you do this while the structures are in flux you
   383   // may get an approximate answer only; for instance
   384   // because there is concurrent allocation either
   385   // directly by mutators or for promotion during a GC.
   386   // It's "MT-safe", however, in the sense that you are guaranteed
   387   // not to crash and burn, for instance, because of walking
   388   // pointers that could disappear as you were walking them.
   389   // The approximation is because the various components
   390   // that are read below are not read atomically (and
   391   // further the computation of totalSizeInIndexedFreeLists()
   392   // is itself a non-atomic computation. The normal use of
   393   // this is during a resize operation at the end of GC
   394   // and at that time you are guaranteed to get the
   395   // correct actual value. However, for instance, this is
   396   // also read completely asynchronously by the "perf-sampler"
   397   // that supports jvmstat, and you are apt to see the values
   398   // flicker in such cases.
   399   assert(_dictionary != NULL, "No _dictionary?");
   400   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
   401           totalSizeInIndexedFreeLists() +
   402           _smallLinearAllocBlock._word_size) * HeapWordSize;
   403 }
   405 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
   406   assert(_dictionary != NULL, "No _dictionary?");
   407   assert_locked();
   408   size_t res = _dictionary->max_chunk_size();
   409   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
   410                        (size_t) SmallForLinearAlloc - 1));
   411   // XXX the following could potentially be pretty slow;
    412   // should one, pessimally for the rare cases when res
    413   // calculated above is less than IndexSetSize,
   414   // just return res calculated above? My reasoning was that
   415   // those cases will be so rare that the extra time spent doesn't
   416   // really matter....
   417   // Note: do not change the loop test i >= res + IndexSetStride
   418   // to i > res below, because i is unsigned and res may be zero.
   419   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
   420        i -= IndexSetStride) {
   421     if (_indexedFreeList[i].head() != NULL) {
   422       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   423       return i;
   424     }
   425   }
   426   return res;
   427 }
   429 void LinearAllocBlock::print_on(outputStream* st) const {
   430   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
   431             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
   432             _ptr, _word_size, _refillSize, _allocation_size_limit);
   433 }
   435 void CompactibleFreeListSpace::print_on(outputStream* st) const {
   436   st->print_cr("COMPACTIBLE FREELIST SPACE");
   437   st->print_cr(" Space:");
   438   Space::print_on(st);
   440   st->print_cr("promoInfo:");
   441   _promoInfo.print_on(st);
   443   st->print_cr("_smallLinearAllocBlock");
   444   _smallLinearAllocBlock.print_on(st);
   446   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
   448   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
   449                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
   450 }
   452 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
   453 const {
   454   reportIndexedFreeListStatistics();
   455   gclog_or_tty->print_cr("Layout of Indexed Freelists");
   456   gclog_or_tty->print_cr("---------------------------");
   457   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
   458   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   459     _indexedFreeList[i].print_on(gclog_or_tty);
   460     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   461          fc = fc->next()) {
   462       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
   463                           fc, (HeapWord*)fc + i,
   464                           fc->cantCoalesce() ? "\t CC" : "");
   465     }
   466   }
   467 }
   469 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
   470 const {
   471   _promoInfo.print_on(st);
   472 }
   474 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
   475 const {
   476   _dictionary->report_statistics();
   477   st->print_cr("Layout of Freelists in Tree");
   478   st->print_cr("---------------------------");
   479   _dictionary->print_free_lists(st);
   480 }
   482 class BlkPrintingClosure: public BlkClosure {
   483   const CMSCollector*             _collector;
   484   const CompactibleFreeListSpace* _sp;
   485   const CMSBitMap*                _live_bit_map;
   486   const bool                      _post_remark;
   487   outputStream*                   _st;
   488 public:
   489   BlkPrintingClosure(const CMSCollector* collector,
   490                      const CompactibleFreeListSpace* sp,
   491                      const CMSBitMap* live_bit_map,
   492                      outputStream* st):
   493     _collector(collector),
   494     _sp(sp),
   495     _live_bit_map(live_bit_map),
   496     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
   497     _st(st) { }
   498   size_t do_blk(HeapWord* addr);
   499 };
   501 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
   502   size_t sz = _sp->block_size_no_stall(addr, _collector);
   503   assert(sz != 0, "Should always be able to compute a size");
   504   if (_sp->block_is_obj(addr)) {
   505     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
   506     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
   507       addr,
   508       dead ? "dead" : "live",
   509       sz,
   510       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
   511     if (CMSPrintObjectsInDump && !dead) {
   512       oop(addr)->print_on(_st);
   513       _st->print_cr("--------------------------------------");
   514     }
   515   } else { // free block
   516     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
   517       addr, sz, CMSPrintChunksInDump ? ":" : ".");
   518     if (CMSPrintChunksInDump) {
   519       ((FreeChunk*)addr)->print_on(_st);
   520       _st->print_cr("--------------------------------------");
   521     }
   522   }
   523   return sz;
   524 }
   526 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
   527   outputStream* st) {
   528   st->print_cr("\n=========================");
   529   st->print_cr("Block layout in CMS Heap:");
   530   st->print_cr("=========================");
   531   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
   532   blk_iterate(&bpcl);
   534   st->print_cr("\n=======================================");
   535   st->print_cr("Order & Layout of Promotion Info Blocks");
   536   st->print_cr("=======================================");
   537   print_promo_info_blocks(st);
   539   st->print_cr("\n===========================");
   540   st->print_cr("Order of Indexed Free Lists");
    541   st->print_cr("===========================");
   542   print_indexed_free_lists(st);
   544   st->print_cr("\n=================================");
   545   st->print_cr("Order of Free Lists in Dictionary");
   546   st->print_cr("=================================");
   547   print_dictionary_free_lists(st);
   548 }
   551 void CompactibleFreeListSpace::reportFreeListStatistics() const {
   552   assert_lock_strong(&_freelistLock);
   553   assert(PrintFLSStatistics != 0, "Reporting error");
   554   _dictionary->report_statistics();
   555   if (PrintFLSStatistics > 1) {
   556     reportIndexedFreeListStatistics();
   557     size_t total_size = totalSizeInIndexedFreeLists() +
   558                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
   559     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
   560   }
   561 }
   563 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
   564   assert_lock_strong(&_freelistLock);
   565   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
   566                       "--------------------------------\n");
   567   size_t total_size = totalSizeInIndexedFreeLists();
   568   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
    569   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
    570   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
    571   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
    572   if (free_blocks != 0) {
    573     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
   574   }
   575 }
   577 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
   578   size_t res = 0;
   579   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   580     debug_only(
   581       ssize_t recount = 0;
   582       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   583          fc = fc->next()) {
   584         recount += 1;
   585       }
   586       assert(recount == _indexedFreeList[i].count(),
   587         "Incorrect count in list");
   588     )
   589     res += _indexedFreeList[i].count();
   590   }
   591   return res;
   592 }
   594 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
   595   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
   596     if (_indexedFreeList[i].head() != NULL) {
   597       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   598       return (size_t)i;
   599     }
   600   }
   601   return 0;
   602 }
   604 void CompactibleFreeListSpace::set_end(HeapWord* value) {
   605   HeapWord* prevEnd = end();
   606   assert(prevEnd != value, "unnecessary set_end call");
   607   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   608         "New end is below unallocated block");
   609   _end = value;
   610   if (prevEnd != NULL) {
   611     // Resize the underlying block offset table.
   612     _bt.resize(pointer_delta(value, bottom()));
   613     if (value <= prevEnd) {
   614       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   615              "New end is below unallocated block");
   616     } else {
   617       // Now, take this new chunk and add it to the free blocks.
   618       // Note that the BOT has not yet been updated for this block.
   619       size_t newFcSize = pointer_delta(value, prevEnd);
   620       // XXX This is REALLY UGLY and should be fixed up. XXX
   621       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
   622         // Mark the boundary of the new block in BOT
   623         _bt.mark_block(prevEnd, value);
   624         // put it all in the linAB
   625         if (ParallelGCThreads == 0) {
   626           _smallLinearAllocBlock._ptr = prevEnd;
   627           _smallLinearAllocBlock._word_size = newFcSize;
   628           repairLinearAllocBlock(&_smallLinearAllocBlock);
   629         } else { // ParallelGCThreads > 0
   630           MutexLockerEx x(parDictionaryAllocLock(),
   631                           Mutex::_no_safepoint_check_flag);
   632           _smallLinearAllocBlock._ptr = prevEnd;
   633           _smallLinearAllocBlock._word_size = newFcSize;
   634           repairLinearAllocBlock(&_smallLinearAllocBlock);
   635         }
   636         // Births of chunks put into a LinAB are not recorded.  Births
   637         // of chunks as they are allocated out of a LinAB are.
   638       } else {
   639         // Add the block to the free lists, if possible coalescing it
   640         // with the last free block, and update the BOT and census data.
   641         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
   642       }
   643     }
   644   }
   645 }
   647 class FreeListSpace_DCTOC : public Filtering_DCTOC {
   648   CompactibleFreeListSpace* _cfls;
   649   CMSCollector* _collector;
   650 protected:
   651   // Override.
   652 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
   653   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
   654                                        HeapWord* bottom, HeapWord* top, \
   655                                        ClosureType* cl);                \
   656       void walk_mem_region_with_cl_par(MemRegion mr,                    \
   657                                        HeapWord* bottom, HeapWord* top, \
   658                                        ClosureType* cl);                \
   659     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
   660                                        HeapWord* bottom, HeapWord* top, \
   661                                        ClosureType* cl)
   662   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
   663   walk_mem_region_with_cl_DECL(FilteringClosure);
   665 public:
   666   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
   667                       CMSCollector* collector,
   668                       ExtendedOopClosure* cl,
   669                       CardTableModRefBS::PrecisionStyle precision,
   670                       HeapWord* boundary) :
   671     Filtering_DCTOC(sp, cl, precision, boundary),
   672     _cfls(sp), _collector(collector) {}
   673 };
   675 // We de-virtualize the block-related calls below, since we know that our
   676 // space is a CompactibleFreeListSpace.
   678 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
   679 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
   680                                                  HeapWord* bottom,              \
   681                                                  HeapWord* top,                 \
   682                                                  ClosureType* cl) {             \
   683    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
   684    if (is_par) {                                                                \
   685      assert(SharedHeap::heap()->n_par_threads() ==                              \
   686             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
   687      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   688    } else {                                                                     \
   689      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   690    }                                                                            \
   691 }                                                                               \
   692 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
   693                                                       HeapWord* bottom,         \
   694                                                       HeapWord* top,            \
   695                                                       ClosureType* cl) {        \
   696   /* Skip parts that are before "mr", in case "block_start" sent us             \
   697      back too far. */                                                           \
   698   HeapWord* mr_start = mr.start();                                              \
   699   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
   700   HeapWord* next = bottom + bot_size;                                           \
   701   while (next < mr_start) {                                                     \
   702     bottom = next;                                                              \
   703     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
   704     next = bottom + bot_size;                                                   \
   705   }                                                                             \
   706                                                                                 \
   707   while (bottom < top) {                                                        \
   708     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
   709         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   710                     oop(bottom)) &&                                             \
   711         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   712       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   713       bottom += _cfls->adjustObjectSize(word_sz);                               \
   714     } else {                                                                    \
   715       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
   716     }                                                                           \
   717   }                                                                             \
   718 }                                                                               \
   719 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
   720                                                         HeapWord* bottom,       \
   721                                                         HeapWord* top,          \
   722                                                         ClosureType* cl) {      \
   723   /* Skip parts that are before "mr", in case "block_start" sent us             \
   724      back too far. */                                                           \
   725   HeapWord* mr_start = mr.start();                                              \
   726   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
   727   HeapWord* next = bottom + bot_size;                                           \
   728   while (next < mr_start) {                                                     \
   729     bottom = next;                                                              \
   730     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
   731     next = bottom + bot_size;                                                   \
   732   }                                                                             \
   733                                                                                 \
   734   while (bottom < top) {                                                        \
   735     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
   736         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   737                     oop(bottom)) &&                                             \
   738         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   739       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   740       bottom += _cfls->adjustObjectSize(word_sz);                               \
   741     } else {                                                                    \
   742       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
   743     }                                                                           \
   744   }                                                                             \
   745 }
   747 // (There are only two of these, rather than N, because the split is due
   748 // only to the introduction of the FilteringClosure, a local part of the
   749 // impl of this abstraction.)
   750 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
   751 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
   753 DirtyCardToOopClosure*
   754 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
   755                                       CardTableModRefBS::PrecisionStyle precision,
   756                                       HeapWord* boundary) {
   757   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
   758 }
   761 // Note on locking for the space iteration functions:
   762 // since the collector's iteration activities are concurrent with
   763 // allocation activities by mutators, absent a suitable mutual exclusion
    764 // mechanism the iterators may go awry. For instance a block being iterated
   765 // may suddenly be allocated or divided up and part of it allocated and
   766 // so on.
   768 // Apply the given closure to each block in the space.
   769 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
   770   assert_lock_strong(freelistLock());
   771   HeapWord *cur, *limit;
   772   for (cur = bottom(), limit = end(); cur < limit;
   773        cur += cl->do_blk_careful(cur));
   774 }
   776 // Apply the given closure to each block in the space.
   777 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
   778   assert_lock_strong(freelistLock());
   779   HeapWord *cur, *limit;
   780   for (cur = bottom(), limit = end(); cur < limit;
   781        cur += cl->do_blk(cur));
   782 }
   784 // Apply the given closure to each oop in the space.
   785 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
   786   assert_lock_strong(freelistLock());
   787   HeapWord *cur, *limit;
   788   size_t curSize;
   789   for (cur = bottom(), limit = end(); cur < limit;
   790        cur += curSize) {
   791     curSize = block_size(cur);
   792     if (block_is_obj(cur)) {
   793       oop(cur)->oop_iterate(cl);
   794     }
   795   }
   796 }
   798 // Apply the given closure to each oop in the space \intersect memory region.
   799 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
   800   assert_lock_strong(freelistLock());
   801   if (is_empty()) {
   802     return;
   803   }
   804   MemRegion cur = MemRegion(bottom(), end());
   805   mr = mr.intersection(cur);
   806   if (mr.is_empty()) {
   807     return;
   808   }
   809   if (mr.equals(cur)) {
   810     oop_iterate(cl);
   811     return;
   812   }
   813   assert(mr.end() <= end(), "just took an intersection above");
   814   HeapWord* obj_addr = block_start(mr.start());
   815   HeapWord* t = mr.end();
   817   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
   818   if (block_is_obj(obj_addr)) {
   819     // Handle first object specially.
   820     oop obj = oop(obj_addr);
   821     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
   822   } else {
   823     FreeChunk* fc = (FreeChunk*)obj_addr;
   824     obj_addr += fc->size();
   825   }
   826   while (obj_addr < t) {
   827     HeapWord* obj = obj_addr;
   828     obj_addr += block_size(obj_addr);
   829     // If "obj_addr" is not greater than top, then the
   830     // entire object "obj" is within the region.
   831     if (obj_addr <= t) {
   832       if (block_is_obj(obj)) {
   833         oop(obj)->oop_iterate(cl);
   834       }
   835     } else {
   836       // "obj" extends beyond end of region
   837       if (block_is_obj(obj)) {
   838         oop(obj)->oop_iterate(&smr_blk);
   839       }
   840       break;
   841     }
   842   }
   843 }
   845 // NOTE: In the following methods, in order to safely be able to
   846 // apply the closure to an object, we need to be sure that the
   847 // object has been initialized. We are guaranteed that an object
   848 // is initialized if we are holding the Heap_lock with the
   849 // world stopped.
   850 void CompactibleFreeListSpace::verify_objects_initialized() const {
   851   if (is_init_completed()) {
   852     assert_locked_or_safepoint(Heap_lock);
   853     if (Universe::is_fully_initialized()) {
   854       guarantee(SafepointSynchronize::is_at_safepoint(),
   855                 "Required for objects to be initialized");
   856     }
   857   } // else make a concession at vm start-up
   858 }
   860 // Apply the given closure to each object in the space
   861 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
   862   assert_lock_strong(freelistLock());
   863   NOT_PRODUCT(verify_objects_initialized());
   864   HeapWord *cur, *limit;
   865   size_t curSize;
   866   for (cur = bottom(), limit = end(); cur < limit;
   867        cur += curSize) {
   868     curSize = block_size(cur);
   869     if (block_is_obj(cur)) {
   870       blk->do_object(oop(cur));
   871     }
   872   }
   873 }
   875 // Apply the given closure to each live object in the space
   876 //   The usage of CompactibleFreeListSpace
   877 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
   878 // objects in the space with references to objects that are no longer
   879 // valid.  For example, an object may reference another object
    880 // that has already been swept up (collected).  This method uses
   881 // obj_is_alive() to determine whether it is safe to apply the closure to
   882 // an object.  See obj_is_alive() for details on how liveness of an
   883 // object is decided.
   885 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
   886   assert_lock_strong(freelistLock());
   887   NOT_PRODUCT(verify_objects_initialized());
   888   HeapWord *cur, *limit;
   889   size_t curSize;
   890   for (cur = bottom(), limit = end(); cur < limit;
   891        cur += curSize) {
   892     curSize = block_size(cur);
   893     if (block_is_obj(cur) && obj_is_alive(cur)) {
   894       blk->do_object(oop(cur));
   895     }
   896   }
   897 }
   899 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
   900                                                   UpwardsObjectClosure* cl) {
   901   assert_locked(freelistLock());
   902   NOT_PRODUCT(verify_objects_initialized());
   903   Space::object_iterate_mem(mr, cl);
   904 }
   906 // Callers of this iterator beware: The closure application should
   907 // be robust in the face of uninitialized objects and should (always)
   908 // return a correct size so that the next addr + size below gives us a
   909 // valid block boundary. [See for instance,
   910 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   911 // in ConcurrentMarkSweepGeneration.cpp.]
   912 HeapWord*
   913 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
   914   assert_lock_strong(freelistLock());
   915   HeapWord *addr, *last;
   916   size_t size;
   917   for (addr = bottom(), last  = end();
   918        addr < last; addr += size) {
   919     FreeChunk* fc = (FreeChunk*)addr;
   920     if (fc->is_free()) {
   921       // Since we hold the free list lock, which protects direct
   922       // allocation in this generation by mutators, a free object
   923       // will remain free throughout this iteration code.
   924       size = fc->size();
   925     } else {
   926       // Note that the object need not necessarily be initialized,
   927       // because (for instance) the free list lock does NOT protect
   928       // object initialization. The closure application below must
   929       // therefore be correct in the face of uninitialized objects.
   930       size = cl->do_object_careful(oop(addr));
   931       if (size == 0) {
   932         // An unparsable object found. Signal early termination.
   933         return addr;
   934       }
   935     }
   936   }
   937   return NULL;
   938 }
   940 // Callers of this iterator beware: The closure application should
   941 // be robust in the face of uninitialized objects and should (always)
   942 // return a correct size so that the next addr + size below gives us a
   943 // valid block boundary. [See for instance,
   944 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   945 // in ConcurrentMarkSweepGeneration.cpp.]
   946 HeapWord*
   947 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
   948   ObjectClosureCareful* cl) {
   949   assert_lock_strong(freelistLock());
   950   // Can't use used_region() below because it may not necessarily
   951   // be the same as [bottom(),end()); although we could
   952   // use [used_region().start(),round_to(used_region().end(),CardSize)),
   953   // that appears too cumbersome, so we just do the simpler check
   954   // in the assertion below.
   955   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
   956          "mr should be non-empty and within used space");
   957   HeapWord *addr, *end;
   958   size_t size;
   959   for (addr = block_start_careful(mr.start()), end  = mr.end();
   960        addr < end; addr += size) {
   961     FreeChunk* fc = (FreeChunk*)addr;
   962     if (fc->is_free()) {
   963       // Since we hold the free list lock, which protects direct
   964       // allocation in this generation by mutators, a free object
   965       // will remain free throughout this iteration code.
   966       size = fc->size();
   967     } else {
   968       // Note that the object need not necessarily be initialized,
   969       // because (for instance) the free list lock does NOT protect
   970       // object initialization. The closure application below must
   971       // therefore be correct in the face of uninitialized objects.
   972       size = cl->do_object_careful_m(oop(addr), mr);
   973       if (size == 0) {
   974         // An unparsable object found. Signal early termination.
   975         return addr;
   976       }
   977     }
   978   }
   979   return NULL;
   980 }
   983 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
   984   NOT_PRODUCT(verify_objects_initialized());
   985   return _bt.block_start(p);
   986 }
   988 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
   989   return _bt.block_start_careful(p);
   990 }
   992 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   993   NOT_PRODUCT(verify_objects_initialized());
   994   // This must be volatile, or else there is a danger that the compiler
   995   // will compile the code below into a sometimes-infinite loop, by keeping
   996   // the value read the first time in a register.
   997   while (true) {
   998     // We must do this until we get a consistent view of the object.
   999     if (FreeChunk::indicatesFreeChunk(p)) {
  1000       volatile FreeChunk* fc = (volatile FreeChunk*)p;
  1001       size_t res = fc->size();
  1002       // If the object is still a free chunk, return the size, else it
  1003       // has been allocated so try again.
  1004       if (FreeChunk::indicatesFreeChunk(p)) {
  1005         assert(res != 0, "Block size should not be 0");
   1006         return res;
   1007       }
  1008     } else {
  1009       // must read from what 'p' points to in each loop.
  1010       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
  1011       if (k != NULL) {
  1012         assert(k->is_klass(), "Should really be klass oop.");
  1013         oop o = (oop)p;
  1014         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
  1015         size_t res = o->size_given_klass(k);
  1016         res = adjustObjectSize(res);
  1017         assert(res != 0, "Block size should not be 0");
   1018         return res;
   1019       }
   1020     }
   1021   }
   1022 }
  1024 // TODO: Now that is_parsable is gone, we should combine these two functions.
  1025 // A variant of the above that uses the Printezis bits for
  1026 // unparsable but allocated objects. This avoids any possible
  1027 // stalls waiting for mutators to initialize objects, and is
  1028 // thus potentially faster than the variant above. However,
  1029 // this variant may return a zero size for a block that is
  1030 // under mutation and for which a consistent size cannot be
  1031 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
  1032 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
  1033                                                      const CMSCollector* c)
  1034 const {
  1035   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1036   // This must be volatile, or else there is a danger that the compiler
  1037   // will compile the code below into a sometimes-infinite loop, by keeping
  1038   // the value read the first time in a register.
  1039   DEBUG_ONLY(uint loops = 0;)
  1040   while (true) {
  1041     // We must do this until we get a consistent view of the object.
  1042     if (FreeChunk::indicatesFreeChunk(p)) {
  1043       volatile FreeChunk* fc = (volatile FreeChunk*)p;
  1044       size_t res = fc->size();
  1045       if (FreeChunk::indicatesFreeChunk(p)) {
  1046         assert(res != 0, "Block size should not be 0");
  1047         assert(loops == 0, "Should be 0");
   1048         return res;
   1049       }
  1050     } else {
  1051       // must read from what 'p' points to in each loop.
  1052       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
  1053       // We trust the size of any object that has a non-NULL
  1054       // klass and (for those in the perm gen) is parsable
  1055       // -- irrespective of its conc_safe-ty.
  1056       if (k != NULL) {
  1057         assert(k->is_klass(), "Should really be klass oop.");
  1058         oop o = (oop)p;
  1059         assert(o->is_oop(), "Should be an oop");
  1060         size_t res = o->size_given_klass(k);
  1061         res = adjustObjectSize(res);
  1062         assert(res != 0, "Block size should not be 0");
  1063         return res;
  1064       } else {
  1065         // May return 0 if P-bits not present.
   1066         return c->block_size_if_printezis_bits(p);
   1067       }
   1068     }
  1069     assert(loops == 0, "Can loop at most once");
   1070     DEBUG_ONLY(loops++;)
   1071   }
   1072 }
  1074 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
  1075   NOT_PRODUCT(verify_objects_initialized());
  1076   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1077   FreeChunk* fc = (FreeChunk*)p;
  1078   if (fc->is_free()) {
  1079     return fc->size();
  1080   } else {
  1081     // Ignore mark word because this may be a recently promoted
  1082     // object whose mark word is used to chain together grey
  1083     // objects (the last one would have a null value).
  1084     assert(oop(p)->is_oop(true), "Should be an oop");
   1085     return adjustObjectSize(oop(p)->size());
   1086   }
   1087 }
  1089 // This implementation assumes that the property of "being an object" is
  1090 // stable.  But being a free chunk may not be (because of parallel
  1091 // promotion.)
  1092 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
  1093   FreeChunk* fc = (FreeChunk*)p;
  1094   assert(is_in_reserved(p), "Should be in space");
  1095   // When doing a mark-sweep-compact of the CMS generation, this
  1096   // assertion may fail because prepare_for_compaction() uses
  1097   // space that is garbage to maintain information on ranges of
  1098   // live objects so that these live ranges can be moved as a whole.
  1099   // Comment out this assertion until that problem can be solved
  1100   // (i.e., that the block start calculation may look at objects
  1101   // at address below "p" in finding the object that contains "p"
  1102   // and those objects (if garbage) may have been modified to hold
  1103   // live range information.
  1104   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
  1105   //        "Should be a block boundary");
  1106   if (FreeChunk::indicatesFreeChunk(p)) return false;
  1107   Klass* k = oop(p)->klass_or_null();
  1108   if (k != NULL) {
  1109     // Ignore mark word because it may have been used to
  1110     // chain together promoted objects (the last one
  1111     // would have a null value).
  1112     assert(oop(p)->is_oop(true), "Should be an oop");
  1113     return true;
  1114   } else {
   1115     return false;  // Was not an object at the start of collection.
   1116   }
   1117 }
  1119 // Check if the object is alive. This fact is checked either by consulting
  1120 // the main marking bitmap in the sweeping phase or, if it's a permanent
  1121 // generation and we're not in the sweeping phase, by checking the
  1122 // perm_gen_verify_bit_map where we store the "deadness" information if
  1123 // we did not sweep the perm gen in the most recent previous GC cycle.
  1124 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
  1125   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
  1126          "Else races are possible");
  1127   assert(block_is_obj(p), "The address should point to an object");
  1129   // If we're sweeping, we use object liveness information from the main bit map
  1130   // for both perm gen and old gen.
  1131   // We don't need to lock the bitmap (live_map or dead_map below), because
  1132   // EITHER we are in the middle of the sweeping phase, and the
  1133   // main marking bit map (live_map below) is locked,
  1134   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
  1135   // is stable, because it's mutated only in the sweeping phase.
  1136   // NOTE: This method is also used by jmap where, if class unloading is
  1137   // off, the results can return "false" for legitimate perm objects,
  1138   // when we are not in the midst of a sweeping phase, which can result
  1139   // in jmap not reporting certain perm gen objects. This will be moot
  1140   // if/when the perm gen goes away in the future.
  1141   if (_collector->abstract_state() == CMSCollector::Sweeping) {
  1142     CMSBitMap* live_map = _collector->markBitMap();
   1143     return live_map->par_isMarked((HeapWord*) p);
   1144   }
   1145   return true;
   1146 }
  1148 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
  1149   FreeChunk* fc = (FreeChunk*)p;
  1150   assert(is_in_reserved(p), "Should be in space");
  1151   assert(_bt.block_start(p) == p, "Should be a block boundary");
  1152   if (!fc->is_free()) {
  1153     // Ignore mark word because it may have been used to
  1154     // chain together promoted objects (the last one
  1155     // would have a null value).
  1156     assert(oop(p)->is_oop(true), "Should be an oop");
  1157     return true;
  1159   return false;
  1162 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
  1163 // approximate answer if you don't hold the freelistlock when you call this.
  1164 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
  1165   size_t size = 0;
  1166   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  1167     debug_only(
   1168       // We may be calling here without the lock, in which case we
  1169       // won't do this modest sanity check.
  1170       if (freelistLock()->owned_by_self()) {
  1171         size_t total_list_size = 0;
  1172         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
  1173           fc = fc->next()) {
  1174           total_list_size += i;
  1176         assert(total_list_size == i * _indexedFreeList[i].count(),
  1177                "Count in list is incorrect");
  1180     size += i * _indexedFreeList[i].count();
  1182   return size;
  1185 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  1186   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  1187   return allocate(size);
  1190 HeapWord*
  1191 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
  1192   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
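        // Allocate a block of the given (already adjusted) word size while
        // holding the free list lock.  The request is dispatched to either the
        // adaptive or the non-adaptive free list strategy below; on success the
        // chunk is marked not-free, the block offset table entries for it are
        // verified, and in debug builds the new block is mangled with a
        // distinct pattern.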
  1195 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  1196   assert_lock_strong(freelistLock());
  1197   HeapWord* res = NULL;
  1198   assert(size == adjustObjectSize(size),
  1199          "use adjustObjectSize() before calling into allocate()");
  1201   if (_adaptive_freelists) {
  1202     res = allocate_adaptive_freelists(size);
  1203   } else {  // non-adaptive free lists
  1204     res = allocate_non_adaptive_freelists(size);
  1207   if (res != NULL) {
  1208     // check that res does lie in this space!
  1209     assert(is_in_reserved(res), "Not in this space!");
  1210     assert(is_aligned((void*)res), "alignment check");
  1212     FreeChunk* fc = (FreeChunk*)res;
  1213     fc->markNotFree();
  1214     assert(!fc->is_free(), "shouldn't be marked free");
  1215     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
  1216     // Verify that the block offset table shows this to
  1217     // be a single block, but not one which is unallocated.
  1218     _bt.verify_single_block(res, size);
  1219     _bt.verify_not_unallocated(res, size);
  1220     // mangle a just allocated object with a distinct pattern.
  1221     debug_only(fc->mangleAllocated(size));
  1224   return res;
  1227 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
  1228   HeapWord* res = NULL;
  1229   // try and use linear allocation for smaller blocks
  1230   if (size < _smallLinearAllocBlock._allocation_size_limit) {
  1231     // if successful, the following also adjusts block offset table
  1232     res = getChunkFromSmallLinearAllocBlock(size);
  1234   // Else triage to indexed lists for smaller sizes
  1235   if (res == NULL) {
  1236     if (size < SmallForDictionary) {
  1237       res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1238     } else {
  1239       // else get it from the big dictionary; if even this doesn't
  1240       // work we are out of luck.
  1241       res = (HeapWord*)getChunkFromDictionaryExact(size);
  1245   return res;
  1248 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
  1249   assert_lock_strong(freelistLock());
  1250   HeapWord* res = NULL;
  1251   assert(size == adjustObjectSize(size),
  1252          "use adjustObjectSize() before calling into allocate()");
  1254   // Strategy
  1255   //   if small
   1256   //     exact size from small object indexed list
  1257   //     small or large linear allocation block (linAB) as appropriate
  1258   //     take from lists of greater sized chunks
  1259   //   else
  1260   //     dictionary
  1261   //     small or large linear allocation block if it has the space
  1262   // Try allocating exact size from indexTable first
  1263   if (size < IndexSetSize) {
  1264     res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1265     if(res != NULL) {
  1266       assert(res != (HeapWord*)_indexedFreeList[size].head(),
  1267         "Not removed from free list");
  1268       // no block offset table adjustment is necessary on blocks in
  1269       // the indexed lists.
  1271     // Try allocating from the small LinAB
  1272     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
  1273         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
  1274         // if successful, the above also adjusts block offset table
  1275         // Note that this call will refill the LinAB to
   1276         // satisfy the request.  This is different than in
  1277         // evm.
  1278         // Don't record chunk off a LinAB?  smallSplitBirth(size);
  1279     } else {
  1280       // Raid the exact free lists larger than size, even if they are not
  1281       // overpopulated.
  1282       res = (HeapWord*) getChunkFromGreater(size);
  1284   } else {
  1285     // Big objects get allocated directly from the dictionary.
  1286     res = (HeapWord*) getChunkFromDictionaryExact(size);
  1287     if (res == NULL) {
  1288       // Try hard not to fail since an allocation failure will likely
  1289       // trigger a synchronous GC.  Try to get the space from the
  1290       // allocation blocks.
  1291       res = getChunkFromSmallLinearAllocBlockRemainder(size);
  1295   return res;
  1298 // A worst-case estimate of the space required (in HeapWords) to expand the heap
  1299 // when promoting obj.
  1300 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  1301   // Depending on the object size, expansion may require refilling either a
  1302   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
  1303   // is added because the dictionary may over-allocate to avoid fragmentation.
  1304   size_t space = obj_size;
  1305   if (!_adaptive_freelists) {
  1306     space = MAX2(space, _smallLinearAllocBlock._refillSize);
  1308   space += _promoInfo.refillSize() + 2 * MinChunkSize;
  1309   return space;
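        // Satisfy a request of numWords by carving it out of a strictly larger
        // free chunk: first scan the indexed free lists at sizes of at least
        // numWords + MinChunkSize (so the leftover piece is still a valid
        // chunk), and if that fails fall back to the dictionary, splitting off
        // and returning the remainder in either case.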
  1312 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  1313   FreeChunk* ret;
  1315   assert(numWords >= MinChunkSize, "Size is less than minimum");
  1316   assert(linearAllocationWouldFail() || bestFitFirst(),
  1317     "Should not be here");
  1319   size_t i;
  1320   size_t currSize = numWords + MinChunkSize;
  1321   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  1322   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
  1323     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
  1324     if (fl->head()) {
  1325       ret = getFromListGreater(fl, numWords);
  1326       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
  1327       return ret;
  1331   currSize = MAX2((size_t)SmallForDictionary,
  1332                   (size_t)(numWords + MinChunkSize));
  1334   /* Try to get a chunk that satisfies request, while avoiding
  1335      fragmentation that can't be handled. */
  1337     ret =  dictionary()->get_chunk(currSize);
  1338     if (ret != NULL) {
  1339       assert(ret->size() - numWords >= MinChunkSize,
  1340              "Chunk is too small");
  1341       _bt.allocated((HeapWord*)ret, ret->size());
  1342       /* Carve returned chunk. */
  1343       (void) splitChunkAndReturnRemainder(ret, numWords);
  1344       /* Label this as no longer a free chunk. */
  1345       assert(ret->is_free(), "This chunk should be free");
  1346       ret->link_prev(NULL);
  1348     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
  1349     return ret;
  1351   ShouldNotReachHere();
  1354 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
  1355   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  1356   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
  1359 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
  1360   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
  1361          (_smallLinearAllocBlock._word_size == fc->size()),
  1362          "Linear allocation block shows incorrect size");
  1363   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
  1364           (_smallLinearAllocBlock._word_size == fc->size()));
  1367 // Check if the purported free chunk is present either as a linear
  1368 // allocation block, the size-indexed table of (smaller) free blocks,
  1369 // or the larger free blocks kept in the binary tree dictionary.
  1370 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
  1371   if (verify_chunk_is_linear_alloc_block(fc)) {
  1372     return true;
  1373   } else if (fc->size() < IndexSetSize) {
  1374     return verifyChunkInIndexedFreeLists(fc);
  1375   } else {
  1376     return dictionary()->verify_chunk_in_free_list(fc);
  1380 #ifndef PRODUCT
  1381 void CompactibleFreeListSpace::assert_locked() const {
  1382   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
  1385 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
  1386   CMSLockVerifier::assert_locked(lock);
  1388 #endif
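        // Allocate a chunk for use as scratch space.  The chunk is taken from
        // the dictionary under parDictionaryAllocLock (several GC threads may
        // call this) and is marked non-coalescable, presumably so the sweeper
        // leaves it alone while it is in use; note that it is returned still
        // marked as a free chunk.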
  1390 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
  1391   // In the parallel case, the main thread holds the free list lock
   1392   // on behalf of the parallel threads.
  1393   FreeChunk* fc;
  1395     // If GC is parallel, this might be called by several threads.
  1396     // This should be rare enough that the locking overhead won't affect
  1397     // the sequential code.
  1398     MutexLockerEx x(parDictionaryAllocLock(),
  1399                     Mutex::_no_safepoint_check_flag);
  1400     fc = getChunkFromDictionary(size);
  1402   if (fc != NULL) {
  1403     fc->dontCoalesce();
  1404     assert(fc->is_free(), "Should be free, but not coalescable");
  1405     // Verify that the block offset table shows this to
  1406     // be a single block, but not one which is unallocated.
  1407     _bt.verify_single_block((HeapWord*)fc, fc->size());
  1408     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  1410   return fc;
  1413 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
  1414   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  1415   assert_locked();
   1417   // If we are tracking promotions, first ensure space for
   1418   // promotion (including spooling space for saving the header if necessary),
   1419   // then allocate and copy, then track the promoted object if needed.
  1420   // When tracking (see PromotionInfo::track()), the mark word may
  1421   // be displaced and in this case restoration of the mark word
  1422   // occurs in the (oop_since_save_marks_)iterate phase.
  1423   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
  1424     return NULL;
  1426   // Call the allocate(size_t, bool) form directly to avoid the
  1427   // additional call through the allocate(size_t) form.  Having
  1428   // the compile inline the call is problematic because allocate(size_t)
  1429   // is a virtual method.
  1430   HeapWord* res = allocate(adjustObjectSize(obj_size));
  1431   if (res != NULL) {
  1432     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
  1433     // if we should be tracking promotions, do so.
  1434     if (_promoInfo.tracking()) {
  1435         _promoInfo.track((PromotedObject*)res);
  1438   return oop(res);
  1441 HeapWord*
  1442 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
  1443   assert_locked();
  1444   assert(size >= MinChunkSize, "minimum chunk size");
  1445   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
  1446     "maximum from smallLinearAllocBlock");
  1447   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
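        // Allocate size words from the given linear allocation block.  In the
        // common case the block's remainder can satisfy the request and
        // getChunkFromLinearAllocBlockRemainder() below handles it.  Otherwise
        // the block either satisfies the request exactly or has its remaining
        // words returned to the free lists; in both of these cases the block is
        // then refilled and, in the latter case, the request is satisfied from
        // the refilled block if possible.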
  1450 HeapWord*
  1451 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
  1452                                                        size_t size) {
  1453   assert_locked();
  1454   assert(size >= MinChunkSize, "too small");
  1455   HeapWord* res = NULL;
   1456   // Try to do linear allocation from blk, bailing out if the block is empty.
  1457   if (blk->_word_size == 0) {
  1458     // We have probably been unable to fill this either in the prologue or
  1459     // when it was exhausted at the last linear allocation. Bail out until
  1460     // next time.
  1461     assert(blk->_ptr == NULL, "consistency check");
  1462     return NULL;
  1464   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
  1465   res = getChunkFromLinearAllocBlockRemainder(blk, size);
  1466   if (res != NULL) return res;
  1468   // about to exhaust this linear allocation block
  1469   if (blk->_word_size == size) { // exactly satisfied
  1470     res = blk->_ptr;
  1471     _bt.allocated(res, blk->_word_size);
  1472   } else if (size + MinChunkSize <= blk->_refillSize) {
  1473     size_t sz = blk->_word_size;
   1474     // Update _unallocated_block if the size is such that the chunk would be
   1475     // returned to the indexed free list.  All other chunks in the indexed
   1476     // free lists are allocated from the dictionary, so _unallocated_block
   1477     // has already been adjusted for them.  Do it here so that the cost
   1478     // is incurred for all chunks added back to the indexed free lists.
  1479     if (sz < SmallForDictionary) {
  1480       _bt.allocated(blk->_ptr, sz);
  1482     // Return the chunk that isn't big enough, and then refill below.
  1483     addChunkToFreeLists(blk->_ptr, sz);
  1484     split_birth(sz);
  1485     // Don't keep statistics on adding back chunk from a LinAB.
  1486   } else {
  1487     // A refilled block would not satisfy the request.
  1488     return NULL;
  1491   blk->_ptr = NULL; blk->_word_size = 0;
  1492   refillLinearAllocBlock(blk);
  1493   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
  1494          "block was replenished");
  1495   if (res != NULL) {
  1496     split_birth(size);
  1497     repairLinearAllocBlock(blk);
  1498   } else if (blk->_ptr != NULL) {
  1499     res = blk->_ptr;
  1500     size_t blk_size = blk->_word_size;
  1501     blk->_word_size -= size;
  1502     blk->_ptr  += size;
  1503     split_birth(size);
  1504     repairLinearAllocBlock(blk);
  1505     // Update BOT last so that other (parallel) GC threads see a consistent
  1506     // view of the BOT and free blocks.
  1507     // Above must occur before BOT is updated below.
  1508     OrderAccess::storestore();
  1509     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1511   return res;
  1514 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
  1515                                         LinearAllocBlock* blk,
  1516                                         size_t size) {
  1517   assert_locked();
  1518   assert(size >= MinChunkSize, "too small");
  1520   HeapWord* res = NULL;
  1521   // This is the common case.  Keep it simple.
  1522   if (blk->_word_size >= size + MinChunkSize) {
  1523     assert(blk->_ptr != NULL, "consistency check");
  1524     res = blk->_ptr;
  1525     // Note that the BOT is up-to-date for the linAB before allocation.  It
  1526     // indicates the start of the linAB.  The split_block() updates the
  1527     // BOT for the linAB after the allocation (indicates the start of the
  1528     // next chunk to be allocated).
  1529     size_t blk_size = blk->_word_size;
  1530     blk->_word_size -= size;
  1531     blk->_ptr  += size;
  1532     split_birth(size);
  1533     repairLinearAllocBlock(blk);
  1534     // Update BOT last so that other (parallel) GC threads see a consistent
  1535     // view of the BOT and free blocks.
  1536     // Above must occur before BOT is updated below.
  1537     OrderAccess::storestore();
  1538     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1539     _bt.allocated(res, size);
  1541   return res;
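        // Take an exact-size chunk from the head of the indexed free list for
        // this size; if that list is empty, getChunkFromIndexedFreeListHelper()
        // below tries a best fit among the larger indexed lists and, failing
        // that, replenishes the list by carving up a larger chunk.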
  1544 FreeChunk*
  1545 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
  1546   assert_locked();
  1547   assert(size < SmallForDictionary, "just checking");
  1548   FreeChunk* res;
  1549   res = _indexedFreeList[size].get_chunk_at_head();
  1550   if (res == NULL) {
  1551     res = getChunkFromIndexedFreeListHelper(size);
  1553   _bt.verify_not_unallocated((HeapWord*) res, size);
  1554   assert(res == NULL || res->size() == size, "Incorrect block size");
  1555   return res;
  1558 FreeChunk*
  1559 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
  1560   bool replenish) {
  1561   assert_locked();
  1562   FreeChunk* fc = NULL;
  1563   if (size < SmallForDictionary) {
  1564     assert(_indexedFreeList[size].head() == NULL ||
  1565       _indexedFreeList[size].surplus() <= 0,
  1566       "List for this size should be empty or under populated");
  1567     // Try best fit in exact lists before replenishing the list
  1568     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
  1569       // Replenish list.
  1570       //
  1571       // Things tried that failed.
  1572       //   Tried allocating out of the two LinAB's first before
  1573       // replenishing lists.
  1574       //   Tried small linAB of size 256 (size in indexed list)
  1575       // and replenishing indexed lists from the small linAB.
  1576       //
  1577       FreeChunk* newFc = NULL;
  1578       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
  1579       if (replenish_size < SmallForDictionary) {
  1580         // Do not replenish from an underpopulated size.
  1581         if (_indexedFreeList[replenish_size].surplus() > 0 &&
  1582             _indexedFreeList[replenish_size].head() != NULL) {
  1583           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
  1584         } else if (bestFitFirst()) {
  1585           newFc = bestFitSmall(replenish_size);
  1588       if (newFc == NULL && replenish_size > size) {
  1589         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
  1590         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
   1592       // Note: The stats update for the split-death of the block obtained above
   1593       // will be recorded below, precisely when we know we are going to
   1594       // be actually splitting it into more than one piece.
  1595       if (newFc != NULL) {
  1596         if  (replenish || CMSReplenishIntermediate) {
  1597           // Replenish this list and return one block to caller.
  1598           size_t i;
  1599           FreeChunk *curFc, *nextFc;
  1600           size_t num_blk = newFc->size() / size;
  1601           assert(num_blk >= 1, "Smaller than requested?");
  1602           assert(newFc->size() % size == 0, "Should be integral multiple of request");
  1603           if (num_blk > 1) {
  1604             // we are sure we will be splitting the block just obtained
  1605             // into multiple pieces; record the split-death of the original
  1606             splitDeath(replenish_size);
  1608           // carve up and link blocks 0, ..., num_blk - 2
  1609           // The last chunk is not added to the lists but is returned as the
  1610           // free chunk.
  1611           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
  1612                i = 0;
  1613                i < (num_blk - 1);
  1614                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
  1615                i++) {
  1616             curFc->set_size(size);
  1617             // Don't record this as a return in order to try and
  1618             // determine the "returns" from a GC.
  1619             _bt.verify_not_unallocated((HeapWord*) fc, size);
  1620             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
  1621             _bt.mark_block((HeapWord*)curFc, size);
  1622             split_birth(size);
  1623             // Don't record the initial population of the indexed list
  1624             // as a split birth.
  1627           // check that the arithmetic was OK above
  1628           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
  1629             "inconsistency in carving newFc");
  1630           curFc->set_size(size);
  1631           _bt.mark_block((HeapWord*)curFc, size);
  1632           split_birth(size);
  1633           fc = curFc;
  1634         } else {
  1635           // Return entire block to caller
  1636           fc = newFc;
  1640   } else {
  1641     // Get a free chunk from the free chunk dictionary to be returned to
  1642     // replenish the indexed free list.
  1643     fc = getChunkFromDictionaryExact(size);
  1645   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
  1646   return fc;
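        // Get a chunk of at least the requested size from the dictionary.  The
        // returned chunk may be up to MinChunkSize - 1 words larger than
        // requested; if it is larger than that, the excess is split off and
        // returned to the free lists.  Contrast with getChunkFromDictionaryExact()
        // below, which keeps splitting (or retries with a larger request) until
        // the returned chunk is exactly the requested size.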
  1649 FreeChunk*
  1650 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
  1651   assert_locked();
  1652   FreeChunk* fc = _dictionary->get_chunk(size,
  1653                                          FreeBlockDictionary<FreeChunk>::atLeast);
  1654   if (fc == NULL) {
  1655     return NULL;
  1657   _bt.allocated((HeapWord*)fc, fc->size());
  1658   if (fc->size() >= size + MinChunkSize) {
  1659     fc = splitChunkAndReturnRemainder(fc, size);
  1661   assert(fc->size() >= size, "chunk too small");
  1662   assert(fc->size() < size + MinChunkSize, "chunk too big");
  1663   _bt.verify_single_block((HeapWord*)fc, fc->size());
  1664   return fc;
  1667 FreeChunk*
  1668 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
  1669   assert_locked();
  1670   FreeChunk* fc = _dictionary->get_chunk(size,
  1671                                          FreeBlockDictionary<FreeChunk>::atLeast);
  1672   if (fc == NULL) {
  1673     return fc;
  1675   _bt.allocated((HeapWord*)fc, fc->size());
  1676   if (fc->size() == size) {
  1677     _bt.verify_single_block((HeapWord*)fc, size);
  1678     return fc;
  1680   assert(fc->size() > size, "get_chunk() guarantee");
  1681   if (fc->size() < size + MinChunkSize) {
  1682     // Return the chunk to the dictionary and go get a bigger one.
  1683     returnChunkToDictionary(fc);
  1684     fc = _dictionary->get_chunk(size + MinChunkSize,
  1685                                 FreeBlockDictionary<FreeChunk>::atLeast);
  1686     if (fc == NULL) {
  1687       return NULL;
  1689     _bt.allocated((HeapWord*)fc, fc->size());
  1691   assert(fc->size() >= size + MinChunkSize, "tautology");
  1692   fc = splitChunkAndReturnRemainder(fc, size);
  1693   assert(fc->size() == size, "chunk is wrong size");
  1694   _bt.verify_single_block((HeapWord*)fc, size);
  1695   return fc;
  1698 void
  1699 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
  1700   assert_locked();
  1702   size_t size = chunk->size();
  1703   _bt.verify_single_block((HeapWord*)chunk, size);
  1704   // adjust _unallocated_block downward, as necessary
  1705   _bt.freed((HeapWord*)chunk, size);
  1706   _dictionary->return_chunk(chunk);
  1707 #ifndef PRODUCT
  1708   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1709     TreeChunk<FreeChunk, AdaptiveFreeList>* tc = TreeChunk<FreeChunk, AdaptiveFreeList>::as_TreeChunk(chunk);
  1710     TreeList<FreeChunk, AdaptiveFreeList>* tl = tc->list();
  1711     tl->verify_stats();
  1713 #endif // PRODUCT
  1716 void
  1717 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
  1718   assert_locked();
  1719   size_t size = fc->size();
  1720   _bt.verify_single_block((HeapWord*) fc, size);
  1721   _bt.verify_not_unallocated((HeapWord*) fc, size);
  1722   if (_adaptive_freelists) {
  1723     _indexedFreeList[size].return_chunk_at_tail(fc);
  1724   } else {
  1725     _indexedFreeList[size].return_chunk_at_head(fc);
  1727 #ifndef PRODUCT
  1728   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1729      _indexedFreeList[size].verify_stats();
  1731 #endif // PRODUCT
  1734 // Add chunk to end of last block -- if it's the largest
  1735 // block -- and update BOT and census data. We would
  1736 // of course have preferred to coalesce it with the
  1737 // last block, but it's currently less expensive to find the
  1738 // largest block than it is to find the last.
  1739 void
  1740 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
  1741   HeapWord* chunk, size_t     size) {
  1742   // check that the chunk does lie in this space!
  1743   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1744   // One of the parallel gc task threads may be here
  1745   // whilst others are allocating.
  1746   Mutex* lock = NULL;
  1747   if (ParallelGCThreads != 0) {
  1748     lock = &_parDictionaryAllocLock;
  1750   FreeChunk* ec;
  1752     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1753     ec = dictionary()->find_largest_dict();  // get largest block
  1754     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
  1755       // It's a coterminal block - we can coalesce.
  1756       size_t old_size = ec->size();
  1757       coalDeath(old_size);
  1758       removeChunkFromDictionary(ec);
  1759       size += old_size;
  1760     } else {
  1761       ec = (FreeChunk*)chunk;
  1764   ec->set_size(size);
  1765   debug_only(ec->mangleFreed(size));
  1766   if (size < SmallForDictionary) {
  1767     lock = _indexedFreeListParLocks[size];
  1769   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1770   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  1771   // record the birth under the lock since the recording involves
  1772   // manipulation of the list on which the chunk lives and
  1773   // if the chunk is allocated and is the last on the list,
  1774   // the list can go away.
  1775   coalBirth(size);
  1778 void
  1779 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
  1780                                               size_t     size) {
  1781   // check that the chunk does lie in this space!
  1782   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1783   assert_locked();
  1784   _bt.verify_single_block(chunk, size);
  1786   FreeChunk* fc = (FreeChunk*) chunk;
  1787   fc->set_size(size);
  1788   debug_only(fc->mangleFreed(size));
  1789   if (size < SmallForDictionary) {
  1790     returnChunkToFreeList(fc);
  1791   } else {
  1792     returnChunkToDictionary(fc);
  1796 void
  1797 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
  1798   size_t size, bool coalesced) {
  1799   assert_locked();
  1800   assert(chunk != NULL, "null chunk");
  1801   if (coalesced) {
  1802     // repair BOT
  1803     _bt.single_block(chunk, size);
  1805   addChunkToFreeLists(chunk, size);
  1808 // We _must_ find the purported chunk on our free lists;
  1809 // we assert if we don't.
  1810 void
  1811 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
  1812   size_t size = fc->size();
  1813   assert_locked();
  1814   debug_only(verifyFreeLists());
  1815   if (size < SmallForDictionary) {
  1816     removeChunkFromIndexedFreeList(fc);
  1817   } else {
  1818     removeChunkFromDictionary(fc);
  1820   _bt.verify_single_block((HeapWord*)fc, size);
  1821   debug_only(verifyFreeLists());
  1824 void
  1825 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
  1826   size_t size = fc->size();
  1827   assert_locked();
  1828   assert(fc != NULL, "null chunk");
  1829   _bt.verify_single_block((HeapWord*)fc, size);
  1830   _dictionary->remove_chunk(fc);
  1831   // adjust _unallocated_block upward, as necessary
  1832   _bt.allocated((HeapWord*)fc, size);
  1835 void
  1836 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
  1837   assert_locked();
  1838   size_t size = fc->size();
  1839   _bt.verify_single_block((HeapWord*)fc, size);
  1840   NOT_PRODUCT(
  1841     if (FLSVerifyIndexTable) {
  1842       verifyIndexedFreeList(size);
  1845   _indexedFreeList[size].remove_chunk(fc);
  1846   NOT_PRODUCT(
  1847     if (FLSVerifyIndexTable) {
  1848       verifyIndexedFreeList(size);
  1853 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
  1854   /* A hint is the next larger size that has a surplus.
  1855      Start search at a size large enough to guarantee that
  1856      the excess is >= MIN_CHUNK. */
  1857   size_t start = align_object_size(numWords + MinChunkSize);
  1858   if (start < IndexSetSize) {
  1859     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
  1860     size_t    hint = _indexedFreeList[start].hint();
  1861     while (hint < IndexSetSize) {
  1862       assert(hint % MinObjAlignment == 0, "hint should be aligned");
  1863       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
  1864       if (fl->surplus() > 0 && fl->head() != NULL) {
  1865         // Found a list with surplus, reset original hint
  1866         // and split out a free chunk which is returned.
  1867         _indexedFreeList[start].set_hint(hint);
  1868         FreeChunk* res = getFromListGreater(fl, numWords);
  1869         assert(res == NULL || res->is_free(),
  1870           "Should be returning a free chunk");
  1871         return res;
  1873       hint = fl->hint(); /* keep looking */
  1875     /* None found. */
  1876     it[start].set_hint(IndexSetSize);
  1878   return NULL;
  1881 /* Requires fl->size >= numWords + MinChunkSize */
  1882 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
  1883   size_t numWords) {
  1884   FreeChunk *curr = fl->head();
   1885   assert(curr != NULL, "List is empty");
   1886   size_t oldNumWords = curr->size();
   1887   assert(numWords >= MinChunkSize, "Word size is too small");
  1888   assert(oldNumWords >= numWords + MinChunkSize,
  1889         "Size of chunks in the list is too small");
  1891   fl->remove_chunk(curr);
  1892   // recorded indirectly by splitChunkAndReturnRemainder -
  1893   // smallSplit(oldNumWords, numWords);
  1894   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
  1895   // Does anything have to be done for the remainder in terms of
  1896   // fixing the card table?
  1897   assert(new_chunk == NULL || new_chunk->is_free(),
  1898     "Should be returning a free chunk");
  1899   return new_chunk;
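        // Split the given free chunk into a front piece of new_size words,
        // which is returned to the caller, and a remainder which is linked back
        // onto the indexed free lists or the dictionary as appropriate.  The
        // remainder is marked free before the BOT is updated (with a
        // store-store barrier in between, so parallel threads never see an
        // inconsistent view), and the split is recorded in the census via split().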
  1902 FreeChunk*
  1903 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
  1904   size_t new_size) {
  1905   assert_locked();
  1906   size_t size = chunk->size();
  1907   assert(size > new_size, "Split from a smaller block?");
  1908   assert(is_aligned(chunk), "alignment problem");
  1909   assert(size == adjustObjectSize(size), "alignment problem");
  1910   size_t rem_size = size - new_size;
  1911   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
  1912   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
  1913   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
  1914   assert(is_aligned(ffc), "alignment problem");
  1915   ffc->set_size(rem_size);
  1916   ffc->link_next(NULL);
  1917   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
  1918   // Above must occur before BOT is updated below.
  1919   // adjust block offset table
  1920   OrderAccess::storestore();
  1921   assert(chunk->is_free() && ffc->is_free(), "Error");
  1922   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
  1923   if (rem_size < SmallForDictionary) {
  1924     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  1925     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
  1926     assert(!is_par ||
  1927            (SharedHeap::heap()->n_par_threads() ==
  1928             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  1929     returnChunkToFreeList(ffc);
  1930     split(size, rem_size);
  1931     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
  1932   } else {
  1933     returnChunkToDictionary(ffc);
  1934     split(size ,rem_size);
  1936   chunk->set_size(new_size);
  1937   return chunk;
  1940 void
  1941 CompactibleFreeListSpace::sweep_completed() {
  1942   // Now that space is probably plentiful, refill linear
  1943   // allocation blocks as needed.
  1944   refillLinearAllocBlocksIfNeeded();
  1947 void
  1948 CompactibleFreeListSpace::gc_prologue() {
  1949   assert_locked();
  1950   if (PrintFLSStatistics != 0) {
  1951     gclog_or_tty->print("Before GC:\n");
  1952     reportFreeListStatistics();
  1954   refillLinearAllocBlocksIfNeeded();
  1957 void
  1958 CompactibleFreeListSpace::gc_epilogue() {
  1959   assert_locked();
  1960   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
  1961     if (_smallLinearAllocBlock._word_size == 0)
  1962       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
  1964   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1965   _promoInfo.stopTrackingPromotions();
  1966   repairLinearAllocationBlocks();
  1967   // Print Space's stats
  1968   if (PrintFLSStatistics != 0) {
  1969     gclog_or_tty->print("After GC:\n");
  1970     reportFreeListStatistics();
  1974 // Iteration support, mostly delegated from a CMS generation
  1976 void CompactibleFreeListSpace::save_marks() {
  1977   assert(Thread::current()->is_VM_thread(),
  1978          "Global variable should only be set when single-threaded");
  1979   // Mark the "end" of the used space at the time of this call;
  1980   // note, however, that promoted objects from this point
  1981   // on are tracked in the _promoInfo below.
  1982   set_saved_mark_word(unallocated_block());
  1983 #ifdef ASSERT
  1984   // Check the sanity of save_marks() etc.
  1985   MemRegion ur    = used_region();
  1986   MemRegion urasm = used_region_at_save_marks();
  1987   assert(ur.contains(urasm),
  1988          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
  1989                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
  1990                  ur.start(), ur.end(), urasm.start(), urasm.end()));
  1991 #endif
  1992   // inform allocator that promotions should be tracked.
  1993   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1994   _promoInfo.startTrackingPromotions();
  1997 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
  1998   assert(_promoInfo.tracking(), "No preceding save_marks?");
  1999   assert(SharedHeap::heap()->n_par_threads() == 0,
  2000          "Shouldn't be called if using parallel gc.");
  2001   return _promoInfo.noPromotions();
  2004 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
  2006 void CompactibleFreeListSpace::                                             \
  2007 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  2008   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
  2009          "Shouldn't be called (yet) during parallel part of gc.");          \
  2010   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
  2011   /*                                                                        \
  2012    * This also restores any displaced headers and removes the elements from \
  2013    * the iteration set as they are processed, so that we have a clean slate \
  2014    * at the end of the iteration. Note, thus, that if new objects are       \
  2015    * promoted as a result of the iteration they are iterated over as well.  \
  2016    */                                                                       \
  2017   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
  2020 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
  2023 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
  2024   // ugghh... how would one do this efficiently for a non-contiguous space?
  2025   guarantee(false, "NYI");
  2028 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  2029   return _smallLinearAllocBlock._word_size == 0;
  2032 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
  2033   // Fix up linear allocation blocks to look like free blocks
  2034   repairLinearAllocBlock(&_smallLinearAllocBlock);
  2037 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
  2038   assert_locked();
  2039   if (blk->_ptr != NULL) {
  2040     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
  2041            "Minimum block size requirement");
  2042     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
  2043     fc->set_size(blk->_word_size);
  2044     fc->link_prev(NULL);   // mark as free
  2045     fc->dontCoalesce();
  2046     assert(fc->is_free(), "just marked it free");
  2047     assert(fc->cantCoalesce(), "just marked it uncoalescable");
  2051 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
  2052   assert_locked();
  2053   if (_smallLinearAllocBlock._ptr == NULL) {
  2054     assert(_smallLinearAllocBlock._word_size == 0,
  2055       "Size of linAB should be zero if the ptr is NULL");
  2056     // Reset the linAB refill and allocation size limit.
  2057     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
  2059   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
  2062 void
  2063 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
  2064   assert_locked();
  2065   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
  2066          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
  2067          "blk invariant");
  2068   if (blk->_ptr == NULL) {
  2069     refillLinearAllocBlock(blk);
  2071   if (PrintMiscellaneous && Verbose) {
  2072     if (blk->_word_size == 0) {
  2073       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
  2078 void
  2079 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  2080   assert_locked();
  2081   assert(blk->_word_size == 0 && blk->_ptr == NULL,
  2082          "linear allocation block should be empty");
  2083   FreeChunk* fc;
  2084   if (blk->_refillSize < SmallForDictionary &&
  2085       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
  2086     // A linAB's strategy might be to use small sizes to reduce
  2087     // fragmentation but still get the benefits of allocation from a
  2088     // linAB.
  2089   } else {
  2090     fc = getChunkFromDictionary(blk->_refillSize);
  2092   if (fc != NULL) {
  2093     blk->_ptr  = (HeapWord*)fc;
  2094     blk->_word_size = fc->size();
  2095     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
  2099 // Support for concurrent collection policy decisions.
  2100 bool CompactibleFreeListSpace::should_concurrent_collect() const {
   2101   // In the future we might want to add fragmentation stats --
  2102   // including erosion of the "mountain" into this decision as well.
  2103   return !adaptive_freelists() && linearAllocationWouldFail();
  2106 // Support for compaction
  2108 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  2109   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  2110   // prepare_for_compaction() uses the space between live objects
   2111   // so that a later phase can skip dead space quickly.  As a result, verification
   2112   // of the free lists doesn't work after this point.
  2115 #define obj_size(q) adjustObjectSize(oop(q)->size())
  2116 #define adjust_obj_size(s) adjustObjectSize(s)
  2118 void CompactibleFreeListSpace::adjust_pointers() {
  2119   // In other versions of adjust_pointers(), a bail out
  2120   // based on the amount of live data in the generation
  2121   // (i.e., if 0, bail out) may be used.
  2122   // Cannot test used() == 0 here because the free lists have already
  2123   // been mangled by the compaction.
  2125   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
  2126   // See note about verification in prepare_for_compaction().
  2129 void CompactibleFreeListSpace::compact() {
  2130   SCAN_AND_COMPACT(obj_size);
  2133 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
  2134 // where fbs is free block sizes
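        // A quick sanity check of the formula above: the metric is 0.0 when all
        // free space sits in a single block and approaches 1.0 as free space is
        // spread over many small blocks.  For example, four free blocks of 100
        // words each give 1 - (4*100^2)/(400^2) = 1 - 0.25 = 0.75, whereas a
        // single 400-word block gives 1 - 400^2/400^2 = 0.0.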
  2135 double CompactibleFreeListSpace::flsFrag() const {
  2136   size_t itabFree = totalSizeInIndexedFreeLists();
  2137   double frag = 0.0;
  2138   size_t i;
  2140   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2141     double sz  = i;
  2142     frag      += _indexedFreeList[i].count() * (sz * sz);
  2145   double totFree = itabFree +
  2146                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
  2147   if (totFree > 0) {
  2148     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
  2149             (totFree * totFree));
  2150     frag = (double)1.0  - frag;
  2151   } else {
  2152     assert(frag == 0.0, "Follows from totFree == 0");
  2154   return frag;
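        // At the start of a sweep, record census data for every indexed free
        // list: compute the desired count from the inter/intra-sweep estimates,
        // derive the coalescing threshold from it, and snapshot the current
        // count and surplus.  The dictionary keeps the corresponding census for
        // the large chunks.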
  2157 void CompactibleFreeListSpace::beginSweepFLCensus(
  2158   float inter_sweep_current,
  2159   float inter_sweep_estimate,
  2160   float intra_sweep_estimate) {
  2161   assert_locked();
  2162   size_t i;
  2163   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2164     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
  2165     if (PrintFLSStatistics > 1) {
  2166       gclog_or_tty->print("size[%d] : ", i);
  2168     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
  2169     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
  2170     fl->set_before_sweep(fl->count());
  2171     fl->set_bfr_surp(fl->surplus());
  2173   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
  2174                                     inter_sweep_current,
  2175                                     inter_sweep_estimate,
  2176                                     intra_sweep_estimate);
  2179 void CompactibleFreeListSpace::setFLSurplus() {
  2180   assert_locked();
  2181   size_t i;
  2182   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2183     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2184     fl->set_surplus(fl->count() -
  2185                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
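        // Walk the indexed free lists from largest to smallest, setting each
        // list's hint to the index of the next larger list that currently has a
        // surplus (or to IndexSetSize if there is none).  bestFitSmall() above
        // follows this chain of hints when looking for a larger list to split a
        // chunk from.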
  2189 void CompactibleFreeListSpace::setFLHints() {
  2190   assert_locked();
  2191   size_t i;
  2192   size_t h = IndexSetSize;
  2193   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
  2194     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2195     fl->set_hint(h);
  2196     if (fl->surplus() > 0) {
  2197       h = i;
  2202 void CompactibleFreeListSpace::clearFLCensus() {
  2203   assert_locked();
  2204   size_t i;
  2205   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2206     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2207     fl->set_prev_sweep(fl->count());
  2208     fl->set_coal_births(0);
  2209     fl->set_coal_deaths(0);
  2210     fl->set_split_births(0);
  2211     fl->set_split_deaths(0);
  2215 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  2216   if (PrintFLSStatistics > 0) {
  2217     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
  2218     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
  2219                            largestAddr);
  2221   setFLSurplus();
  2222   setFLHints();
  2223   if (PrintGC && PrintFLSCensus > 0) {
  2224     printFLCensus(sweep_count);
  2226   clearFLCensus();
  2227   assert_locked();
  2228   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
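        // A size class is considered over-populated for coalescing purposes
        // when its current count exceeds the coal_desired value computed during
        // the sweep census (a negative coal_desired also counts as
        // over-populated).  Callers such as the CMS sweep closure can use this
        // when deciding whether a free chunk of this size is worth coalescing
        // with its neighbor or better left on its own list.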
  2231 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
  2232   if (size < SmallForDictionary) {
  2233     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2234     return (fl->coal_desired() < 0) ||
  2235            ((int)fl->count() > fl->coal_desired());
  2236   } else {
  2237     return dictionary()->coal_dict_over_populated(size);
  2241 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
  2242   assert(size < SmallForDictionary, "Size too large for indexed list");
  2243   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2244   fl->increment_coal_births();
  2245   fl->increment_surplus();
  2248 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
  2249   assert(size < SmallForDictionary, "Size too large for indexed list");
  2250   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2251   fl->increment_coal_deaths();
  2252   fl->decrement_surplus();
  2255 void CompactibleFreeListSpace::coalBirth(size_t size) {
  2256   if (size  < SmallForDictionary) {
  2257     smallCoalBirth(size);
  2258   } else {
  2259     dictionary()->dict_census_update(size,
  2260                                    false /* split */,
  2261                                    true /* birth */);
  2265 void CompactibleFreeListSpace::coalDeath(size_t size) {
  2266   if(size  < SmallForDictionary) {
  2267     smallCoalDeath(size);
  2268   } else {
  2269     dictionary()->dict_census_update(size,
  2270                                    false /* split */,
  2271                                    false /* birth */);
  2275 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
  2276   assert(size < SmallForDictionary, "Size too large for indexed list");
  2277   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2278   fl->increment_split_births();
  2279   fl->increment_surplus();
  2282 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
  2283   assert(size < SmallForDictionary, "Size too large for indexed list");
  2284   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2285   fl->increment_split_deaths();
  2286   fl->decrement_surplus();
  2289 void CompactibleFreeListSpace::split_birth(size_t size) {
  2290   if (size  < SmallForDictionary) {
  2291     smallSplitBirth(size);
  2292   } else {
  2293     dictionary()->dict_census_update(size,
  2294                                    true /* split */,
  2295                                    true /* birth */);
  2299 void CompactibleFreeListSpace::splitDeath(size_t size) {
  2300   if (size  < SmallForDictionary) {
  2301     smallSplitDeath(size);
  2302   } else {
  2303     dictionary()->dict_census_update(size,
  2304                                    true /* split */,
  2305                                    false /* birth */);
  2309 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
  2310   size_t to2 = from - to1;
  2311   splitDeath(from);
  2312   split_birth(to1);
  2313   split_birth(to2);
  2316 void CompactibleFreeListSpace::print() const {
  2317   print_on(tty);
  2320 void CompactibleFreeListSpace::prepare_for_verify() {
  2321   assert_locked();
  2322   repairLinearAllocationBlocks();
  2323   // Verify that the SpoolBlocks look like free blocks of
  2324   // appropriate sizes... To be done ...
  2327 class VerifyAllBlksClosure: public BlkClosure {
  2328  private:
  2329   const CompactibleFreeListSpace* _sp;
  2330   const MemRegion                 _span;
  2331   HeapWord*                       _last_addr;
  2332   size_t                          _last_size;
  2333   bool                            _last_was_obj;
  2334   bool                            _last_was_live;
  2336  public:
  2337   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
  2338     MemRegion span) :  _sp(sp), _span(span),
  2339                        _last_addr(NULL), _last_size(0),
  2340                        _last_was_obj(false), _last_was_live(false) { }
  2342   virtual size_t do_blk(HeapWord* addr) {
  2343     size_t res;
  2344     bool   was_obj  = false;
  2345     bool   was_live = false;
  2346     if (_sp->block_is_obj(addr)) {
  2347       was_obj = true;
  2348       oop p = oop(addr);
  2349       guarantee(p->is_oop(), "Should be an oop");
  2350       res = _sp->adjustObjectSize(p->size());
  2351       if (_sp->obj_is_alive(addr)) {
  2352         was_live = true;
  2353         p->verify();
  2355     } else {
  2356       FreeChunk* fc = (FreeChunk*)addr;
  2357       res = fc->size();
  2358       if (FLSVerifyLists && !fc->cantCoalesce()) {
  2359         guarantee(_sp->verify_chunk_in_free_list(fc),
  2360                   "Chunk should be on a free list");
  2363     if (res == 0) {
  2364       gclog_or_tty->print_cr("Livelock: no rank reduction!");
  2365       gclog_or_tty->print_cr(
  2366         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
  2367         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
  2368         addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
  2369         _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
  2370       _sp->print_on(gclog_or_tty);
  2371       guarantee(false, "Seppuku!");
  2373     _last_addr = addr;
  2374     _last_size = res;
  2375     _last_was_obj  = was_obj;
  2376     _last_was_live = was_live;
  2377     return res;
  2379 };
  2381 class VerifyAllOopsClosure: public OopClosure {
  2382  private:
  2383   const CMSCollector*             _collector;
  2384   const CompactibleFreeListSpace* _sp;
  2385   const MemRegion                 _span;
  2386   const bool                      _past_remark;
  2387   const CMSBitMap*                _bit_map;
  2389  protected:
  2390   void do_oop(void* p, oop obj) {
  2391     if (_span.contains(obj)) { // the interior oop points into CMS heap
  2392       if (!_span.contains(p)) { // reference from outside CMS heap
  2393         // Should be a valid object; the first disjunct below allows
  2394         // us to sidestep an assertion in block_is_obj() that insists
  2395         // that p be in _sp. Note that several generations (and spaces)
  2396         // are spanned by _span (CMS heap) above.
  2397         guarantee(!_sp->is_in_reserved(obj) ||
  2398                   _sp->block_is_obj((HeapWord*)obj),
  2399                   "Should be an object");
  2400         guarantee(obj->is_oop(), "Should be an oop");
  2401         obj->verify();
  2402         if (_past_remark) {
  2403           // Remark has been completed, the object should be marked
  2404           _bit_map->isMarked((HeapWord*)obj);
  2406       } else { // reference within CMS heap
  2407         if (_past_remark) {
  2408           // Remark has been completed -- so the referent should have
  2409           // been marked, if referring object is.
  2410           if (_bit_map->isMarked(_collector->block_start(p))) {
  2411             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
  2415     } else if (_sp->is_in_reserved(p)) {
  2416       // the reference is from FLS, and points out of FLS
  2417       guarantee(obj->is_oop(), "Should be an oop");
  2418       obj->verify();
  2422   template <class T> void do_oop_work(T* p) {
  2423     T heap_oop = oopDesc::load_heap_oop(p);
  2424     if (!oopDesc::is_null(heap_oop)) {
  2425       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2426       do_oop(p, obj);
  2430  public:
  2431   VerifyAllOopsClosure(const CMSCollector* collector,
  2432     const CompactibleFreeListSpace* sp, MemRegion span,
  2433     bool past_remark, CMSBitMap* bit_map) :
  2434     _collector(collector), _sp(sp), _span(span),
  2435     _past_remark(past_remark), _bit_map(bit_map) { }
  2437   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
  2438   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
  2439 };
  2441 void CompactibleFreeListSpace::verify() const {
  2442   assert_lock_strong(&_freelistLock);
  2443   verify_objects_initialized();
  2444   MemRegion span = _collector->_span;
  2445   bool past_remark = (_collector->abstract_state() ==
  2446                       CMSCollector::Sweeping);
  2448   ResourceMark rm;
  2449   HandleMark  hm;
  2451   // Check integrity of CFL data structures
  2452   _promoInfo.verify();
  2453   _dictionary->verify();
  2454   if (FLSVerifyIndexTable) {
  2455     verifyIndexedFreeLists();
  2457   // Check integrity of all objects and free blocks in space
  2459     VerifyAllBlksClosure cl(this, span);
  2460     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
  2462   // Check that all references in the heap to FLS
  2463   // are to valid objects in FLS or that references in
  2464   // FLS are to valid objects elsewhere in the heap
  2465   if (FLSVerifyAllHeapReferences)
  2467     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
  2468       _collector->markBitMap());
  2469     CollectedHeap* ch = Universe::heap();
  2471     // Iterate over all oops in the heap. Uses the _no_header version
  2472     // since we are not interested in following the klass pointers.
  2473     ch->oop_iterate_no_header(&cl);
  2476   if (VerifyObjectStartArray) {
  2477     // Verify the block offset table
  2478     _bt.verify();
  2482 #ifndef PRODUCT
  2483 void CompactibleFreeListSpace::verifyFreeLists() const {
  2484   if (FLSVerifyLists) {
  2485     _dictionary->verify();
  2486     verifyIndexedFreeLists();
  2487   } else {
  2488     if (FLSVerifyDictionary) {
  2489       _dictionary->verify();
  2491     if (FLSVerifyIndexTable) {
  2492       verifyIndexedFreeLists();
  2496 #endif
  2498 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
  2499   size_t i = 0;
  2500   for (; i < IndexSetStart; i++) {
  2501     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
  2503   for (; i < IndexSetSize; i++) {
  2504     verifyIndexedFreeList(i);
  2508 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
  2509   FreeChunk* fc   =  _indexedFreeList[size].head();
  2510   FreeChunk* tail =  _indexedFreeList[size].tail();
  2511   size_t    num = _indexedFreeList[size].count();
  2512   size_t      n = 0;
  2513   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
  2514             "Slot should have been empty");
  2515   for (; fc != NULL; fc = fc->next(), n++) {
  2516     guarantee(fc->size() == size, "Size inconsistency");
  2517     guarantee(fc->is_free(), "!free?");
  2518     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
  2519     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
  2521   guarantee(n == num, "Incorrect count");
  2524 #ifndef PRODUCT
  2525 void CompactibleFreeListSpace::check_free_list_consistency() const {
  2526   assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size() <= IndexSetSize),
  2527     "Some sizes can't be allocated without recourse to"
  2528     " linear allocation buffers");
  2529   assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList>)),
  2530     "else MIN_TREE_CHUNK_SIZE is wrong");
  2531   assert(IndexSetStart != 0, "IndexSetStart not initialized");
  2532   assert(IndexSetStride != 0, "IndexSetStride not initialized");
  2534 #endif
  2536 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  2537   assert_lock_strong(&_freelistLock);
  2538   AdaptiveFreeList<FreeChunk> total;
  2539   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  2540   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
  2541   size_t total_free = 0;
  2542   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2543     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2544     total_free += fl->count() * fl->size();
  2545     if (i % (40*IndexSetStride) == 0) {
  2546       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
  2548     fl->print_on(gclog_or_tty);
  2549     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
  2550     total.set_surplus(    total.surplus()     + fl->surplus()    );
  2551     total.set_desired(    total.desired()     + fl->desired()    );
  2552     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
  2553     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
  2554     total.set_count(      total.count()       + fl->count()      );
  2555     total.set_coal_births( total.coal_births()  + fl->coal_births() );
  2556     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
  2557     total.set_split_births(total.split_births() + fl->split_births());
  2558     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
  2559   }
  2560   total.print_on(gclog_or_tty, "TOTAL");
  2561   gclog_or_tty->print_cr("Total free in indexed lists "
  2562                          SIZE_FORMAT " words", total_free);
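         // The two ratios printed next summarize the census: "growth" is the
         // net gain in blocks over the last sweep (split births plus coalesce
         // births, minus the corresponding deaths) relative to the count at
         // the previous sweep, and "deficit" is the shortfall of the current
         // count from the desired count, expressed as a fraction of desired.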
  2563   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
  2564     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
  2565             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
  2566     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
  2567   _dictionary->print_dict_census();
  2568 }
  2570 ///////////////////////////////////////////////////////////////////////////
  2571 // CFLS_LAB
  2572 ///////////////////////////////////////////////////////////////////////////
  2574 #define VECTOR_257(x)                                                                                  \
  2575   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
  2576   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2577      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2578      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2579      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2580      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2581      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2582      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2583      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2584      x }
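       // VECTOR_257() expands to a 257-element array initializer, one slot per
       // indexed free list size (the CFLS_LAB constructor below asserts that
       // IndexSetSize == 257), so the per-size static tables can be filled
       // with a single repeated value.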
  2586 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
  2587 // OldPLABSize, whose static default is different; if overridden at the
  2588 // command-line, this will get reinitialized via a call to
  2589 // modify_initialization() below.
  2590 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
  2591   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
  2592 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
  2593 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
  2595 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
  2596   _cfls(cfls)
  2597 {
  2598   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
  2599   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2600        i < CompactibleFreeListSpace::IndexSetSize;
  2601        i += CompactibleFreeListSpace::IndexSetStride) {
  2602     _indexedFreeList[i].set_size(i);
  2603     _num_blocks[i] = 0;
  2604   }
  2605 }
  2607 static bool _CFLS_LAB_modified = false;
  2609 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
  2610   assert(!_CFLS_LAB_modified, "Call only once");
  2611   _CFLS_LAB_modified = true;
  2612   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2613        i < CompactibleFreeListSpace::IndexSetSize;
  2614        i += CompactibleFreeListSpace::IndexSetStride) {
  2615     _blocks_to_claim[i].modify(n, wt, true /* force */);
  2616   }
  2617 }
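       // CFLS_LAB::alloc() below takes one of two paths: requests of
       // IndexSetSize words or more go to the shared dictionary under
       // parDictionaryAllocLock(), while smaller requests are served from the
       // worker-local indexed free list, which is refilled on demand from the
       // global pool by get_from_global_pool().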
  2619 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  2620   FreeChunk* res;
  2621   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
  2622   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
  2623     // This locking manages sync with other large object allocations.
  2624     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
  2625                     Mutex::_no_safepoint_check_flag);
  2626     res = _cfls->getChunkFromDictionaryExact(word_sz);
  2627     if (res == NULL) return NULL;
  2628   } else {
  2629     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
  2630     if (fl->count() == 0) {
  2631       // Attempt to refill this local free list.
  2632       get_from_global_pool(word_sz, fl);
  2633       // If it didn't work, give up.
  2634       if (fl->count() == 0) return NULL;
  2635     }
  2636     res = fl->get_chunk_at_head();
  2637     assert(res != NULL, "Why was count non-zero?");
  2638   }
  2639   res->markNotFree();
  2640   assert(!res->is_free(), "shouldn't be marked free");
  2641   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  2642   // mangle a just allocated object with a distinct pattern.
  2643   debug_only(res->mangleAllocated(word_sz));
  2644   return (HeapWord*)res;
  2645 }
  2647 // Get a chunk of blocks of the right size and update related
  2648 // book-keeping stats
  2649 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
  2650   // Get the #blocks we want to claim
  2651   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
  2652   assert(n_blks > 0, "Error");
  2653   assert(ResizePLAB || n_blks == OldPLABSize, "Error");
  2654   // In some cases, when the application has a phase change,
  2655   // there may be a sudden and sharp shift in the object survival
  2656   // profile, and updating the counts at the end of a scavenge
  2657   // may not be quick enough, giving rise to large scavenge pauses
  2658   // during these phase changes. It is beneficial to detect such
  2659   // changes on-the-fly during a scavenge and avoid such a phase-change
  2660   // pothole. The following code is a heuristic attempt to do that.
  2661   // It is protected by a product flag until we have gained
  2662   // enough experience with this heuristic and fine-tuned its behaviour.
  2663   // WARNING: This might increase fragmentation if we overreact to
  2664   // small spikes, so some kind of historical smoothing based on
  2665   // previous experience with the greater reactivity might be useful.
  2666   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
  2667   // default.
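         // A purely illustrative example (the numbers are not necessarily the
         // flag defaults): with n_blks == 50, CMSOldPLABToleranceFactor == 4,
         // CMSOldPLABNumRefills == 4 and _num_blocks[word_sz] == 4000, the
         // multiple computed below is 4000/(4*4*50) == 5, so the claim grows
         // by CMSOldPLABReactivityFactor*5*50 blocks before being clamped to
         // CMSOldPLABMax.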
  2668   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
  2669     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
  2670     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
  2671     n_blks = MIN2(n_blks, CMSOldPLABMax);
  2672   }
  2673   assert(n_blks > 0, "Error");
  2674   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
  2675   // Update stats table entry for this block size
  2676   _num_blocks[word_sz] += fl->count();
  2677 }
  2679 void CFLS_LAB::compute_desired_plab_size() {
  2680   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2681        i < CompactibleFreeListSpace::IndexSetSize;
  2682        i += CompactibleFreeListSpace::IndexSetStride) {
  2683     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
  2684            "Counter inconsistency");
  2685     if (_global_num_workers[i] > 0) {
  2686       // Need to smooth wrt historical average
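             // The value sampled below is the number of blocks of this size
             // used per refill per worker in the scavenge just completed,
             // clamped to [CMSOldPLABMin, CMSOldPLABMax]; it feeds the
             // _blocks_to_claim average consulted by get_from_global_pool().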
  2687       if (ResizeOldPLAB) {
  2688         _blocks_to_claim[i].sample(
  2689           MAX2((size_t)CMSOldPLABMin,
  2690           MIN2((size_t)CMSOldPLABMax,
  2691                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
  2692       }
  2693       // Reset counters for next round
  2694       _global_num_workers[i] = 0;
  2695       _global_num_blocks[i] = 0;
  2696       if (PrintOldPLAB) {
  2697         gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average());
  2698       }
  2699     }
  2700   }
  2701 }
  2703 // If this is changed in the future to allow parallel
  2704 // access, one would need to take the FL locks and,
  2705 // depending on how it is used, stagger access from
  2706 // parallel threads to reduce contention.
  2707 void CFLS_LAB::retire(int tid) {
  2708   // We run this single threaded with the world stopped;
  2709   // so no need for locks and such.
  2710   NOT_PRODUCT(Thread* t = Thread::current();)
  2711   assert(Thread::current()->is_VM_thread(), "Error");
  2712   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2713        i < CompactibleFreeListSpace::IndexSetSize;
  2714        i += CompactibleFreeListSpace::IndexSetStride) {
  2715     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
  2716            "Can't retire more than what we obtained");
  2717     if (_num_blocks[i] > 0) {
  2718       size_t num_retire =  _indexedFreeList[i].count();
  2719       assert(_num_blocks[i] > num_retire, "Should have used at least one");
  2720       {
  2721         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
  2722         //                Mutex::_no_safepoint_check_flag);
  2724         // Update globals stats for num_blocks used
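               // (_num_blocks[i] - num_retire) is the number of blocks of this
               // size the worker actually consumed; the unused remainder on
               // the local list is prepended back onto the shared indexed list
               // below, and compute_desired_plab_size() later turns these
               // global tallies into the next _blocks_to_claim estimate.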
  2725         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
  2726         _global_num_workers[i]++;
  2727         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
  2728         if (num_retire > 0) {
  2729           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
  2730           // Reset this list.
  2731           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
  2732           _indexedFreeList[i].set_size(i);
  2733         }
  2734       }
  2735       if (PrintOldPLAB) {
  2736         gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
  2737                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
  2738       }
  2739       // Reset stats for next round
  2740       _num_blocks[i]         = 0;
  2741     }
  2742   }
  2743 }
  2745 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
  2746   assert(fl->count() == 0, "Precondition.");
  2747   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
  2748          "Precondition");
  2750   // We'll try all multiples of word_sz in the indexed set, starting with
  2751   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
  2752   // then try getting a big chunk and splitting it.
  2753   {
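           // In outline: for k = 1, 2, 3, ... the loop below consults the
           // indexed list of size k*word_sz; the first non-empty list found
           // supplies chunks, each of which is split k ways into word_sz-sized
           // blocks (for k == 1 they are taken as-is) and the method returns.
           // Only if no multiple yields anything does control fall through to
           // the dictionary path further down.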
  2754     bool found;
  2755     int  k;
  2756     size_t cur_sz;
  2757     for (k = 1, cur_sz = k * word_sz, found = false;
  2758          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
  2759          (CMSSplitIndexedFreeListBlocks || k <= 1);
  2760          k++, cur_sz = k * word_sz) {
  2761       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
  2762       fl_for_cur_sz.set_size(cur_sz);
  2763       {
  2764         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
  2765                         Mutex::_no_safepoint_check_flag);
  2766         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
  2767         if (gfl->count() != 0) {
  2768           // nn is the number of chunks of size cur_sz that
  2769           // we'd need to split k-ways each, in order to create
  2770           // "n" chunks of size word_sz each.
  2771           const size_t nn = MAX2(n/k, (size_t)1);
  2772           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
  2773           found = true;
  2774           if (k > 1) {
  2775             // Update split death stats for the cur_sz-size blocks list:
  2776             // we increment the split death count by the number of blocks
  2777             // we just took from the cur_sz-size blocks list and which
  2778             // we will be splitting below.
  2779             ssize_t deaths = gfl->split_deaths() +
  2780                              fl_for_cur_sz.count();
  2781             gfl->set_split_deaths(deaths);
  2782           }
  2783         }
  2784       }
  2785       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
  2786       if (found) {
  2787         if (k == 1) {
  2788           fl->prepend(&fl_for_cur_sz);
  2789         } else {
  2790           // Divide each block on fl_for_cur_sz up k ways.
  2791           FreeChunk* fc;
  2792           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
  2793             // Must do this in reverse order, so that anybody attempting to
  2794             // access the main chunk sees it as a single free block until we
  2795             // change it.
  2796             size_t fc_size = fc->size();
  2797             assert(fc->is_free(), "Error");
  2798             for (int i = k-1; i >= 0; i--) {
  2799               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2800               assert((i != 0) ||
  2801                         ((fc == ffc) && ffc->is_free() &&
  2802                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
  2803                         "Counting error");
  2804               ffc->set_size(word_sz);
  2805               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
  2806               ffc->link_next(NULL);
  2807               // Above must occur before BOT is updated below.
  2808               OrderAccess::storestore();
  2809               // splitting from the right, fc_size == i * word_sz
  2810               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
  2811               fc_size -= word_sz;
  2812               assert(fc_size == i*word_sz, "Error");
  2813               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
  2814               _bt.verify_single_block((HeapWord*)fc, fc_size);
  2815               _bt.verify_single_block((HeapWord*)ffc, word_sz);
  2816               // Push this on "fl".
  2817               fl->return_chunk_at_head(ffc);
  2818             }
  2819             // TRAP
  2820             assert(fl->tail()->next() == NULL, "List invariant.");
  2821           }
  2822         }
  2823         // Update birth stats for this block size.
  2824         size_t num = fl->count();
  2825         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  2826                         Mutex::_no_safepoint_check_flag);
  2827         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
  2828         _indexedFreeList[word_sz].set_split_births(births);
  2829         return;
  2830       }
  2831     }
  2832   }
  2833   // Otherwise, we'll split a block from the dictionary.
  2834   FreeChunk* fc = NULL;
  2835   FreeChunk* rem_fc = NULL;
  2836   size_t rem;
  2837   {
  2838     MutexLockerEx x(parDictionaryAllocLock(),
  2839                     Mutex::_no_safepoint_check_flag);
  2840     while (n > 0) {
  2841       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
  2842                                   FreeBlockDictionary<FreeChunk>::atLeast);
  2843       if (fc != NULL) {
  2844         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
  2845         dictionary()->dict_census_update(fc->size(),
  2846                                        true /*split*/,
  2847                                        false /*birth*/);
  2848         break;
  2849       } else {
  2850         n--;
  2851       }
  2852     }
  2853     if (fc == NULL) return;
  2854     // Otherwise, split up that block.
  2855     assert((ssize_t)n >= 1, "Control point invariant");
  2856     assert(fc->is_free(), "Error: should be a free block");
  2857     _bt.verify_single_block((HeapWord*)fc, fc->size());
  2858     const size_t nn = fc->size() / word_sz;
  2859     n = MIN2(nn, n);
  2860     assert((ssize_t)n >= 1, "Control point invariant");
  2861     rem = fc->size() - n * word_sz;
  2862     // If there is a remainder, and it's too small, allocate one fewer.
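           // For example (illustrative sizes only): if the dictionary returned
           // a 103-word chunk, word_sz is 10 and MinChunkSize is 5, then
           // n == 10 (assuming at least 10 blocks were requested) and
           // rem == 3 < MinChunkSize, so one block is given up and the
           // remainder becomes 13 words.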
  2863     if (rem > 0 && rem < MinChunkSize) {
  2864       n--; rem += word_sz;
  2865     }
  2866     // Note that at this point we may have n == 0.
  2867     assert((ssize_t)n >= 0, "Control point invariant");
  2869     // If n is 0, the chunk fc that was found is not large
  2870     // enough to leave a viable remainder.  We are unable to
  2871     // allocate even one block.  Return fc to the
  2872     // dictionary and return, leaving "fl" empty.
  2873     if (n == 0) {
  2874       returnChunkToDictionary(fc);
  2875       assert(fl->count() == 0, "We never allocated any blocks");
  2876       return;
  2877     }
  2879     // First return the remainder, if any.
  2880     // Note that we hold the lock until we decide if we're going to give
  2881     // back the remainder to the dictionary, since a concurrent allocation
  2882     // may otherwise see the heap as empty.  (We're willing to take that
  2883     // hit if the block is a small block.)
  2884     if (rem > 0) {
  2885       size_t prefix_size = n * word_sz;
  2886       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
  2887       rem_fc->set_size(rem);
  2888       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
  2889       rem_fc->link_next(NULL);
  2890       // Above must occur before BOT is updated below.
  2891       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
  2892       OrderAccess::storestore();
  2893       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
  2894       assert(fc->is_free(), "Error");
  2895       fc->set_size(prefix_size);
  2896       if (rem >= IndexSetSize) {
  2897         returnChunkToDictionary(rem_fc);
  2898         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
  2899         rem_fc = NULL;
  2900       }
  2901       // Otherwise, return it to the small list below.
  2902     }
  2903   }
  2904   if (rem_fc != NULL) {
  2905     MutexLockerEx x(_indexedFreeListParLocks[rem],
  2906                     Mutex::_no_safepoint_check_flag);
  2907     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
  2908     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
  2909     smallSplitBirth(rem);
  2910   }
  2911   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  2912   // Now do the splitting up.
  2913   // Must do this in reverse order, so that anybody attempting to
  2914   // access the main chunk sees it as a single free block until we
  2915   // change it.
  2916   size_t fc_size = n * word_sz;
  2917   // All but first chunk in this loop
  2918   for (ssize_t i = n-1; i > 0; i--) {
  2919     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2920     ffc->set_size(word_sz);
  2921     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
  2922     ffc->link_next(NULL);
  2923     // Above must occur before BOT is updated below.
  2924     OrderAccess::storestore();
  2925     // splitting from the right; after the decrement below, fc_size == i * word_sz
  2926     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
  2927     fc_size -= word_sz;
  2928     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
  2929     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
  2930     _bt.verify_single_block((HeapWord*)fc, fc_size);
  2931     // Push this on "fl".
  2932     fl->return_chunk_at_head(ffc);
  2933   }
  2934   // First chunk
  2935   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
  2936   // The blocks above should show their new sizes before the first block below
  2937   fc->set_size(word_sz);
  2938   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
  2939   fc->link_next(NULL);
  2940   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  2941   _bt.verify_single_block((HeapWord*)fc, fc->size());
  2942   fl->return_chunk_at_head(fc);
  2944   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  2945   {
  2946     // Update the stats for this block size.
  2947     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  2948                     Mutex::_no_safepoint_check_flag);
  2949     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
  2950     _indexedFreeList[word_sz].set_split_births(births);
  2951     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
  2952     // _indexedFreeList[word_sz].set_surplus(new_surplus);
  2953   }
  2955   // TRAP
  2956   assert(fl->tail()->next() == NULL, "List invariant.");
  2957 }
  2959 // Set up the space's par_seq_tasks structure for work claiming
  2960 // for parallel rescan. See CMSParRemarkTask where this is currently used.
  2961 // XXX Need to suitably abstract and generalize this and the next
  2962 // method into one.
  2963 void
  2964 CompactibleFreeListSpace::
  2965 initialize_sequential_subtasks_for_rescan(int n_threads) {
  2966   // The "size" of each task is fixed according to rescan_task_size.
  2967   assert(n_threads > 0, "Unexpected n_threads argument");
  2968   const size_t task_size = rescan_task_size();
  2969   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
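         // i.e. n_tasks is the ceiling of used_region().word_size()/task_size,
         // so the final task may cover less than a full task_size stripe.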
  2970   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  2971   assert(n_tasks == 0 ||
  2972          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
  2973           (used_region().start() + n_tasks*task_size >= used_region().end())),
  2974          "n_tasks calculation incorrect");
  2975   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  2976   assert(!pst->valid(), "Clobbering existing data?");
  2977   // Sets the condition for completion of the subtask (how many threads
  2978   // need to finish in order to be done).
  2979   pst->set_n_threads(n_threads);
  2980   pst->set_n_tasks((int)n_tasks);
  2981 }
  2983 // Set up the space's par_seq_tasks structure for work claiming
  2984 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
  2985 void
  2986 CompactibleFreeListSpace::
  2987 initialize_sequential_subtasks_for_marking(int n_threads,
  2988                                            HeapWord* low) {
  2989   // The "size" of each task is fixed according to rescan_task_size.
  2990   assert(n_threads > 0, "Unexpected n_threads argument");
  2991   const size_t task_size = marking_task_size();
  2992   assert(task_size > CardTableModRefBS::card_size_in_words &&
  2993          (task_size %  CardTableModRefBS::card_size_in_words == 0),
  2994          "Otherwise arithmetic below would be incorrect");
  2995   MemRegion span = _gen->reserved();
  2996   if (low != NULL) {
  2997     if (span.contains(low)) {
  2998       // Align low down to  a card boundary so that
  2999       // we can use block_offset_careful() on span boundaries.
  3000       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
  3001                                  CardTableModRefBS::card_size);
  3002       // Clip span prefix at aligned_low
  3003       span = span.intersection(MemRegion(aligned_low, span.end()));
  3004     } else if (low > span.end()) {
  3005       span = MemRegion(low, low);  // Null region
  3006     } // else use entire span
  3007   }
  3008   assert(span.is_empty() ||
  3009          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
  3010         "span should start at a card boundary");
  3011   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  3012   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  3013   assert(n_tasks == 0 ||
  3014          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
  3015           (span.start() + n_tasks*task_size >= span.end())),
  3016          "n_tasks calculation incorrect");
  3017   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  3018   assert(!pst->valid(), "Clobbering existing data?");
  3019   // Sets the condition for completion of the subtask (how many threads
  3020   // need to finish in order to be done).
  3021   pst->set_n_threads(n_threads);
  3022   pst->set_n_tasks((int)n_tasks);
  3023 }
