src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author       mgerdin
date         Thu, 06 Mar 2014 09:08:18 +0100
changeset    6978:30c99d8e0f02
parent       6912:c49dcaf78a65
child        6979:5255b195f828
permissions  -rw-r--r--

8038399: Remove dead oop_iterate MemRegion variants from SharedHeap, Generation and Space classes
Reviewed-by: tschatzl, stefank

     1 /*
     2  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
    27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
    28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
    29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
    30 #include "gc_implementation/shared/liveRange.hpp"
    31 #include "gc_implementation/shared/spaceDecorator.hpp"
    32 #include "gc_interface/collectedHeap.inline.hpp"
    33 #include "memory/allocation.inline.hpp"
    34 #include "memory/blockOffsetTable.inline.hpp"
    35 #include "memory/resourceArea.hpp"
    36 #include "memory/space.inline.hpp"
    37 #include "memory/universe.inline.hpp"
    38 #include "oops/oop.inline.hpp"
    39 #include "runtime/globals.hpp"
    40 #include "runtime/handles.inline.hpp"
    41 #include "runtime/init.hpp"
    42 #include "runtime/java.hpp"
    43 #include "runtime/orderAccess.inline.hpp"
    44 #include "runtime/vmThread.hpp"
    45 #include "utilities/copy.hpp"
    47 /////////////////////////////////////////////////////////////////////////
    48 //// CompactibleFreeListSpace
    49 /////////////////////////////////////////////////////////////////////////
    51 // highest ranked  free list lock rank
    52 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
    54 // Defaults are 0 so things will break badly if incorrectly initialized.
    55 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
    56 size_t CompactibleFreeListSpace::IndexSetStride = 0;
    58 size_t MinChunkSize = 0;
    60 void CompactibleFreeListSpace::set_cms_values() {
    61   // Set CMS global values
    62   assert(MinChunkSize == 0, "already set");
    64   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
    65   // for chunks to contain a FreeChunk.
    66   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
    67   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
    69   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
    70   IndexSetStart  = MinChunkSize;
    71   IndexSetStride = MinObjAlignment;
    72 }
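// A minimal standalone sketch of the alignment arithmetic assumed by
// set_cms_values() above: align_size_up() rounds a byte count up to the next
// multiple of a power-of-two alignment, and the result is converted to heap
// words. The concrete sizes in the usage comment below are hypothetical.

#include <cstddef>

static inline size_t align_size_up_sketch(size_t size, size_t alignment) {
  // Valid only for power-of-two alignments.
  return (size + alignment - 1) & ~(alignment - 1);
}

// Example (hypothetical numbers): with sizeof(FreeChunk) == 24,
// MinObjAlignmentInBytes == 8 and BytesPerWord == 8,
// align_size_up_sketch(24, 8) / 8 == 3, i.e. MinChunkSize would be 3 words.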
    74 // Constructor
    75 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
    76   MemRegion mr, bool use_adaptive_freelists,
    77   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
    78   _dictionaryChoice(dictionaryChoice),
    79   _adaptive_freelists(use_adaptive_freelists),
    80   _bt(bs, mr),
    81   // free list locks are in the range of values taken by _lockRank
    82   // This range currently is [_leaf+2, _leaf+3]
    83   // Note: this requires that CFLspace c'tors
    84   // are called serially in the order in which the locks are
    85   // acquired in the program text. This is true today.
    86   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
    87   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
    88                           "CompactibleFreeListSpace._dict_par_lock", true),
    89   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    90                     CMSRescanMultiple),
    91   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    92                     CMSConcMarkMultiple),
    93   _collector(NULL)
    94 {
    95   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
    96          "FreeChunk is larger than expected");
    97   _bt.set_space(this);
    98   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    99   // We have all of "mr", all of which we place in the dictionary
   100   // as one big chunk. We'll need to decide here which of several
   101   // possible alternative dictionary implementations to use. For
   102   // now the choice is easy, since we have only one working
   103   // implementation, namely, the simple binary tree (splaying
   104   // temporarily disabled).
   105   switch (dictionaryChoice) {
   106     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
   107       _dictionary = new AFLBinaryTreeDictionary(mr);
   108       break;
   109     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
   110     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
   111     default:
   112       warning("dictionaryChoice: selected option not understood; using"
   113               " default BinaryTreeDictionary implementation instead.");
   114   }
   115   assert(_dictionary != NULL, "CMS dictionary initialization");
   116   // The indexed free lists are initially all empty and are lazily
   117   // filled in on demand. Initialize the array elements to NULL.
   118   initializeIndexedFreeListArray();
   120   // Not using adaptive free lists assumes that allocation is first
   121   // from the linAB's.  Also a cms perm gen which can be compacted
   122   // has to have the klass's klassKlass allocated at a lower
   123   // address in the heap than the klass so that the klassKlass is
   124   // moved to its new location before the klass is moved.
   125   // Set the _refillSize for the linear allocation blocks
   126   if (!use_adaptive_freelists) {
   127     FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
   128                                            FreeBlockDictionary<FreeChunk>::atLeast);
   129     // The small linAB initially has all the space and will allocate
   130     // a chunk of any size.
   131     HeapWord* addr = (HeapWord*) fc;
   132     _smallLinearAllocBlock.set(addr, fc->size() ,
   133       1024*SmallForLinearAlloc, fc->size());
   134     // Note that _unallocated_block is not updated here.
   135     // Allocations from the linear allocation block should
   136     // update it.
   137   } else {
   138     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
   139                                SmallForLinearAlloc);
   140   }
   141   // CMSIndexedFreeListReplenish should be at least 1
   142   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
   143   _promoInfo.setSpace(this);
   144   if (UseCMSBestFit) {
   145     _fitStrategy = FreeBlockBestFitFirst;
   146   } else {
   147     _fitStrategy = FreeBlockStrategyNone;
   148   }
   149   check_free_list_consistency();
   151   // Initialize locks for parallel case.
   153   if (CollectedHeap::use_parallel_gc_threads()) {
   154     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   155       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
   156                                               "a freelist par lock",
   157                                               true);
   158       DEBUG_ONLY(
   159         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
   160       )
   161     }
   162     _dictionary->set_par_lock(&_parDictionaryAllocLock);
   163   }
   164 }
   166 // Like CompactibleSpace forward() but always calls cross_threshold() to
   167 // update the block offset table.  Removed initialize_threshold call because
   168 // CFLS does not use a block offset array for contiguous spaces.
   169 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
   170                                     CompactPoint* cp, HeapWord* compact_top) {
   171   // q is alive
   172   // First check if we should switch compaction space
   173   assert(this == cp->space, "'this' should be current compaction space.");
   174   size_t compaction_max_size = pointer_delta(end(), compact_top);
   175   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
   176     "virtual adjustObjectSize_v() method is not correct");
   177   size_t adjusted_size = adjustObjectSize(size);
   178   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
   179          "no small fragments allowed");
   180   assert(minimum_free_block_size() == MinChunkSize,
   181          "for de-virtualized reference below");
   182   // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
   183   if (adjusted_size + MinChunkSize > compaction_max_size &&
   184       adjusted_size != compaction_max_size) {
   185     do {
   186       // switch to next compaction space
   187       cp->space->set_compaction_top(compact_top);
   188       cp->space = cp->space->next_compaction_space();
   189       if (cp->space == NULL) {
   190         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
   191         assert(cp->gen != NULL, "compaction must succeed");
   192         cp->space = cp->gen->first_compaction_space();
   193         assert(cp->space != NULL, "generation must have a first compaction space");
   194       }
   195       compact_top = cp->space->bottom();
   196       cp->space->set_compaction_top(compact_top);
   197       // The correct adjusted_size may not be the same as that for this method
   198   // (i.e., cp->space may no longer be "this" so adjust the size again).
   199       // Use the virtual method which is not used above to save the virtual
   200       // dispatch.
   201       adjusted_size = cp->space->adjust_object_size_v(size);
   202       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
   203       assert(cp->space->minimum_free_block_size() == 0, "just checking");
   204     } while (adjusted_size > compaction_max_size);
   205   }
   207   // store the forwarding pointer into the mark word
   208   if ((HeapWord*)q != compact_top) {
   209     q->forward_to(oop(compact_top));
   210     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
   211   } else {
   212     // if the object isn't moving we can just set the mark to the default
   213     // mark and handle it specially later on.
   214     q->init_mark();
   215     assert(q->forwardee() == NULL, "should be forwarded to NULL");
   216   }
   218   compact_top += adjusted_size;
   220   // we need to update the offset table so that the beginnings of objects can be
   221   // found during scavenge.  Note that we are updating the offset table based on
   222   // where the object will be once the compaction phase finishes.
   224   // Always call cross_threshold().  A contiguous space can only call it when
   225   // the compaction_top exceeds the current threshold but not for an
   226   // non-contiguous space.
   227   cp->threshold =
   228     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
   229   return compact_top;
   230 }
   232 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
   233 // and use of single_block instead of alloc_block.  The name here is not really
   234 // appropriate - maybe a more general name could be invented for both the
   235 // contiguous and noncontiguous spaces.
   237 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
   238   _bt.single_block(start, the_end);
   239   return end();
   240 }
   242 // Initialize them to NULL.
   243 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
   244   for (size_t i = 0; i < IndexSetSize; i++) {
   245     // Note that on platforms where objects are double word aligned,
   246     // the odd array elements are not used.  It is convenient, however,
   247     // to map directly from the object size to the array element.
   248     _indexedFreeList[i].reset(IndexSetSize);
   249     _indexedFreeList[i].set_size(i);
   250     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   251     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   252     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   253     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   254   }
   255 }
   257 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
   258   for (size_t i = 1; i < IndexSetSize; i++) {
   259     assert(_indexedFreeList[i].size() == (size_t) i,
   260       "Indexed free list sizes are incorrect");
   261     _indexedFreeList[i].reset(IndexSetSize);
   262     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   263     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   264     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   265     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   266   }
   267 }
   269 void CompactibleFreeListSpace::reset(MemRegion mr) {
   270   resetIndexedFreeListArray();
   271   dictionary()->reset();
   272   if (BlockOffsetArrayUseUnallocatedBlock) {
   273     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
   274     // Everything's allocated until proven otherwise.
   275     _bt.set_unallocated_block(end());
   276   }
   277   if (!mr.is_empty()) {
   278     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
   279     _bt.single_block(mr.start(), mr.word_size());
   280     FreeChunk* fc = (FreeChunk*) mr.start();
   281     fc->set_size(mr.word_size());
   282     if (mr.word_size() >= IndexSetSize ) {
   283       returnChunkToDictionary(fc);
   284     } else {
   285       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
   286       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
   287     }
   288     coalBirth(mr.word_size());
   289   }
   290   _promoInfo.reset();
   291   _smallLinearAllocBlock._ptr = NULL;
   292   _smallLinearAllocBlock._word_size = 0;
   293 }
   295 void CompactibleFreeListSpace::reset_after_compaction() {
   296   // Reset the space to the new reality - one free chunk.
   297   MemRegion mr(compaction_top(), end());
   298   reset(mr);
   299   // Now refill the linear allocation block(s) if possible.
   300   if (_adaptive_freelists) {
   301     refillLinearAllocBlocksIfNeeded();
   302   } else {
   303     // Place as much of mr in the linAB as we can get,
   304     // provided it was big enough to go into the dictionary.
   305     FreeChunk* fc = dictionary()->find_largest_dict();
   306     if (fc != NULL) {
   307       assert(fc->size() == mr.word_size(),
   308              "Why was the chunk broken up?");
   309       removeChunkFromDictionary(fc);
   310       HeapWord* addr = (HeapWord*) fc;
   311       _smallLinearAllocBlock.set(addr, fc->size() ,
   312         1024*SmallForLinearAlloc, fc->size());
   313       // Note that _unallocated_block is not updated here.
   314     }
   315   }
   316 }
   318 // Walks the entire dictionary, returning a coterminal
   319 // chunk, if it exists. Use with caution since it involves
   320 // a potentially complete walk of a potentially large tree.
   321 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
   323   assert_lock_strong(&_freelistLock);
   325   return dictionary()->find_chunk_ends_at(end());
   326 }
   329 #ifndef PRODUCT
   330 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
   331   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   332     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
   333   }
   334 }
   336 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
   337   size_t sum = 0;
   338   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   339     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
   340   }
   341   return sum;
   342 }
   344 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
   345   size_t count = 0;
   346   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
   347     debug_only(
   348       ssize_t total_list_count = 0;
   349       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   350          fc = fc->next()) {
   351         total_list_count++;
   352       }
   353       assert(total_list_count ==  _indexedFreeList[i].count(),
   354         "Count in list is incorrect");
   355     )
   356     count += _indexedFreeList[i].count();
   357   }
   358   return count;
   359 }
   361 size_t CompactibleFreeListSpace::totalCount() {
   362   size_t num = totalCountInIndexedFreeLists();
   363   num +=  dictionary()->total_count();
   364   if (_smallLinearAllocBlock._word_size != 0) {
   365     num++;
   366   }
   367   return num;
   368 }
   369 #endif
   371 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
   372   FreeChunk* fc = (FreeChunk*) p;
   373   return fc->is_free();
   374 }
   376 size_t CompactibleFreeListSpace::used() const {
   377   return capacity() - free();
   378 }
   380 size_t CompactibleFreeListSpace::free() const {
   381   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   382   // if you do this while the structures are in flux you
   383   // may get an approximate answer only; for instance
   384   // because there is concurrent allocation either
   385   // directly by mutators or for promotion during a GC.
   386   // It's "MT-safe", however, in the sense that you are guaranteed
   387   // not to crash and burn, for instance, because of walking
   388   // pointers that could disappear as you were walking them.
   389   // The approximation is because the various components
   390   // that are read below are not read atomically (and
   391   // further the computation of totalSizeInIndexedFreeLists()
   392   // is itself a non-atomic computation. The normal use of
   393   // this is during a resize operation at the end of GC
   394   // and at that time you are guaranteed to get the
   395   // correct actual value. However, for instance, this is
   396   // also read completely asynchronously by the "perf-sampler"
   397   // that supports jvmstat, and you are apt to see the values
   398   // flicker in such cases.
   399   assert(_dictionary != NULL, "No _dictionary?");
   400   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
   401           totalSizeInIndexedFreeLists() +
   402           _smallLinearAllocBlock._word_size) * HeapWordSize;
   403 }
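// A minimal, self-contained sketch of the "MT-safe but not MT-precise"
// behaviour described above: the components of the free-space total are read
// at different times without a lock, so a chunk migrating between components
// concurrently can be counted twice or missed. The counter names here are
// illustrative, not the fields used by this class.

#include <atomic>
#include <cstddef>

struct FreeSpaceCountersSketch {
  std::atomic<size_t> dictionary_words;
  std::atomic<size_t> indexed_lists_words;
  std::atomic<size_t> linab_words;
};

// Reader side: no lock is taken, so the result is only approximate while
// mutators or GC workers move space between the three components.
static size_t approximate_free_words(const FreeSpaceCountersSketch& c) {
  return c.dictionary_words.load(std::memory_order_relaxed) +
         c.indexed_lists_words.load(std::memory_order_relaxed) +
         c.linab_words.load(std::memory_order_relaxed);
}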
   405 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
   406   assert(_dictionary != NULL, "No _dictionary?");
   407   assert_locked();
   408   size_t res = _dictionary->max_chunk_size();
   409   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
   410                        (size_t) SmallForLinearAlloc - 1));
   411   // XXX the following could potentially be pretty slow;
   412   // should one, pessimally for the rare cases when res
   413   // calculated above is less than IndexSetSize,
   414   // just return res calculated above? My reasoning was that
   415   // those cases will be so rare that the extra time spent doesn't
   416   // really matter....
   417   // Note: do not change the loop test i >= res + IndexSetStride
   418   // to i > res below, because i is unsigned and res may be zero.
   419   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
   420        i -= IndexSetStride) {
   421     if (_indexedFreeList[i].head() != NULL) {
   422       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   423       return i;
   424     }
   425   }
   426   return res;
   427 }
   429 void LinearAllocBlock::print_on(outputStream* st) const {
   430   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
   431             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
   432             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
   433 }
   435 void CompactibleFreeListSpace::print_on(outputStream* st) const {
   436   st->print_cr("COMPACTIBLE FREELIST SPACE");
   437   st->print_cr(" Space:");
   438   Space::print_on(st);
   440   st->print_cr("promoInfo:");
   441   _promoInfo.print_on(st);
   443   st->print_cr("_smallLinearAllocBlock");
   444   _smallLinearAllocBlock.print_on(st);
   446   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
   448   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
   449                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
   450 }
   452 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
   453 const {
   454   reportIndexedFreeListStatistics();
   455   gclog_or_tty->print_cr("Layout of Indexed Freelists");
   456   gclog_or_tty->print_cr("---------------------------");
   457   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
   458   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   459     _indexedFreeList[i].print_on(gclog_or_tty);
   460     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   461          fc = fc->next()) {
   462       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
   463                           p2i(fc), p2i((HeapWord*)fc + i),
   464                           fc->cantCoalesce() ? "\t CC" : "");
   465     }
   466   }
   467 }
   469 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
   470 const {
   471   _promoInfo.print_on(st);
   472 }
   474 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
   475 const {
   476   _dictionary->report_statistics();
   477   st->print_cr("Layout of Freelists in Tree");
   478   st->print_cr("---------------------------");
   479   _dictionary->print_free_lists(st);
   480 }
   482 class BlkPrintingClosure: public BlkClosure {
   483   const CMSCollector*             _collector;
   484   const CompactibleFreeListSpace* _sp;
   485   const CMSBitMap*                _live_bit_map;
   486   const bool                      _post_remark;
   487   outputStream*                   _st;
   488 public:
   489   BlkPrintingClosure(const CMSCollector* collector,
   490                      const CompactibleFreeListSpace* sp,
   491                      const CMSBitMap* live_bit_map,
   492                      outputStream* st):
   493     _collector(collector),
   494     _sp(sp),
   495     _live_bit_map(live_bit_map),
   496     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
   497     _st(st) { }
   498   size_t do_blk(HeapWord* addr);
   499 };
   501 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
   502   size_t sz = _sp->block_size_no_stall(addr, _collector);
   503   assert(sz != 0, "Should always be able to compute a size");
   504   if (_sp->block_is_obj(addr)) {
   505     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
   506     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
   507       p2i(addr),
   508       dead ? "dead" : "live",
   509       sz,
   510       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
   511     if (CMSPrintObjectsInDump && !dead) {
   512       oop(addr)->print_on(_st);
   513       _st->print_cr("--------------------------------------");
   514     }
   515   } else { // free block
   516     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
   517       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
   518     if (CMSPrintChunksInDump) {
   519       ((FreeChunk*)addr)->print_on(_st);
   520       _st->print_cr("--------------------------------------");
   521     }
   522   }
   523   return sz;
   524 }
   526 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
   527   outputStream* st) {
   528   st->print_cr("\n=========================");
   529   st->print_cr("Block layout in CMS Heap:");
   530   st->print_cr("=========================");
   531   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
   532   blk_iterate(&bpcl);
   534   st->print_cr("\n=======================================");
   535   st->print_cr("Order & Layout of Promotion Info Blocks");
   536   st->print_cr("=======================================");
   537   print_promo_info_blocks(st);
   539   st->print_cr("\n===========================");
   540   st->print_cr("Order of Indexed Free Lists");
   541   st->print_cr("===========================");
   542   print_indexed_free_lists(st);
   544   st->print_cr("\n=================================");
   545   st->print_cr("Order of Free Lists in Dictionary");
   546   st->print_cr("=================================");
   547   print_dictionary_free_lists(st);
   548 }
   551 void CompactibleFreeListSpace::reportFreeListStatistics() const {
   552   assert_lock_strong(&_freelistLock);
   553   assert(PrintFLSStatistics != 0, "Reporting error");
   554   _dictionary->report_statistics();
   555   if (PrintFLSStatistics > 1) {
   556     reportIndexedFreeListStatistics();
   557     size_t total_size = totalSizeInIndexedFreeLists() +
   558                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
   559     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
   560   }
   561 }
   563 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
   564   assert_lock_strong(&_freelistLock);
   565   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
   566                       "--------------------------------\n");
   567   size_t total_size = totalSizeInIndexedFreeLists();
   568   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
   569   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
   570   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
   571   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
   572   if (free_blocks != 0) {
   573     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
   574   }
   575 }
   577 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
   578   size_t res = 0;
   579   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   580     debug_only(
   581       ssize_t recount = 0;
   582       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   583          fc = fc->next()) {
   584         recount += 1;
   585       }
   586       assert(recount == _indexedFreeList[i].count(),
   587         "Incorrect count in list");
   588     )
   589     res += _indexedFreeList[i].count();
   590   }
   591   return res;
   592 }
   594 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
   595   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
   596     if (_indexedFreeList[i].head() != NULL) {
   597       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   598       return (size_t)i;
   599     }
   600   }
   601   return 0;
   602 }
   604 void CompactibleFreeListSpace::set_end(HeapWord* value) {
   605   HeapWord* prevEnd = end();
   606   assert(prevEnd != value, "unnecessary set_end call");
   607   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   608         "New end is below unallocated block");
   609   _end = value;
   610   if (prevEnd != NULL) {
   611     // Resize the underlying block offset table.
   612     _bt.resize(pointer_delta(value, bottom()));
   613     if (value <= prevEnd) {
   614       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   615              "New end is below unallocated block");
   616     } else {
   617       // Now, take this new chunk and add it to the free blocks.
   618       // Note that the BOT has not yet been updated for this block.
   619       size_t newFcSize = pointer_delta(value, prevEnd);
   620       // XXX This is REALLY UGLY and should be fixed up. XXX
   621       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
   622         // Mark the boundary of the new block in BOT
   623         _bt.mark_block(prevEnd, value);
   624         // put it all in the linAB
   625         if (ParallelGCThreads == 0) {
   626           _smallLinearAllocBlock._ptr = prevEnd;
   627           _smallLinearAllocBlock._word_size = newFcSize;
   628           repairLinearAllocBlock(&_smallLinearAllocBlock);
   629         } else { // ParallelGCThreads > 0
   630           MutexLockerEx x(parDictionaryAllocLock(),
   631                           Mutex::_no_safepoint_check_flag);
   632           _smallLinearAllocBlock._ptr = prevEnd;
   633           _smallLinearAllocBlock._word_size = newFcSize;
   634           repairLinearAllocBlock(&_smallLinearAllocBlock);
   635         }
   636         // Births of chunks put into a LinAB are not recorded.  Births
   637         // of chunks as they are allocated out of a LinAB are.
   638       } else {
   639         // Add the block to the free lists, if possible coalescing it
   640         // with the last free block, and update the BOT and census data.
   641         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
   642       }
   643     }
   644   }
   645 }
   647 class FreeListSpace_DCTOC : public Filtering_DCTOC {
   648   CompactibleFreeListSpace* _cfls;
   649   CMSCollector* _collector;
   650 protected:
   651   // Override.
   652 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
   653   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
   654                                        HeapWord* bottom, HeapWord* top, \
   655                                        ClosureType* cl);                \
   656       void walk_mem_region_with_cl_par(MemRegion mr,                    \
   657                                        HeapWord* bottom, HeapWord* top, \
   658                                        ClosureType* cl);                \
   659     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
   660                                        HeapWord* bottom, HeapWord* top, \
   661                                        ClosureType* cl)
   662   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
   663   walk_mem_region_with_cl_DECL(FilteringClosure);
   665 public:
   666   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
   667                       CMSCollector* collector,
   668                       ExtendedOopClosure* cl,
   669                       CardTableModRefBS::PrecisionStyle precision,
   670                       HeapWord* boundary) :
   671     Filtering_DCTOC(sp, cl, precision, boundary),
   672     _cfls(sp), _collector(collector) {}
   673 };
   675 // We de-virtualize the block-related calls below, since we know that our
   676 // space is a CompactibleFreeListSpace.
   678 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
   679 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
   680                                                  HeapWord* bottom,              \
   681                                                  HeapWord* top,                 \
   682                                                  ClosureType* cl) {             \
   683    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
   684    if (is_par) {                                                                \
   685      assert(SharedHeap::heap()->n_par_threads() ==                              \
   686             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
   687      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   688    } else {                                                                     \
   689      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   690    }                                                                            \
   691 }                                                                               \
   692 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
   693                                                       HeapWord* bottom,         \
   694                                                       HeapWord* top,            \
   695                                                       ClosureType* cl) {        \
   696   /* Skip parts that are before "mr", in case "block_start" sent us             \
   697      back too far. */                                                           \
   698   HeapWord* mr_start = mr.start();                                              \
   699   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
   700   HeapWord* next = bottom + bot_size;                                           \
   701   while (next < mr_start) {                                                     \
   702     bottom = next;                                                              \
   703     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
   704     next = bottom + bot_size;                                                   \
   705   }                                                                             \
   706                                                                                 \
   707   while (bottom < top) {                                                        \
   708     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
   709         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   710                     oop(bottom)) &&                                             \
   711         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   712       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   713       bottom += _cfls->adjustObjectSize(word_sz);                               \
   714     } else {                                                                    \
   715       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
   716     }                                                                           \
   717   }                                                                             \
   718 }                                                                               \
   719 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
   720                                                         HeapWord* bottom,       \
   721                                                         HeapWord* top,          \
   722                                                         ClosureType* cl) {      \
   723   /* Skip parts that are before "mr", in case "block_start" sent us             \
   724      back too far. */                                                           \
   725   HeapWord* mr_start = mr.start();                                              \
   726   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
   727   HeapWord* next = bottom + bot_size;                                           \
   728   while (next < mr_start) {                                                     \
   729     bottom = next;                                                              \
   730     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
   731     next = bottom + bot_size;                                                   \
   732   }                                                                             \
   733                                                                                 \
   734   while (bottom < top) {                                                        \
   735     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
   736         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   737                     oop(bottom)) &&                                             \
   738         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   739       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   740       bottom += _cfls->adjustObjectSize(word_sz);                               \
   741     } else {                                                                    \
   742       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
   743     }                                                                           \
   744   }                                                                             \
   745 }
   747 // (There are only two of these, rather than N, because the split is due
   748 // only to the introduction of the FilteringClosure, a local part of the
   749 // impl of this abstraction.)
   750 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
   751 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
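// A minimal sketch of the de-virtualization idiom used in the macro above:
// qualifying the call with the class name forces a direct (non-virtual) call
// when the dynamic type is known statically. The types here are illustrative
// stand-ins, not the real Space/CompactibleFreeListSpace hierarchy.

#include <cstddef>

struct SpaceSketch {
  virtual size_t block_size(const void* p) const = 0;
  virtual ~SpaceSketch() {}
};

struct CFLSSketch : public SpaceSketch {
  virtual size_t block_size(const void* p) const { return 1; }
};

static size_t two_call_styles(const CFLSSketch* sp, const void* p) {
  size_t a = sp->block_size(p);              // ordinary call: virtual dispatch
  size_t b = sp->CFLSSketch::block_size(p);  // qualified call: resolved statically
  return a + b;
}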
   753 DirtyCardToOopClosure*
   754 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
   755                                       CardTableModRefBS::PrecisionStyle precision,
   756                                       HeapWord* boundary) {
   757   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
   758 }
   761 // Note on locking for the space iteration functions:
   762 // since the collector's iteration activities are concurrent with
   763 // allocation activities by mutators, absent a suitable mutual exclusion
   764 // mechanism the iterators may go awry. For instance a block being iterated
   765 // may suddenly be allocated or divided up and part of it allocated and
   766 // so on.
   768 // Apply the given closure to each block in the space.
   769 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
   770   assert_lock_strong(freelistLock());
   771   HeapWord *cur, *limit;
   772   for (cur = bottom(), limit = end(); cur < limit;
   773        cur += cl->do_blk_careful(cur));
   774 }
   776 // Apply the given closure to each block in the space.
   777 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
   778   assert_lock_strong(freelistLock());
   779   HeapWord *cur, *limit;
   780   for (cur = bottom(), limit = end(); cur < limit;
   781        cur += cl->do_blk(cur));
   782 }
   784 // Apply the given closure to each oop in the space.
   785 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
   786   assert_lock_strong(freelistLock());
   787   HeapWord *cur, *limit;
   788   size_t curSize;
   789   for (cur = bottom(), limit = end(); cur < limit;
   790        cur += curSize) {
   791     curSize = block_size(cur);
   792     if (block_is_obj(cur)) {
   793       oop(cur)->oop_iterate(cl);
   794     }
   795   }
   796 }
   798 // NOTE: In the following methods, in order to safely be able to
   799 // apply the closure to an object, we need to be sure that the
   800 // object has been initialized. We are guaranteed that an object
   801 // is initialized if we are holding the Heap_lock with the
   802 // world stopped.
   803 void CompactibleFreeListSpace::verify_objects_initialized() const {
   804   if (is_init_completed()) {
   805     assert_locked_or_safepoint(Heap_lock);
   806     if (Universe::is_fully_initialized()) {
   807       guarantee(SafepointSynchronize::is_at_safepoint(),
   808                 "Required for objects to be initialized");
   809     }
   810   } // else make a concession at vm start-up
   811 }
   813 // Apply the given closure to each object in the space
   814 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
   815   assert_lock_strong(freelistLock());
   816   NOT_PRODUCT(verify_objects_initialized());
   817   HeapWord *cur, *limit;
   818   size_t curSize;
   819   for (cur = bottom(), limit = end(); cur < limit;
   820        cur += curSize) {
   821     curSize = block_size(cur);
   822     if (block_is_obj(cur)) {
   823       blk->do_object(oop(cur));
   824     }
   825   }
   826 }
   828 // Apply the given closure to each live object in the space
   829 //   The usage of CompactibleFreeListSpace
   830 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
   831 // objects in the space with references to objects that are no longer
   832 // valid.  For example, an object may reference another object
   833 // that has already been swept up (collected).  This method uses
   834 // obj_is_alive() to determine whether it is safe to apply the closure to
   835 // an object.  See obj_is_alive() for details on how liveness of an
   836 // object is decided.
   838 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
   839   assert_lock_strong(freelistLock());
   840   NOT_PRODUCT(verify_objects_initialized());
   841   HeapWord *cur, *limit;
   842   size_t curSize;
   843   for (cur = bottom(), limit = end(); cur < limit;
   844        cur += curSize) {
   845     curSize = block_size(cur);
   846     if (block_is_obj(cur) && obj_is_alive(cur)) {
   847       blk->do_object(oop(cur));
   848     }
   849   }
   850 }
   852 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
   853                                                   UpwardsObjectClosure* cl) {
   854   assert_locked(freelistLock());
   855   NOT_PRODUCT(verify_objects_initialized());
   856   Space::object_iterate_mem(mr, cl);
   857 }
   859 // Callers of this iterator beware: The closure application should
   860 // be robust in the face of uninitialized objects and should (always)
   861 // return a correct size so that the next addr + size below gives us a
   862 // valid block boundary. [See for instance,
   863 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   864 // in ConcurrentMarkSweepGeneration.cpp.]
   865 HeapWord*
   866 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
   867   assert_lock_strong(freelistLock());
   868   HeapWord *addr, *last;
   869   size_t size;
   870   for (addr = bottom(), last  = end();
   871        addr < last; addr += size) {
   872     FreeChunk* fc = (FreeChunk*)addr;
   873     if (fc->is_free()) {
   874       // Since we hold the free list lock, which protects direct
   875       // allocation in this generation by mutators, a free object
   876       // will remain free throughout this iteration code.
   877       size = fc->size();
   878     } else {
   879       // Note that the object need not necessarily be initialized,
   880       // because (for instance) the free list lock does NOT protect
   881       // object initialization. The closure application below must
   882       // therefore be correct in the face of uninitialized objects.
   883       size = cl->do_object_careful(oop(addr));
   884       if (size == 0) {
   885         // An unparsable object found. Signal early termination.
   886         return addr;
   887       }
   888     }
   889   }
   890   return NULL;
   891 }
   893 // Callers of this iterator beware: The closure application should
   894 // be robust in the face of uninitialized objects and should (always)
   895 // return a correct size so that the next addr + size below gives us a
   896 // valid block boundary. [See for instance,
   897 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   898 // in ConcurrentMarkSweepGeneration.cpp.]
   899 HeapWord*
   900 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
   901   ObjectClosureCareful* cl) {
   902   assert_lock_strong(freelistLock());
   903   // Can't use used_region() below because it may not necessarily
   904   // be the same as [bottom(),end()); although we could
   905   // use [used_region().start(),round_to(used_region().end(),CardSize)),
   906   // that appears too cumbersome, so we just do the simpler check
   907   // in the assertion below.
   908   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
   909          "mr should be non-empty and within used space");
   910   HeapWord *addr, *end;
   911   size_t size;
   912   for (addr = block_start_careful(mr.start()), end  = mr.end();
   913        addr < end; addr += size) {
   914     FreeChunk* fc = (FreeChunk*)addr;
   915     if (fc->is_free()) {
   916       // Since we hold the free list lock, which protects direct
   917       // allocation in this generation by mutators, a free object
   918       // will remain free throughout this iteration code.
   919       size = fc->size();
   920     } else {
   921       // Note that the object need not necessarily be initialized,
   922       // because (for instance) the free list lock does NOT protect
   923       // object initialization. The closure application below must
   924       // therefore be correct in the face of uninitialized objects.
   925       size = cl->do_object_careful_m(oop(addr), mr);
   926       if (size == 0) {
   927         // An unparsable object found. Signal early termination.
   928         return addr;
   929       }
   930     }
   931   }
   932   return NULL;
   933 }
   936 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
   937   NOT_PRODUCT(verify_objects_initialized());
   938   return _bt.block_start(p);
   939 }
   941 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
   942   return _bt.block_start_careful(p);
   943 }
   945 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   946   NOT_PRODUCT(verify_objects_initialized());
   947   // This must be volatile, or else there is a danger that the compiler
   948   // will compile the code below into a sometimes-infinite loop, by keeping
   949   // the value read the first time in a register.
   950   while (true) {
   951     // We must do this until we get a consistent view of the object.
   952     if (FreeChunk::indicatesFreeChunk(p)) {
   953       volatile FreeChunk* fc = (volatile FreeChunk*)p;
   954       size_t res = fc->size();
   956       // Bugfix for systems with weak memory model (PPC64/IA64). The
   957       // block's free bit was set and we have read the size of the
   958       // block. Acquire and check the free bit again. If the block is
   959       // still free, the read size is correct.
   960       OrderAccess::acquire();
   962       // If the object is still a free chunk, return the size, else it
   963       // has been allocated so try again.
   964       if (FreeChunk::indicatesFreeChunk(p)) {
   965         assert(res != 0, "Block size should not be 0");
   966         return res;
   967       }
   968     } else {
   969       // must read from what 'p' points to in each loop.
   970       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
   971       if (k != NULL) {
   972         assert(k->is_klass(), "Should really be klass oop.");
   973         oop o = (oop)p;
   974         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
   976         // Bugfix for systems with weak memory model (PPC64/IA64).
   977         // The object o may be an array. Acquire to make sure that the array
   978         // size (third word) is consistent.
   979         OrderAccess::acquire();
   981         size_t res = o->size_given_klass(k);
   982         res = adjustObjectSize(res);
   983         assert(res != 0, "Block size should not be 0");
   984         return res;
   985       }
   986     }
   987   }
   988 }
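// A minimal, self-contained sketch (using std::atomic instead of HotSpot's
// OrderAccess) of the check / read / acquire / re-check protocol relied on
// above: if the "free" bit is still set after an acquire barrier that follows
// the size read, the size that was read belongs to the free chunk and not to
// an object allocated in its place. The HeaderSketch type is hypothetical.

#include <atomic>
#include <cstddef>

struct HeaderSketch {
  std::atomic<bool> is_free;
  size_t            size_in_words;
};

static size_t size_if_still_free(const HeaderSketch* h) {
  while (true) {
    if (h->is_free.load(std::memory_order_relaxed)) {
      size_t res = h->size_in_words;                        // racy size read
      std::atomic_thread_fence(std::memory_order_acquire);  // order the reads
      if (h->is_free.load(std::memory_order_relaxed)) {
        return res;   // still free, so the size read above is consistent
      }
      // Allocated in between; retry and take the "object" path instead.
    } else {
      return 0;       // not free; the real code reads the klass word instead
    }
  }
}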
   990 // TODO: Now that is_parsable is gone, we should combine these two functions.
   991 // A variant of the above that uses the Printezis bits for
   992 // unparsable but allocated objects. This avoids any possible
   993 // stalls waiting for mutators to initialize objects, and is
   994 // thus potentially faster than the variant above. However,
   995 // this variant may return a zero size for a block that is
   996 // under mutation and for which a consistent size cannot be
   997 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
   998 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
   999                                                      const CMSCollector* c)
  1000 const {
  1001   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1002   // This must be volatile, or else there is a danger that the compiler
  1003   // will compile the code below into a sometimes-infinite loop, by keeping
  1004   // the value read the first time in a register.
  1005   DEBUG_ONLY(uint loops = 0;)
  1006   while (true) {
  1007     // We must do this until we get a consistent view of the object.
  1008     if (FreeChunk::indicatesFreeChunk(p)) {
  1009       volatile FreeChunk* fc = (volatile FreeChunk*)p;
  1010       size_t res = fc->size();
  1012       // Bugfix for systems with weak memory model (PPC64/IA64). The
  1013       // free bit of the block was set and we have read the size of
  1014       // the block. Acquire and check the free bit again. If the
  1015       // block is still free, the read size is correct.
  1016       OrderAccess::acquire();
  1018       if (FreeChunk::indicatesFreeChunk(p)) {
  1019         assert(res != 0, "Block size should not be 0");
  1020         assert(loops == 0, "Should be 0");
   1021         return res;
   1022       }
  1023     } else {
  1024       // must read from what 'p' points to in each loop.
  1025       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
  1026       // We trust the size of any object that has a non-NULL
  1027       // klass and (for those in the perm gen) is parsable
  1028       // -- irrespective of its conc_safe-ty.
  1029       if (k != NULL) {
  1030         assert(k->is_klass(), "Should really be klass oop.");
  1031         oop o = (oop)p;
  1032         assert(o->is_oop(), "Should be an oop");
  1034         // Bugfix for systems with weak memory model (PPC64/IA64).
  1035         // The object o may be an array. Acquire to make sure that the array
  1036         // size (third word) is consistent.
  1037         OrderAccess::acquire();
  1039         size_t res = o->size_given_klass(k);
  1040         res = adjustObjectSize(res);
  1041         assert(res != 0, "Block size should not be 0");
  1042         return res;
  1043       } else {
  1044         // May return 0 if P-bits not present.
   1045         return c->block_size_if_printezis_bits(p);
   1046       }
   1047     }
  1048     assert(loops == 0, "Can loop at most once");
   1049     DEBUG_ONLY(loops++;)
   1050   }
   1051 }
  1053 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
  1054   NOT_PRODUCT(verify_objects_initialized());
  1055   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1056   FreeChunk* fc = (FreeChunk*)p;
  1057   if (fc->is_free()) {
  1058     return fc->size();
  1059   } else {
  1060     // Ignore mark word because this may be a recently promoted
  1061     // object whose mark word is used to chain together grey
  1062     // objects (the last one would have a null value).
  1063     assert(oop(p)->is_oop(true), "Should be an oop");
   1064     return adjustObjectSize(oop(p)->size());
   1065   }
   1066 }
  1068 // This implementation assumes that the property of "being an object" is
  1069 // stable.  But being a free chunk may not be (because of parallel
  1070 // promotion.)
  1071 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
  1072   FreeChunk* fc = (FreeChunk*)p;
  1073   assert(is_in_reserved(p), "Should be in space");
  1074   // When doing a mark-sweep-compact of the CMS generation, this
  1075   // assertion may fail because prepare_for_compaction() uses
  1076   // space that is garbage to maintain information on ranges of
  1077   // live objects so that these live ranges can be moved as a whole.
  1078   // Comment out this assertion until that problem can be solved
  1079   // (i.e., that the block start calculation may look at objects
   1080   // at addresses below "p" in finding the object that contains "p"
   1081   // and those objects (if garbage) may have been modified to hold
   1082   // live range information.)
  1083   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
  1084   //        "Should be a block boundary");
  1085   if (FreeChunk::indicatesFreeChunk(p)) return false;
  1086   Klass* k = oop(p)->klass_or_null();
  1087   if (k != NULL) {
  1088     // Ignore mark word because it may have been used to
  1089     // chain together promoted objects (the last one
  1090     // would have a null value).
  1091     assert(oop(p)->is_oop(true), "Should be an oop");
  1092     return true;
  1093   } else {
   1094     return false;  // Was not an object at the start of collection.
   1095   }
   1096 }
  1098 // Check if the object is alive. This fact is checked either by consulting
  1099 // the main marking bitmap in the sweeping phase or, if it's a permanent
  1100 // generation and we're not in the sweeping phase, by checking the
  1101 // perm_gen_verify_bit_map where we store the "deadness" information if
  1102 // we did not sweep the perm gen in the most recent previous GC cycle.
  1103 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
  1104   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
  1105          "Else races are possible");
  1106   assert(block_is_obj(p), "The address should point to an object");
  1108   // If we're sweeping, we use object liveness information from the main bit map
  1109   // for both perm gen and old gen.
  1110   // We don't need to lock the bitmap (live_map or dead_map below), because
  1111   // EITHER we are in the middle of the sweeping phase, and the
  1112   // main marking bit map (live_map below) is locked,
  1113   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
  1114   // is stable, because it's mutated only in the sweeping phase.
  1115   // NOTE: This method is also used by jmap where, if class unloading is
  1116   // off, the results can return "false" for legitimate perm objects,
  1117   // when we are not in the midst of a sweeping phase, which can result
  1118   // in jmap not reporting certain perm gen objects. This will be moot
  1119   // if/when the perm gen goes away in the future.
  1120   if (_collector->abstract_state() == CMSCollector::Sweeping) {
  1121     CMSBitMap* live_map = _collector->markBitMap();
   1122     return live_map->par_isMarked((HeapWord*) p);
   1123   }
   1124   return true;
   1125 }
  1127 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
  1128   FreeChunk* fc = (FreeChunk*)p;
  1129   assert(is_in_reserved(p), "Should be in space");
  1130   assert(_bt.block_start(p) == p, "Should be a block boundary");
  1131   if (!fc->is_free()) {
  1132     // Ignore mark word because it may have been used to
  1133     // chain together promoted objects (the last one
  1134     // would have a null value).
  1135     assert(oop(p)->is_oop(true), "Should be an oop");
   1136     return true;
   1137   }
   1138   return false;
   1139 }
  1141 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
  1142 // approximate answer if you don't hold the freelistlock when you call this.
  1143 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
  1144   size_t size = 0;
  1145   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  1146     debug_only(
  1147       // We may be calling here without the lock in which case we
  1148       // won't do this modest sanity check.
  1149       if (freelistLock()->owned_by_self()) {
  1150         size_t total_list_size = 0;
  1151         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
  1152           fc = fc->next()) {
  1153           total_list_size += i;
  1155         assert(total_list_size == i * _indexedFreeList[i].count(),
  1156                "Count in list is incorrect");
  1159     size += i * _indexedFreeList[i].count();
  1161   return size;
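// A minimal worked instance of the sum above (counts are hypothetical, and
// IndexSetStride == 1 is assumed for readability): if the size-3 list holds
// 4 chunks and the size-5 list holds 2 chunks, the method reports
//   3 * 4 + 5 * 2 = 22 heap words,
// a value that may be slightly stale when freelistLock() is not held.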
  1164 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  1165   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  1166   return allocate(size);
  1169 HeapWord*
  1170 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
  1171   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
  1174 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  1175   assert_lock_strong(freelistLock());
  1176   HeapWord* res = NULL;
  1177   assert(size == adjustObjectSize(size),
  1178          "use adjustObjectSize() before calling into allocate()");
  1180   if (_adaptive_freelists) {
  1181     res = allocate_adaptive_freelists(size);
  1182   } else {  // non-adaptive free lists
  1183     res = allocate_non_adaptive_freelists(size);
  1186   if (res != NULL) {
  1187     // check that res does lie in this space!
  1188     assert(is_in_reserved(res), "Not in this space!");
  1189     assert(is_aligned((void*)res), "alignment check");
  1191     FreeChunk* fc = (FreeChunk*)res;
  1192     fc->markNotFree();
  1193     assert(!fc->is_free(), "shouldn't be marked free");
  1194     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
  1195     // Verify that the block offset table shows this to
  1196     // be a single block, but not one which is unallocated.
  1197     _bt.verify_single_block(res, size);
  1198     _bt.verify_not_unallocated(res, size);
  1199     // mangle a just allocated object with a distinct pattern.
  1200     debug_only(fc->mangleAllocated(size));
  1203   return res;
  1206 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
  1207   HeapWord* res = NULL;
   1208   // Try to use linear allocation for smaller blocks.
  1209   if (size < _smallLinearAllocBlock._allocation_size_limit) {
  1210     // if successful, the following also adjusts block offset table
  1211     res = getChunkFromSmallLinearAllocBlock(size);
  1213   // Else triage to indexed lists for smaller sizes
  1214   if (res == NULL) {
  1215     if (size < SmallForDictionary) {
  1216       res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1217     } else {
  1218       // else get it from the big dictionary; if even this doesn't
  1219       // work we are out of luck.
  1220       res = (HeapWord*)getChunkFromDictionaryExact(size);
  1224   return res;
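// Condensed sketch of the triage above, using only names from this method:
//   1. if size < _smallLinearAllocBlock._allocation_size_limit, try the
//      small linear allocation block first;
//   2. if that yields NULL and size < SmallForDictionary, try the
//      exact-size indexed free list;
//   3. otherwise fall back to getChunkFromDictionaryExact(size).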
  1227 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
  1228   assert_lock_strong(freelistLock());
  1229   HeapWord* res = NULL;
  1230   assert(size == adjustObjectSize(size),
  1231          "use adjustObjectSize() before calling into allocate()");
  1233   // Strategy
  1234   //   if small
  1235   //     exact size from small object indexed list if small
  1236   //     small or large linear allocation block (linAB) as appropriate
  1237   //     take from lists of greater sized chunks
  1238   //   else
  1239   //     dictionary
  1240   //     small or large linear allocation block if it has the space
  1241   // Try allocating exact size from indexTable first
  1242   if (size < IndexSetSize) {
  1243     res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1244     if(res != NULL) {
  1245       assert(res != (HeapWord*)_indexedFreeList[size].head(),
  1246         "Not removed from free list");
  1247       // no block offset table adjustment is necessary on blocks in
  1248       // the indexed lists.
  1250     // Try allocating from the small LinAB
  1251     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
  1252         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
  1253         // if successful, the above also adjusts block offset table
  1254         // Note that this call will refill the LinAB to
   1255         // satisfy the request.  This is different from
   1256         // evm.
  1257         // Don't record chunk off a LinAB?  smallSplitBirth(size);
  1258     } else {
  1259       // Raid the exact free lists larger than size, even if they are not
  1260       // overpopulated.
  1261       res = (HeapWord*) getChunkFromGreater(size);
  1263   } else {
  1264     // Big objects get allocated directly from the dictionary.
  1265     res = (HeapWord*) getChunkFromDictionaryExact(size);
  1266     if (res == NULL) {
  1267       // Try hard not to fail since an allocation failure will likely
  1268       // trigger a synchronous GC.  Try to get the space from the
  1269       // allocation blocks.
  1270       res = getChunkFromSmallLinearAllocBlockRemainder(size);
  1274   return res;
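// Two concrete requests through the strategy above (word sizes are
// hypothetical; IndexSetSize == 257 per the IndexSet definitions): a
// 10-word request is served from the exact 10-word indexed list, falling
// back to the small linAB and, if that also fails, to the larger exact
// lists via getChunkFromGreater(); a 1000-word request goes straight to
// the dictionary and, failing that, to the small linAB remainder.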
  1277 // A worst-case estimate of the space required (in HeapWords) to expand the heap
  1278 // when promoting obj.
  1279 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  1280   // Depending on the object size, expansion may require refilling either a
   1281   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  The
   1282   // 2 * MinChunkSize is added because the dictionary may over-allocate to avoid fragmentation.
  1283   size_t space = obj_size;
  1284   if (!_adaptive_freelists) {
  1285     space = MAX2(space, _smallLinearAllocBlock._refillSize);
  1287   space += _promoInfo.refillSize() + 2 * MinChunkSize;
  1288   return space;
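// A worked instance of the estimate above (numbers are hypothetical,
// assuming MinChunkSize == 4 words): for obj_size == 100 words with
// adaptive free lists and a promotion-info refill size of 1024 words,
//   100 + 1024 + 2 * 4 = 1132 heap words
// is returned; the 2 * MinChunkSize slack covers dictionary over-allocation.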
  1291 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  1292   FreeChunk* ret;
  1294   assert(numWords >= MinChunkSize, "Size is less than minimum");
  1295   assert(linearAllocationWouldFail() || bestFitFirst(),
  1296     "Should not be here");
  1298   size_t i;
  1299   size_t currSize = numWords + MinChunkSize;
  1300   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  1301   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
  1302     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
  1303     if (fl->head()) {
  1304       ret = getFromListGreater(fl, numWords);
  1305       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
  1306       return ret;
  1310   currSize = MAX2((size_t)SmallForDictionary,
  1311                   (size_t)(numWords + MinChunkSize));
  1313   /* Try to get a chunk that satisfies request, while avoiding
  1314      fragmentation that can't be handled. */
  1316     ret =  dictionary()->get_chunk(currSize);
  1317     if (ret != NULL) {
  1318       assert(ret->size() - numWords >= MinChunkSize,
  1319              "Chunk is too small");
  1320       _bt.allocated((HeapWord*)ret, ret->size());
  1321       /* Carve returned chunk. */
  1322       (void) splitChunkAndReturnRemainder(ret, numWords);
  1323       /* Label this as no longer a free chunk. */
  1324       assert(ret->is_free(), "This chunk should be free");
  1325       ret->link_prev(NULL);
  1327     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
  1328     return ret;
  1330   ShouldNotReachHere();
  1333 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
  1334   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  1335   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
  1338 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
  1339   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
  1340          (_smallLinearAllocBlock._word_size == fc->size()),
  1341          "Linear allocation block shows incorrect size");
  1342   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
  1343           (_smallLinearAllocBlock._word_size == fc->size()));
  1346 // Check if the purported free chunk is present either as a linear
  1347 // allocation block, the size-indexed table of (smaller) free blocks,
  1348 // or the larger free blocks kept in the binary tree dictionary.
  1349 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
  1350   if (verify_chunk_is_linear_alloc_block(fc)) {
  1351     return true;
  1352   } else if (fc->size() < IndexSetSize) {
  1353     return verifyChunkInIndexedFreeLists(fc);
  1354   } else {
  1355     return dictionary()->verify_chunk_in_free_list(fc);
  1359 #ifndef PRODUCT
  1360 void CompactibleFreeListSpace::assert_locked() const {
  1361   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
  1364 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
  1365   CMSLockVerifier::assert_locked(lock);
  1367 #endif
  1369 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
  1370   // In the parallel case, the main thread holds the free list lock
   1371   // on behalf of the parallel threads.
  1372   FreeChunk* fc;
  1374     // If GC is parallel, this might be called by several threads.
  1375     // This should be rare enough that the locking overhead won't affect
  1376     // the sequential code.
  1377     MutexLockerEx x(parDictionaryAllocLock(),
  1378                     Mutex::_no_safepoint_check_flag);
  1379     fc = getChunkFromDictionary(size);
  1381   if (fc != NULL) {
  1382     fc->dontCoalesce();
  1383     assert(fc->is_free(), "Should be free, but not coalescable");
  1384     // Verify that the block offset table shows this to
  1385     // be a single block, but not one which is unallocated.
  1386     _bt.verify_single_block((HeapWord*)fc, fc->size());
  1387     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  1389   return fc;
  1392 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
  1393   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  1394   assert_locked();
   1396   // If we are tracking promotions, then first ensure space for
   1397   // promotion (including spooling space for saving the header if necessary),
   1398   // then allocate and copy, then track promoted info if needed.
  1399   // When tracking (see PromotionInfo::track()), the mark word may
  1400   // be displaced and in this case restoration of the mark word
  1401   // occurs in the (oop_since_save_marks_)iterate phase.
  1402   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
  1403     return NULL;
  1405   // Call the allocate(size_t, bool) form directly to avoid the
  1406   // additional call through the allocate(size_t) form.  Having
   1407   // the compiler inline the call is problematic because allocate(size_t)
  1408   // is a virtual method.
  1409   HeapWord* res = allocate(adjustObjectSize(obj_size));
  1410   if (res != NULL) {
  1411     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
  1412     // if we should be tracking promotions, do so.
  1413     if (_promoInfo.tracking()) {
  1414         _promoInfo.track((PromotedObject*)res);
  1417   return oop(res);
  1420 HeapWord*
  1421 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
  1422   assert_locked();
  1423   assert(size >= MinChunkSize, "minimum chunk size");
  1424   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
  1425     "maximum from smallLinearAllocBlock");
  1426   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
  1429 HeapWord*
  1430 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
  1431                                                        size_t size) {
  1432   assert_locked();
  1433   assert(size >= MinChunkSize, "too small");
  1434   HeapWord* res = NULL;
   1435   // Try to do linear allocation from blk, making sure the block is not exhausted.
  1436   if (blk->_word_size == 0) {
  1437     // We have probably been unable to fill this either in the prologue or
  1438     // when it was exhausted at the last linear allocation. Bail out until
  1439     // next time.
  1440     assert(blk->_ptr == NULL, "consistency check");
  1441     return NULL;
  1443   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
  1444   res = getChunkFromLinearAllocBlockRemainder(blk, size);
  1445   if (res != NULL) return res;
   1447   // We are about to exhaust this linear allocation block.
  1448   if (blk->_word_size == size) { // exactly satisfied
  1449     res = blk->_ptr;
  1450     _bt.allocated(res, blk->_word_size);
  1451   } else if (size + MinChunkSize <= blk->_refillSize) {
  1452     size_t sz = blk->_word_size;
  1453     // Update _unallocated_block if the size is such that chunk would be
  1454     // returned to the indexed free list.  All other chunks in the indexed
  1455     // free lists are allocated from the dictionary so that _unallocated_block
   1456     // has already been adjusted for them.  Do it here so that the cost
   1457     // is incurred for all chunks added back to the indexed free lists.
  1458     if (sz < SmallForDictionary) {
  1459       _bt.allocated(blk->_ptr, sz);
  1461     // Return the chunk that isn't big enough, and then refill below.
  1462     addChunkToFreeLists(blk->_ptr, sz);
  1463     split_birth(sz);
  1464     // Don't keep statistics on adding back chunk from a LinAB.
  1465   } else {
  1466     // A refilled block would not satisfy the request.
  1467     return NULL;
  1470   blk->_ptr = NULL; blk->_word_size = 0;
  1471   refillLinearAllocBlock(blk);
  1472   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
  1473          "block was replenished");
  1474   if (res != NULL) {
  1475     split_birth(size);
  1476     repairLinearAllocBlock(blk);
  1477   } else if (blk->_ptr != NULL) {
  1478     res = blk->_ptr;
  1479     size_t blk_size = blk->_word_size;
  1480     blk->_word_size -= size;
  1481     blk->_ptr  += size;
  1482     split_birth(size);
  1483     repairLinearAllocBlock(blk);
  1484     // Update BOT last so that other (parallel) GC threads see a consistent
  1485     // view of the BOT and free blocks.
  1486     // Above must occur before BOT is updated below.
  1487     OrderAccess::storestore();
  1488     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1490   return res;
  1493 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
  1494                                         LinearAllocBlock* blk,
  1495                                         size_t size) {
  1496   assert_locked();
  1497   assert(size >= MinChunkSize, "too small");
  1499   HeapWord* res = NULL;
  1500   // This is the common case.  Keep it simple.
  1501   if (blk->_word_size >= size + MinChunkSize) {
  1502     assert(blk->_ptr != NULL, "consistency check");
  1503     res = blk->_ptr;
  1504     // Note that the BOT is up-to-date for the linAB before allocation.  It
  1505     // indicates the start of the linAB.  The split_block() updates the
  1506     // BOT for the linAB after the allocation (indicates the start of the
  1507     // next chunk to be allocated).
  1508     size_t blk_size = blk->_word_size;
  1509     blk->_word_size -= size;
  1510     blk->_ptr  += size;
  1511     split_birth(size);
  1512     repairLinearAllocBlock(blk);
  1513     // Update BOT last so that other (parallel) GC threads see a consistent
  1514     // view of the BOT and free blocks.
  1515     // Above must occur before BOT is updated below.
  1516     OrderAccess::storestore();
  1517     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1518     _bt.allocated(res, size);
  1520   return res;
  1523 FreeChunk*
  1524 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
  1525   assert_locked();
  1526   assert(size < SmallForDictionary, "just checking");
  1527   FreeChunk* res;
  1528   res = _indexedFreeList[size].get_chunk_at_head();
  1529   if (res == NULL) {
  1530     res = getChunkFromIndexedFreeListHelper(size);
  1532   _bt.verify_not_unallocated((HeapWord*) res, size);
  1533   assert(res == NULL || res->size() == size, "Incorrect block size");
  1534   return res;
  1537 FreeChunk*
  1538 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
  1539   bool replenish) {
  1540   assert_locked();
  1541   FreeChunk* fc = NULL;
  1542   if (size < SmallForDictionary) {
  1543     assert(_indexedFreeList[size].head() == NULL ||
  1544       _indexedFreeList[size].surplus() <= 0,
  1545       "List for this size should be empty or under populated");
  1546     // Try best fit in exact lists before replenishing the list
  1547     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
  1548       // Replenish list.
  1549       //
  1550       // Things tried that failed.
  1551       //   Tried allocating out of the two LinAB's first before
  1552       // replenishing lists.
  1553       //   Tried small linAB of size 256 (size in indexed list)
  1554       // and replenishing indexed lists from the small linAB.
  1555       //
  1556       FreeChunk* newFc = NULL;
  1557       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
  1558       if (replenish_size < SmallForDictionary) {
  1559         // Do not replenish from an underpopulated size.
  1560         if (_indexedFreeList[replenish_size].surplus() > 0 &&
  1561             _indexedFreeList[replenish_size].head() != NULL) {
  1562           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
  1563         } else if (bestFitFirst()) {
  1564           newFc = bestFitSmall(replenish_size);
  1567       if (newFc == NULL && replenish_size > size) {
  1568         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
  1569         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
   1571       // Note: The stats update re the split-death of the block obtained
   1572       // above is recorded below, precisely when we know we are actually
   1573       // going to split it into more than one piece.
  1574       if (newFc != NULL) {
  1575         if  (replenish || CMSReplenishIntermediate) {
  1576           // Replenish this list and return one block to caller.
  1577           size_t i;
  1578           FreeChunk *curFc, *nextFc;
  1579           size_t num_blk = newFc->size() / size;
  1580           assert(num_blk >= 1, "Smaller than requested?");
  1581           assert(newFc->size() % size == 0, "Should be integral multiple of request");
  1582           if (num_blk > 1) {
  1583             // we are sure we will be splitting the block just obtained
  1584             // into multiple pieces; record the split-death of the original
  1585             splitDeath(replenish_size);
  1587           // carve up and link blocks 0, ..., num_blk - 2
  1588           // The last chunk is not added to the lists but is returned as the
  1589           // free chunk.
  1590           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
  1591                i = 0;
  1592                i < (num_blk - 1);
  1593                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
  1594                i++) {
  1595             curFc->set_size(size);
   1596             // Don't record this as a return in order to try to
   1597             // determine the "returns" from a GC.
   1598             _bt.verify_not_unallocated((HeapWord*) curFc, size);
  1599             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
  1600             _bt.mark_block((HeapWord*)curFc, size);
  1601             split_birth(size);
  1602             // Don't record the initial population of the indexed list
  1603             // as a split birth.
  1606           // check that the arithmetic was OK above
  1607           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
  1608             "inconsistency in carving newFc");
  1609           curFc->set_size(size);
  1610           _bt.mark_block((HeapWord*)curFc, size);
  1611           split_birth(size);
  1612           fc = curFc;
  1613         } else {
  1614           // Return entire block to caller
  1615           fc = newFc;
  1619   } else {
  1620     // Get a free chunk from the free chunk dictionary to be returned to
  1621     // replenish the indexed free list.
  1622     fc = getChunkFromDictionaryExact(size);
  1624   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
  1625   return fc;
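// Replenish example for the helper above (values are hypothetical): with
// CMSIndexedFreeListReplenish == 4 and a 32-word request, the helper tries
// to obtain one 128-word chunk; once such a chunk is in hand it is carved
// into four 32-word blocks, the first three go onto the 32-word free list,
// the fourth is handed back to the caller, and the census records one
// split death at 128 words and four split births at 32 words.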
  1628 FreeChunk*
  1629 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
  1630   assert_locked();
  1631   FreeChunk* fc = _dictionary->get_chunk(size,
  1632                                          FreeBlockDictionary<FreeChunk>::atLeast);
  1633   if (fc == NULL) {
  1634     return NULL;
  1636   _bt.allocated((HeapWord*)fc, fc->size());
  1637   if (fc->size() >= size + MinChunkSize) {
  1638     fc = splitChunkAndReturnRemainder(fc, size);
  1640   assert(fc->size() >= size, "chunk too small");
  1641   assert(fc->size() < size + MinChunkSize, "chunk too big");
  1642   _bt.verify_single_block((HeapWord*)fc, fc->size());
  1643   return fc;
  1646 FreeChunk*
  1647 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
  1648   assert_locked();
  1649   FreeChunk* fc = _dictionary->get_chunk(size,
  1650                                          FreeBlockDictionary<FreeChunk>::atLeast);
  1651   if (fc == NULL) {
  1652     return fc;
  1654   _bt.allocated((HeapWord*)fc, fc->size());
  1655   if (fc->size() == size) {
  1656     _bt.verify_single_block((HeapWord*)fc, size);
  1657     return fc;
  1659   assert(fc->size() > size, "get_chunk() guarantee");
  1660   if (fc->size() < size + MinChunkSize) {
  1661     // Return the chunk to the dictionary and go get a bigger one.
  1662     returnChunkToDictionary(fc);
  1663     fc = _dictionary->get_chunk(size + MinChunkSize,
  1664                                 FreeBlockDictionary<FreeChunk>::atLeast);
  1665     if (fc == NULL) {
  1666       return NULL;
  1668     _bt.allocated((HeapWord*)fc, fc->size());
  1670   assert(fc->size() >= size + MinChunkSize, "tautology");
  1671   fc = splitChunkAndReturnRemainder(fc, size);
  1672   assert(fc->size() == size, "chunk is wrong size");
  1673   _bt.verify_single_block((HeapWord*)fc, size);
  1674   return fc;
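// Example walk-through of the retry above (sizes are hypothetical, assuming
// MinChunkSize == 4 words): a 50-word request answered with a 52-word chunk
// cannot be split, because the 2-word remainder would be below MinChunkSize;
// the chunk is returned to the dictionary and a chunk of at least
// 50 + 4 = 54 words is requested instead, so the remainder of the final
// split is always a legal free chunk.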
  1677 void
  1678 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
  1679   assert_locked();
  1681   size_t size = chunk->size();
  1682   _bt.verify_single_block((HeapWord*)chunk, size);
  1683   // adjust _unallocated_block downward, as necessary
  1684   _bt.freed((HeapWord*)chunk, size);
  1685   _dictionary->return_chunk(chunk);
  1686 #ifndef PRODUCT
  1687   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1688     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
  1689     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
  1690     tl->verify_stats();
  1692 #endif // PRODUCT
  1695 void
  1696 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
  1697   assert_locked();
  1698   size_t size = fc->size();
  1699   _bt.verify_single_block((HeapWord*) fc, size);
  1700   _bt.verify_not_unallocated((HeapWord*) fc, size);
  1701   if (_adaptive_freelists) {
  1702     _indexedFreeList[size].return_chunk_at_tail(fc);
  1703   } else {
  1704     _indexedFreeList[size].return_chunk_at_head(fc);
  1706 #ifndef PRODUCT
  1707   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1708      _indexedFreeList[size].verify_stats();
  1710 #endif // PRODUCT
  1713 // Add chunk to end of last block -- if it's the largest
  1714 // block -- and update BOT and census data. We would
  1715 // of course have preferred to coalesce it with the
  1716 // last block, but it's currently less expensive to find the
  1717 // largest block than it is to find the last.
  1718 void
  1719 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
  1720   HeapWord* chunk, size_t     size) {
  1721   // check that the chunk does lie in this space!
  1722   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1723   // One of the parallel gc task threads may be here
  1724   // whilst others are allocating.
  1725   Mutex* lock = NULL;
  1726   if (ParallelGCThreads != 0) {
  1727     lock = &_parDictionaryAllocLock;
  1729   FreeChunk* ec;
  1731     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1732     ec = dictionary()->find_largest_dict();  // get largest block
  1733     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
  1734       // It's a coterminal block - we can coalesce.
  1735       size_t old_size = ec->size();
  1736       coalDeath(old_size);
  1737       removeChunkFromDictionary(ec);
  1738       size += old_size;
  1739     } else {
  1740       ec = (FreeChunk*)chunk;
  1743   ec->set_size(size);
  1744   debug_only(ec->mangleFreed(size));
  1745   if (size < SmallForDictionary && ParallelGCThreads != 0) {
  1746     lock = _indexedFreeListParLocks[size];
  1748   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1749   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  1750   // record the birth under the lock since the recording involves
  1751   // manipulation of the list on which the chunk lives and
  1752   // if the chunk is allocated and is the last on the list,
  1753   // the list can go away.
  1754   coalBirth(size);
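// Coalescing example for the routine above (sizes are hypothetical): if the
// dictionary's largest block is 200 words and ends exactly where a 64-word
// chunk begins, the two re-enter the free data structures as one 264-word
// block, recording a coalesce death at 200 words and a coalesce birth at
// 264 words; otherwise the chunk is added on its own.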
  1757 void
  1758 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
  1759                                               size_t     size) {
  1760   // check that the chunk does lie in this space!
  1761   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1762   assert_locked();
  1763   _bt.verify_single_block(chunk, size);
  1765   FreeChunk* fc = (FreeChunk*) chunk;
  1766   fc->set_size(size);
  1767   debug_only(fc->mangleFreed(size));
  1768   if (size < SmallForDictionary) {
  1769     returnChunkToFreeList(fc);
  1770   } else {
  1771     returnChunkToDictionary(fc);
  1775 void
  1776 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
  1777   size_t size, bool coalesced) {
  1778   assert_locked();
  1779   assert(chunk != NULL, "null chunk");
  1780   if (coalesced) {
  1781     // repair BOT
  1782     _bt.single_block(chunk, size);
  1784   addChunkToFreeLists(chunk, size);
  1787 // We _must_ find the purported chunk on our free lists;
  1788 // we assert if we don't.
  1789 void
  1790 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
  1791   size_t size = fc->size();
  1792   assert_locked();
  1793   debug_only(verifyFreeLists());
  1794   if (size < SmallForDictionary) {
  1795     removeChunkFromIndexedFreeList(fc);
  1796   } else {
  1797     removeChunkFromDictionary(fc);
  1799   _bt.verify_single_block((HeapWord*)fc, size);
  1800   debug_only(verifyFreeLists());
  1803 void
  1804 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
  1805   size_t size = fc->size();
  1806   assert_locked();
  1807   assert(fc != NULL, "null chunk");
  1808   _bt.verify_single_block((HeapWord*)fc, size);
  1809   _dictionary->remove_chunk(fc);
  1810   // adjust _unallocated_block upward, as necessary
  1811   _bt.allocated((HeapWord*)fc, size);
  1814 void
  1815 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
  1816   assert_locked();
  1817   size_t size = fc->size();
  1818   _bt.verify_single_block((HeapWord*)fc, size);
  1819   NOT_PRODUCT(
  1820     if (FLSVerifyIndexTable) {
  1821       verifyIndexedFreeList(size);
  1824   _indexedFreeList[size].remove_chunk(fc);
  1825   NOT_PRODUCT(
  1826     if (FLSVerifyIndexTable) {
  1827       verifyIndexedFreeList(size);
  1832 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
  1833   /* A hint is the next larger size that has a surplus.
  1834      Start search at a size large enough to guarantee that
  1835      the excess is >= MIN_CHUNK. */
  1836   size_t start = align_object_size(numWords + MinChunkSize);
  1837   if (start < IndexSetSize) {
  1838     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
  1839     size_t    hint = _indexedFreeList[start].hint();
  1840     while (hint < IndexSetSize) {
  1841       assert(hint % MinObjAlignment == 0, "hint should be aligned");
  1842       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
  1843       if (fl->surplus() > 0 && fl->head() != NULL) {
  1844         // Found a list with surplus, reset original hint
  1845         // and split out a free chunk which is returned.
  1846         _indexedFreeList[start].set_hint(hint);
  1847         FreeChunk* res = getFromListGreater(fl, numWords);
  1848         assert(res == NULL || res->is_free(),
  1849           "Should be returning a free chunk");
  1850         return res;
  1852       hint = fl->hint(); /* keep looking */
  1854     /* None found. */
  1855     it[start].set_hint(IndexSetSize);
  1857   return NULL;
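// Hint-chain example for the search above (sizes are hypothetical, assuming
// MinChunkSize == 4 words and no extra alignment padding): a 20-word request
// starts at the 24-word slot, whose hint might point at, say, the 40-word
// list; if that list has a surplus and a non-empty head, the 24-word slot's
// hint is reset to 40 and a 40-word chunk is split so that 20 words go to
// the caller and the 20-word remainder returns to a free list. If no hinted
// list has a surplus, the hint is parked at IndexSetSize and NULL is returned.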
  1860 /* Requires fl->size >= numWords + MinChunkSize */
  1861 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
  1862   size_t numWords) {
  1863   FreeChunk *curr = fl->head();
  1864   size_t oldNumWords = curr->size();
  1865   assert(numWords >= MinChunkSize, "Word size is too small");
  1866   assert(curr != NULL, "List is empty");
  1867   assert(oldNumWords >= numWords + MinChunkSize,
  1868         "Size of chunks in the list is too small");
  1870   fl->remove_chunk(curr);
  1871   // recorded indirectly by splitChunkAndReturnRemainder -
  1872   // smallSplit(oldNumWords, numWords);
  1873   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
  1874   // Does anything have to be done for the remainder in terms of
  1875   // fixing the card table?
  1876   assert(new_chunk == NULL || new_chunk->is_free(),
  1877     "Should be returning a free chunk");
  1878   return new_chunk;
  1881 FreeChunk*
  1882 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
  1883   size_t new_size) {
  1884   assert_locked();
  1885   size_t size = chunk->size();
  1886   assert(size > new_size, "Split from a smaller block?");
  1887   assert(is_aligned(chunk), "alignment problem");
  1888   assert(size == adjustObjectSize(size), "alignment problem");
  1889   size_t rem_size = size - new_size;
  1890   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
  1891   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
  1892   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
  1893   assert(is_aligned(ffc), "alignment problem");
  1894   ffc->set_size(rem_size);
  1895   ffc->link_next(NULL);
  1896   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
  1897   // Above must occur before BOT is updated below.
  1898   // adjust block offset table
  1899   OrderAccess::storestore();
  1900   assert(chunk->is_free() && ffc->is_free(), "Error");
  1901   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
  1902   if (rem_size < SmallForDictionary) {
  1903     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  1904     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
  1905     assert(!is_par ||
  1906            (SharedHeap::heap()->n_par_threads() ==
  1907             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  1908     returnChunkToFreeList(ffc);
  1909     split(size, rem_size);
  1910     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
  1911   } else {
  1912     returnChunkToDictionary(ffc);
   1913     split(size, rem_size);
  1915   chunk->set_size(new_size);
  1916   return chunk;
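// Split layout sketch for the routine above (word sizes are hypothetical):
//
//   before:  [ chunk: 100 words, free                  ]
//   after:   [ chunk: 40 words  ][ ffc: 60 words, free ]
//
// The 60-word remainder is marked free and published before the block
// offset table is updated (which is what the storestore barrier above
// enforces), and the leading 40-word piece is what the caller receives.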
  1919 void
  1920 CompactibleFreeListSpace::sweep_completed() {
  1921   // Now that space is probably plentiful, refill linear
  1922   // allocation blocks as needed.
  1923   refillLinearAllocBlocksIfNeeded();
  1926 void
  1927 CompactibleFreeListSpace::gc_prologue() {
  1928   assert_locked();
  1929   if (PrintFLSStatistics != 0) {
  1930     gclog_or_tty->print("Before GC:\n");
  1931     reportFreeListStatistics();
  1933   refillLinearAllocBlocksIfNeeded();
  1936 void
  1937 CompactibleFreeListSpace::gc_epilogue() {
  1938   assert_locked();
  1939   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
  1940     if (_smallLinearAllocBlock._word_size == 0)
  1941       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
  1943   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1944   _promoInfo.stopTrackingPromotions();
  1945   repairLinearAllocationBlocks();
  1946   // Print Space's stats
  1947   if (PrintFLSStatistics != 0) {
  1948     gclog_or_tty->print("After GC:\n");
  1949     reportFreeListStatistics();
  1953 // Iteration support, mostly delegated from a CMS generation
  1955 void CompactibleFreeListSpace::save_marks() {
  1956   assert(Thread::current()->is_VM_thread(),
  1957          "Global variable should only be set when single-threaded");
  1958   // Mark the "end" of the used space at the time of this call;
  1959   // note, however, that promoted objects from this point
  1960   // on are tracked in the _promoInfo below.
  1961   set_saved_mark_word(unallocated_block());
  1962 #ifdef ASSERT
  1963   // Check the sanity of save_marks() etc.
  1964   MemRegion ur    = used_region();
  1965   MemRegion urasm = used_region_at_save_marks();
  1966   assert(ur.contains(urasm),
  1967          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
  1968                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
  1969                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
  1970 #endif
  1971   // inform allocator that promotions should be tracked.
  1972   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1973   _promoInfo.startTrackingPromotions();
  1976 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
  1977   assert(_promoInfo.tracking(), "No preceding save_marks?");
  1978   assert(SharedHeap::heap()->n_par_threads() == 0,
  1979          "Shouldn't be called if using parallel gc.");
  1980   return _promoInfo.noPromotions();
  1983 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
  1985 void CompactibleFreeListSpace::                                             \
  1986 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  1987   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
  1988          "Shouldn't be called (yet) during parallel part of gc.");          \
  1989   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
  1990   /*                                                                        \
  1991    * This also restores any displaced headers and removes the elements from \
  1992    * the iteration set as they are processed, so that we have a clean slate \
  1993    * at the end of the iteration. Note, thus, that if new objects are       \
  1994    * promoted as a result of the iteration they are iterated over as well.  \
  1995    */                                                                       \
  1996   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
  1999 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
  2001 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  2002   return _smallLinearAllocBlock._word_size == 0;
  2005 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
  2006   // Fix up linear allocation blocks to look like free blocks
  2007   repairLinearAllocBlock(&_smallLinearAllocBlock);
  2010 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
  2011   assert_locked();
  2012   if (blk->_ptr != NULL) {
  2013     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
  2014            "Minimum block size requirement");
  2015     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
  2016     fc->set_size(blk->_word_size);
  2017     fc->link_prev(NULL);   // mark as free
  2018     fc->dontCoalesce();
  2019     assert(fc->is_free(), "just marked it free");
  2020     assert(fc->cantCoalesce(), "just marked it uncoalescable");
  2024 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
  2025   assert_locked();
  2026   if (_smallLinearAllocBlock._ptr == NULL) {
  2027     assert(_smallLinearAllocBlock._word_size == 0,
  2028       "Size of linAB should be zero if the ptr is NULL");
  2029     // Reset the linAB refill and allocation size limit.
  2030     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
  2032   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
  2035 void
  2036 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
  2037   assert_locked();
  2038   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
  2039          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
  2040          "blk invariant");
  2041   if (blk->_ptr == NULL) {
  2042     refillLinearAllocBlock(blk);
  2044   if (PrintMiscellaneous && Verbose) {
  2045     if (blk->_word_size == 0) {
  2046       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
  2051 void
  2052 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  2053   assert_locked();
  2054   assert(blk->_word_size == 0 && blk->_ptr == NULL,
  2055          "linear allocation block should be empty");
  2056   FreeChunk* fc;
  2057   if (blk->_refillSize < SmallForDictionary &&
  2058       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
  2059     // A linAB's strategy might be to use small sizes to reduce
  2060     // fragmentation but still get the benefits of allocation from a
  2061     // linAB.
  2062   } else {
  2063     fc = getChunkFromDictionary(blk->_refillSize);
  2065   if (fc != NULL) {
  2066     blk->_ptr  = (HeapWord*)fc;
  2067     blk->_word_size = fc->size();
  2068     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
  2072 // Support for concurrent collection policy decisions.
  2073 bool CompactibleFreeListSpace::should_concurrent_collect() const {
   2074   // In the future we might want to add in fragmentation stats --
  2075   // including erosion of the "mountain" into this decision as well.
  2076   return !adaptive_freelists() && linearAllocationWouldFail();
  2079 // Support for compaction
  2081 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  2082   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  2083   // prepare_for_compaction() uses the space between live objects
   2084   // so that the later phases can skip dead space quickly.  So verification
   2085   // of the free lists doesn't work afterwards.
  2088 #define obj_size(q) adjustObjectSize(oop(q)->size())
  2089 #define adjust_obj_size(s) adjustObjectSize(s)
  2091 void CompactibleFreeListSpace::adjust_pointers() {
  2092   // In other versions of adjust_pointers(), a bail out
  2093   // based on the amount of live data in the generation
  2094   // (i.e., if 0, bail out) may be used.
  2095   // Cannot test used() == 0 here because the free lists have already
  2096   // been mangled by the compaction.
  2098   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
  2099   // See note about verification in prepare_for_compaction().
  2102 void CompactibleFreeListSpace::compact() {
  2103   SCAN_AND_COMPACT(obj_size);
  2106 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
   2107 // where fbs are the free block sizes
  2108 double CompactibleFreeListSpace::flsFrag() const {
  2109   size_t itabFree = totalSizeInIndexedFreeLists();
  2110   double frag = 0.0;
  2111   size_t i;
  2113   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2114     double sz  = i;
  2115     frag      += _indexedFreeList[i].count() * (sz * sz);
  2118   double totFree = itabFree +
  2119                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
  2120   if (totFree > 0) {
  2121     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
  2122             (totFree * totFree));
  2123     frag = (double)1.0  - frag;
  2124   } else {
  2125     assert(frag == 0.0, "Follows from totFree == 0");
  2127   return frag;
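// Worked instances of the metric above (free block sizes are hypothetical):
//   one free block of 100 words:        1 - (100*100) / (100*100) = 0.0
//   one hundred free blocks of 1 word:  1 - (100*1*1) / (100*100) = 0.99
// so 0.0 means all free space sits in a single block, while values near 1.0
// indicate badly fragmented free space.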
  2130 void CompactibleFreeListSpace::beginSweepFLCensus(
  2131   float inter_sweep_current,
  2132   float inter_sweep_estimate,
  2133   float intra_sweep_estimate) {
  2134   assert_locked();
  2135   size_t i;
  2136   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2137     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
  2138     if (PrintFLSStatistics > 1) {
  2139       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
  2141     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
  2142     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
  2143     fl->set_before_sweep(fl->count());
  2144     fl->set_bfr_surp(fl->surplus());
  2146   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
  2147                                     inter_sweep_current,
  2148                                     inter_sweep_estimate,
  2149                                     intra_sweep_estimate);
  2152 void CompactibleFreeListSpace::setFLSurplus() {
  2153   assert_locked();
  2154   size_t i;
  2155   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2156     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2157     fl->set_surplus(fl->count() -
  2158                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
  2162 void CompactibleFreeListSpace::setFLHints() {
  2163   assert_locked();
  2164   size_t i;
  2165   size_t h = IndexSetSize;
  2166   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
  2167     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2168     fl->set_hint(h);
  2169     if (fl->surplus() > 0) {
  2170       h = i;
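// Hint-assignment trace for the loop above (census is hypothetical, assuming
// IndexSetStride == 1): if only the 100-word list has a surplus, then on the
// way down from IndexSetSize - 1 every list of size >= 100 receives hint
// IndexSetSize (meaning "no larger list with surplus"), and every list of
// size < 100 receives hint 100, because h is lowered to i only after list
// i's own hint has been set.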
  2175 void CompactibleFreeListSpace::clearFLCensus() {
  2176   assert_locked();
  2177   size_t i;
  2178   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2179     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2180     fl->set_prev_sweep(fl->count());
  2181     fl->set_coal_births(0);
  2182     fl->set_coal_deaths(0);
  2183     fl->set_split_births(0);
  2184     fl->set_split_deaths(0);
  2188 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  2189   if (PrintFLSStatistics > 0) {
  2190     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
  2191     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
  2192                            p2i(largestAddr));
  2194   setFLSurplus();
  2195   setFLHints();
  2196   if (PrintGC && PrintFLSCensus > 0) {
  2197     printFLCensus(sweep_count);
  2199   clearFLCensus();
  2200   assert_locked();
  2201   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
  2204 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
  2205   if (size < SmallForDictionary) {
  2206     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2207     return (fl->coal_desired() < 0) ||
  2208            ((int)fl->count() > fl->coal_desired());
  2209   } else {
  2210     return dictionary()->coal_dict_over_populated(size);
  2214 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
  2215   assert(size < SmallForDictionary, "Size too large for indexed list");
  2216   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2217   fl->increment_coal_births();
  2218   fl->increment_surplus();
  2221 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
  2222   assert(size < SmallForDictionary, "Size too large for indexed list");
  2223   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2224   fl->increment_coal_deaths();
  2225   fl->decrement_surplus();
  2228 void CompactibleFreeListSpace::coalBirth(size_t size) {
  2229   if (size  < SmallForDictionary) {
  2230     smallCoalBirth(size);
  2231   } else {
  2232     dictionary()->dict_census_update(size,
  2233                                    false /* split */,
  2234                                    true /* birth */);
  2238 void CompactibleFreeListSpace::coalDeath(size_t size) {
  2239   if(size  < SmallForDictionary) {
  2240     smallCoalDeath(size);
  2241   } else {
  2242     dictionary()->dict_census_update(size,
  2243                                    false /* split */,
  2244                                    false /* birth */);
  2248 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
  2249   assert(size < SmallForDictionary, "Size too large for indexed list");
  2250   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2251   fl->increment_split_births();
  2252   fl->increment_surplus();
  2255 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
  2256   assert(size < SmallForDictionary, "Size too large for indexed list");
  2257   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2258   fl->increment_split_deaths();
  2259   fl->decrement_surplus();
  2262 void CompactibleFreeListSpace::split_birth(size_t size) {
  2263   if (size  < SmallForDictionary) {
  2264     smallSplitBirth(size);
  2265   } else {
  2266     dictionary()->dict_census_update(size,
  2267                                    true /* split */,
  2268                                    true /* birth */);
  2272 void CompactibleFreeListSpace::splitDeath(size_t size) {
  2273   if (size  < SmallForDictionary) {
  2274     smallSplitDeath(size);
  2275   } else {
  2276     dictionary()->dict_census_update(size,
  2277                                    true /* split */,
  2278                                    false /* birth */);
  2282 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
  2283   size_t to2 = from - to1;
  2284   splitDeath(from);
  2285   split_birth(to1);
  2286   split_birth(to2);
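// Census bookkeeping example for split() above (sizes are hypothetical):
// splitting a 90-word chunk into a 60-word piece and a 30-word piece records
// one split death at 90 words and one split birth each at 60 and 30 words.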
  2289 void CompactibleFreeListSpace::print() const {
  2290   print_on(tty);
  2293 void CompactibleFreeListSpace::prepare_for_verify() {
  2294   assert_locked();
  2295   repairLinearAllocationBlocks();
  2296   // Verify that the SpoolBlocks look like free blocks of
  2297   // appropriate sizes... To be done ...
  2300 class VerifyAllBlksClosure: public BlkClosure {
  2301  private:
  2302   const CompactibleFreeListSpace* _sp;
  2303   const MemRegion                 _span;
  2304   HeapWord*                       _last_addr;
  2305   size_t                          _last_size;
  2306   bool                            _last_was_obj;
  2307   bool                            _last_was_live;
  2309  public:
  2310   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
  2311     MemRegion span) :  _sp(sp), _span(span),
  2312                        _last_addr(NULL), _last_size(0),
  2313                        _last_was_obj(false), _last_was_live(false) { }
  2315   virtual size_t do_blk(HeapWord* addr) {
  2316     size_t res;
  2317     bool   was_obj  = false;
  2318     bool   was_live = false;
  2319     if (_sp->block_is_obj(addr)) {
  2320       was_obj = true;
  2321       oop p = oop(addr);
  2322       guarantee(p->is_oop(), "Should be an oop");
  2323       res = _sp->adjustObjectSize(p->size());
  2324       if (_sp->obj_is_alive(addr)) {
  2325         was_live = true;
  2326         p->verify();
  2328     } else {
  2329       FreeChunk* fc = (FreeChunk*)addr;
  2330       res = fc->size();
  2331       if (FLSVerifyLists && !fc->cantCoalesce()) {
  2332         guarantee(_sp->verify_chunk_in_free_list(fc),
  2333                   "Chunk should be on a free list");
  2336     if (res == 0) {
  2337       gclog_or_tty->print_cr("Livelock: no rank reduction!");
  2338       gclog_or_tty->print_cr(
  2339         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
  2340         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
  2341         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
  2342         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
  2343       _sp->print_on(gclog_or_tty);
  2344       guarantee(false, "Seppuku!");
  2346     _last_addr = addr;
  2347     _last_size = res;
  2348     _last_was_obj  = was_obj;
  2349     _last_was_live = was_live;
  2350     return res;
  2352 };
  2354 class VerifyAllOopsClosure: public OopClosure {
  2355  private:
  2356   const CMSCollector*             _collector;
  2357   const CompactibleFreeListSpace* _sp;
  2358   const MemRegion                 _span;
  2359   const bool                      _past_remark;
  2360   const CMSBitMap*                _bit_map;
  2362  protected:
  2363   void do_oop(void* p, oop obj) {
  2364     if (_span.contains(obj)) { // the interior oop points into CMS heap
  2365       if (!_span.contains(p)) { // reference from outside CMS heap
  2366         // Should be a valid object; the first disjunct below allows
  2367         // us to sidestep an assertion in block_is_obj() that insists
  2368         // that p be in _sp. Note that several generations (and spaces)
  2369         // are spanned by _span (CMS heap) above.
  2370         guarantee(!_sp->is_in_reserved(obj) ||
  2371                   _sp->block_is_obj((HeapWord*)obj),
  2372                   "Should be an object");
  2373         guarantee(obj->is_oop(), "Should be an oop");
  2374         obj->verify();
  2375         if (_past_remark) {
  2376           // Remark has been completed, the object should be marked
  2377           _bit_map->isMarked((HeapWord*)obj);
  2379       } else { // reference within CMS heap
  2380         if (_past_remark) {
  2381           // Remark has been completed -- so the referent should have
  2382           // been marked, if referring object is.
  2383           if (_bit_map->isMarked(_collector->block_start(p))) {
  2384             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
  2388     } else if (_sp->is_in_reserved(p)) {
  2389       // the reference is from FLS, and points out of FLS
  2390       guarantee(obj->is_oop(), "Should be an oop");
  2391       obj->verify();
  2395   template <class T> void do_oop_work(T* p) {
  2396     T heap_oop = oopDesc::load_heap_oop(p);
  2397     if (!oopDesc::is_null(heap_oop)) {
  2398       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2399       do_oop(p, obj);
  2403  public:
  2404   VerifyAllOopsClosure(const CMSCollector* collector,
  2405     const CompactibleFreeListSpace* sp, MemRegion span,
  2406     bool past_remark, CMSBitMap* bit_map) :
  2407     _collector(collector), _sp(sp), _span(span),
  2408     _past_remark(past_remark), _bit_map(bit_map) { }
  2410   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
  2411   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
  2412 };
  2414 void CompactibleFreeListSpace::verify() const {
  2415   assert_lock_strong(&_freelistLock);
  2416   verify_objects_initialized();
  2417   MemRegion span = _collector->_span;
  2418   bool past_remark = (_collector->abstract_state() ==
  2419                       CMSCollector::Sweeping);
  2421   ResourceMark rm;
  2422   HandleMark  hm;
  2424   // Check integrity of CFL data structures
  2425   _promoInfo.verify();
  2426   _dictionary->verify();
  2427   if (FLSVerifyIndexTable) {
  2428     verifyIndexedFreeLists();
  2430   // Check integrity of all objects and free blocks in space
  2432     VerifyAllBlksClosure cl(this, span);
  2433     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
  2435   // Check that all references in the heap to FLS
  2436   // are to valid objects in FLS or that references in
  2437   // FLS are to valid objects elsewhere in the heap
  2438   if (FLSVerifyAllHeapReferences)
  2440     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
  2441       _collector->markBitMap());
  2442     CollectedHeap* ch = Universe::heap();
  2444     // Iterate over all oops in the heap. Uses the _no_header version
  2445     // since we are not interested in following the klass pointers.
  2446     ch->oop_iterate_no_header(&cl);
  2449   if (VerifyObjectStartArray) {
  2450     // Verify the block offset table
  2451     _bt.verify();
  2455 #ifndef PRODUCT
  2456 void CompactibleFreeListSpace::verifyFreeLists() const {
  2457   if (FLSVerifyLists) {
  2458     _dictionary->verify();
  2459     verifyIndexedFreeLists();
  2460   } else {
  2461     if (FLSVerifyDictionary) {
  2462       _dictionary->verify();
  2464     if (FLSVerifyIndexTable) {
  2465       verifyIndexedFreeLists();
  2469 #endif
  2471 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
  2472   size_t i = 0;
  2473   for (; i < IndexSetStart; i++) {
  2474     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
  2476   for (; i < IndexSetSize; i++) {
  2477     verifyIndexedFreeList(i);
  2481 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
  2482   FreeChunk* fc   =  _indexedFreeList[size].head();
  2483   FreeChunk* tail =  _indexedFreeList[size].tail();
  2484   size_t    num = _indexedFreeList[size].count();
  2485   size_t      n = 0;
  2486   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
  2487             "Slot should have been empty");
  2488   for (; fc != NULL; fc = fc->next(), n++) {
  2489     guarantee(fc->size() == size, "Size inconsistency");
  2490     guarantee(fc->is_free(), "!free?");
  2491     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
  2492     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
  2494   guarantee(n == num, "Incorrect count");
  2497 #ifndef PRODUCT
  2498 void CompactibleFreeListSpace::check_free_list_consistency() const {
  2499   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
  2500     "Some sizes can't be allocated without recourse to"
  2501     " linear allocation buffers");
  2502   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
  2503     "else MIN_TREE_CHUNK_SIZE is wrong");
  2504   assert(IndexSetStart != 0, "IndexSetStart not initialized");
  2505   assert(IndexSetStride != 0, "IndexSetStride not initialized");
  2507 #endif
void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  assert_lock_strong(&_freelistLock);
  AdaptiveFreeList<FreeChunk> total;
  gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
  size_t total_free = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
    total_free += fl->count() * fl->size();
    if (i % (40*IndexSetStride) == 0) {
      AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
    }
    fl->print_on(gclog_or_tty);
    total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
    total.set_surplus(     total.surplus()      + fl->surplus()     );
    total.set_desired(     total.desired()      + fl->desired()     );
    total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
    total.set_before_sweep(total.before_sweep() + fl->before_sweep());
    total.set_count(       total.count()        + fl->count()       );
    total.set_coal_births( total.coal_births()  + fl->coal_births() );
    total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
    total.set_split_births(total.split_births() + fl->split_births());
    total.set_split_deaths(total.split_deaths() + fl->split_deaths());
  }
  total.print_on(gclog_or_tty, "TOTAL");
  gclog_or_tty->print_cr("Total free in indexed lists "
                         SIZE_FORMAT " words", total_free);
  gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
    (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
            (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
    (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
  _dictionary->print_dict_census();
}

///////////////////////////////////////////////////////////////////////////
// CFLS_LAB
///////////////////////////////////////////////////////////////////////////

#define VECTOR_257(x)                                                                                  \
  /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
  {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x }
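
// VECTOR_257(v) expands to a brace-enclosed initializer containing 257
// copies of v (eight rows of 32 plus one), matching
// CompactibleFreeListSpace::IndexSetSize, which the CFLS_LAB constructor
// below asserts to be 257.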

// Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
// OldPLABSize, whose static default is different; if overridden at the
// command-line, this will get reinitialized via a call to
// modify_initialization() below.
AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
  VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);

CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
  _cfls(cfls)
{
  assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    _indexedFreeList[i].set_size(i);
    _num_blocks[i] = 0;
  }
}

static bool _CFLS_LAB_modified = false;

void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
  assert(!_CFLS_LAB_modified, "Call only once");
  _CFLS_LAB_modified = true;
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    _blocks_to_claim[i].modify(n, wt, true /* force */);
  }
}
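
// Allocate word_sz heap words on behalf of the current GC worker.  Requests
// of IndexSetSize words or more go straight to the shared dictionary under
// parDictionaryAllocLock(); smaller requests are served from this LAB's
// private indexed free list for that size, refilling it from the shared
// pool via get_from_global_pool() when it runs dry.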
HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  FreeChunk* res;
  assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
  if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
    // This locking manages sync with other large object allocations.
    MutexLockerEx x(_cfls->parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    res = _cfls->getChunkFromDictionaryExact(word_sz);
    if (res == NULL) return NULL;
  } else {
    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
    if (fl->count() == 0) {
      // Attempt to refill this local free list.
      get_from_global_pool(word_sz, fl);
      // If it didn't work, give up.
      if (fl->count() == 0) return NULL;
    }
    res = fl->get_chunk_at_head();
    assert(res != NULL, "Why was count non-zero?");
  }
  res->markNotFree();
  assert(!res->is_free(), "shouldn't be marked free");
  assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  // mangle a just allocated object with a distinct pattern.
  debug_only(res->mangleAllocated(word_sz));
  return (HeapWord*)res;
}

// Get a chunk of blocks of the right size and update related
// book-keeping stats
void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
  // Get the #blocks we want to claim
  size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
  assert(n_blks > 0, "Error");
  assert(ResizePLAB || n_blks == OldPLABSize, "Error");
  // In some cases, when the application has a phase change,
  // there may be a sudden and sharp shift in the object survival
  // profile, and updating the counts at the end of a scavenge
  // may not be quick enough, giving rise to large scavenge pauses
  // during these phase changes. It is beneficial to detect such
  // changes on-the-fly during a scavenge and avoid such a phase-change
  // pothole. The following code is a heuristic attempt to do that.
  // It is protected by a product flag until we have gained
  // enough experience with this heuristic and fine-tuned its behaviour.
  // WARNING: This might increase fragmentation if we overreact to
  // small spikes, so some kind of historical smoothing based on
  // previous experience with the greater reactivity might be useful.
  // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
  // default.
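  // Concretely (for the code below): if the blocks of this size this LAB has
  // already obtained, _num_blocks[word_sz], amount to `multiple` times
  // CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks, the refill
  // request is scaled up to (1 + CMSOldPLABReactivityFactor*multiple)*n_blks
  // and then capped at CMSOldPLABMax.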
  if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
    size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
    n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
    n_blks = MIN2(n_blks, CMSOldPLABMax);
  }
  assert(n_blks > 0, "Error");
  _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
  // Update stats table entry for this block size
  _num_blocks[word_sz] += fl->count();
}
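
// Recompute the per-size-class refill target (_blocks_to_claim) from the
// global tallies gathered by retire(): the new sample is the number of
// blocks consumed per worker per refill, clamped to
// [CMSOldPLABMin, CMSOldPLABMax], and is folded into the weighted average
// only when ResizeOldPLAB is enabled.  The tallies are reset for the next
// round either way.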
void CFLS_LAB::compute_desired_plab_size() {
  for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
           "Counter inconsistency");
    if (_global_num_workers[i] > 0) {
      // Need to smooth wrt historical average
      if (ResizeOldPLAB) {
        _blocks_to_claim[i].sample(
          MAX2((size_t)CMSOldPLABMin,
          MIN2((size_t)CMSOldPLABMax,
               _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
      }
      // Reset counters for next round
      _global_num_workers[i] = 0;
      _global_num_blocks[i] = 0;
      if (PrintOldPLAB) {
        gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
      }
    }
  }
}

// If this is changed in the future to allow parallel
// access, one would need to take the FL locks and,
// depending on how it is used, stagger access from
// parallel threads to reduce contention.
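// retire() gives back to the space whatever this LAB did not use: for each
// size class it prepends the leftover chunks of the local _indexedFreeList[i]
// onto the space's shared _indexedFreeList[i], credits the blocks actually
// consumed (_num_blocks[i] - num_retire) to the global tallies read by
// compute_desired_plab_size(), and resets the local list and counter.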
void CFLS_LAB::retire(int tid) {
  // We run this single threaded with the world stopped;
  // so no need for locks and such.
  NOT_PRODUCT(Thread* t = Thread::current();)
  assert(Thread::current()->is_VM_thread(), "Error");
  for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
           "Can't retire more than what we obtained");
    if (_num_blocks[i] > 0) {
      size_t num_retire =  _indexedFreeList[i].count();
      assert(_num_blocks[i] > num_retire, "Should have used at least one");
      {
        // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
        //                Mutex::_no_safepoint_check_flag);

        // Update global stats for num_blocks used
        _global_num_blocks[i] += (_num_blocks[i] - num_retire);
        _global_num_workers[i]++;
        assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
        if (num_retire > 0) {
          _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
          // Reset this list.
          _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
          _indexedFreeList[i].set_size(i);
        }
      }
      if (PrintOldPLAB) {
        gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
                               tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
      }
      // Reset stats for next round
      _num_blocks[i]         = 0;
    }
  }
}

void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
  assert(fl->count() == 0, "Precondition.");
  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
         "Precondition");

  // We'll try all multiples of word_sz in the indexed set, starting with
  // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
  // then try getting a big chunk and splitting it.
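  // For example, with word_sz == 8 and n == 100: first try the 8-word list;
  // if CMSSplitIndexedFreeListBlocks, also try the 16-word list (splitting
  // each chunk 2 ways), the 24-word list (3 ways), and so on while
  // k*word_sz < IndexSetSize; only if no indexed list can supply the blocks
  // do we fall through and carve them out of a single dictionary chunk.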
  {
    bool found;
    int  k;
    size_t cur_sz;
    for (k = 1, cur_sz = k * word_sz, found = false;
         (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
         (CMSSplitIndexedFreeListBlocks || k <= 1);
         k++, cur_sz = k * word_sz) {
      AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
      fl_for_cur_sz.set_size(cur_sz);
      {
        MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
                        Mutex::_no_safepoint_check_flag);
        AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
        if (gfl->count() != 0) {
          // nn is the number of chunks of size cur_sz that
          // we'd need to split k-ways each, in order to create
          // "n" chunks of size word_sz each.
          const size_t nn = MAX2(n/k, (size_t)1);
          gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
          found = true;
          if (k > 1) {
            // Update split death stats for the cur_sz-size blocks list:
            // we increment the split death count by the number of blocks
            // we just took from the cur_sz-size blocks list and which
            // we will be splitting below.
            ssize_t deaths = gfl->split_deaths() +
                             fl_for_cur_sz.count();
            gfl->set_split_deaths(deaths);
          }
        }
      }
      // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
      if (found) {
        if (k == 1) {
          fl->prepend(&fl_for_cur_sz);
        } else {
          // Divide each block on fl_for_cur_sz up k ways.
          FreeChunk* fc;
          while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
            // Must do this in reverse order, so that anybody attempting to
            // access the main chunk sees it as a single free block until we
            // change it.
            size_t fc_size = fc->size();
            assert(fc->is_free(), "Error");
            for (int i = k-1; i >= 0; i--) {
              FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
              assert((i != 0) ||
                        ((fc == ffc) && ffc->is_free() &&
                         (ffc->size() == k*word_sz) && (fc_size == word_sz)),
                        "Counting error");
              ffc->set_size(word_sz);
              ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
              ffc->link_next(NULL);
              // Above must occur before BOT is updated below.
              OrderAccess::storestore();
              // splitting from the right, fc_size == i * word_sz
              _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
              fc_size -= word_sz;
              assert(fc_size == i*word_sz, "Error");
              _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
              _bt.verify_single_block((HeapWord*)fc, fc_size);
              _bt.verify_single_block((HeapWord*)ffc, word_sz);
              // Push this on "fl".
              fl->return_chunk_at_head(ffc);
            }
            // TRAP
            assert(fl->tail()->next() == NULL, "List invariant.");
          }
        }
        // Update birth stats for this block size.
        size_t num = fl->count();
        MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                        Mutex::_no_safepoint_check_flag);
        ssize_t births = _indexedFreeList[word_sz].split_births() + num;
        _indexedFreeList[word_sz].set_split_births(births);
        return;
      }
    }
  }
  // Otherwise, we'll split a block from the dictionary.
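  // The dictionary path: grab one chunk of at least n*word_sz words (reducing
  // n if even that much is unavailable), split off any remainder that will
  // not be carved up, and then split the remaining prefix, from the right,
  // into n blocks of word_sz words that are pushed onto "fl".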
  FreeChunk* fc = NULL;
  FreeChunk* rem_fc = NULL;
  size_t rem;
  {
    MutexLockerEx x(parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    while (n > 0) {
      fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
                                   FreeBlockDictionary<FreeChunk>::atLeast);
      if (fc != NULL) {
        _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
        dictionary()->dict_census_update(fc->size(),
                                         true /*split*/,
                                         false /*birth*/);
        break;
      } else {
        n--;
      }
    }
    if (fc == NULL) return;
    // Otherwise, split up that block.
    assert((ssize_t)n >= 1, "Control point invariant");
    assert(fc->is_free(), "Error: should be a free block");
    _bt.verify_single_block((HeapWord*)fc, fc->size());
    const size_t nn = fc->size() / word_sz;
    n = MIN2(nn, n);
    assert((ssize_t)n >= 1, "Control point invariant");
    rem = fc->size() - n * word_sz;
    // If there is a remainder, and it's too small, allocate one fewer.
    if (rem > 0 && rem < MinChunkSize) {
      n--; rem += word_sz;
    }
    // Note that at this point we may have n == 0.
    assert((ssize_t)n >= 0, "Control point invariant");

    // If n is 0, the chunk fc that was found is not large
    // enough to leave a viable remainder.  We are unable to
    // allocate even one block.  Return fc to the
    // dictionary and return, leaving "fl" empty.
    if (n == 0) {
      returnChunkToDictionary(fc);
      assert(fl->count() == 0, "We never allocated any blocks");
      return;
    }

    // First return the remainder, if any.
    // Note that we hold the lock until we decide if we're going to give
    // back the remainder to the dictionary, since a concurrent allocation
    // may otherwise see the heap as empty.  (We're willing to take that
    // hit if the block is a small block.)
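    // A remainder of IndexSetSize words or more goes back to the dictionary
    // while the dictionary lock is still held; a smaller remainder is
    // returned to the appropriate indexed free list after the lock is
    // dropped (see the rem_fc != NULL block below).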
    if (rem > 0) {
      size_t prefix_size = n * word_sz;
      rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
      rem_fc->set_size(rem);
      rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
      rem_fc->link_next(NULL);
      // Above must occur before BOT is updated below.
      assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
      OrderAccess::storestore();
      _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
      assert(fc->is_free(), "Error");
      fc->set_size(prefix_size);
      if (rem >= IndexSetSize) {
        returnChunkToDictionary(rem_fc);
        dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
        rem_fc = NULL;
      }
      // Otherwise, return it to the small list below.
    }
  }
  if (rem_fc != NULL) {
    MutexLockerEx x(_indexedFreeListParLocks[rem],
                    Mutex::_no_safepoint_check_flag);
    _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
    _indexedFreeList[rem].return_chunk_at_head(rem_fc);
    smallSplitBirth(rem);
  }
  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  // Now do the splitting up.
  // Must do this in reverse order, so that anybody attempting to
  // access the main chunk sees it as a single free block until we
  // change it.
  size_t fc_size = n * word_sz;
  // All but first chunk in this loop
  for (ssize_t i = n-1; i > 0; i--) {
    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
    ffc->set_size(word_sz);
    ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
    ffc->link_next(NULL);
    // Above must occur before BOT is updated below.
    OrderAccess::storestore();
    // splitting from the right, fc_size == (n - i + 1) * wordsize
    _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
    fc_size -= word_sz;
    _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)fc, fc_size);
    // Push this on "fl".
    fl->return_chunk_at_head(ffc);
  }
  // First chunk
  assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
  // The blocks above should show their new sizes before the first block below
  fc->set_size(word_sz);
  fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
  fc->link_next(NULL);
  _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  _bt.verify_single_block((HeapWord*)fc, fc->size());
  fl->return_chunk_at_head(fc);

  assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  {
    // Update the stats for this block size.
    MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                    Mutex::_no_safepoint_check_flag);
    const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
    _indexedFreeList[word_sz].set_split_births(births);
    // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
    // _indexedFreeList[word_sz].set_surplus(new_surplus);
  }

  // TRAP
  assert(fl->tail()->next() == NULL, "List invariant.");
}

// Set up the space's par_seq_tasks structure for work claiming
// for parallel rescan. See CMSParRemarkTask where this is currently used.
// XXX Need to suitably abstract and generalize this and the next
// method into one.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_rescan(int n_threads) {
  // The "size" of each task is fixed according to rescan_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = rescan_task_size();
  size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
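  // i.e. n_tasks is the ceiling of used_region().word_size() / task_size,
  // so the last task may cover a partial stride.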
  assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  assert(n_tasks == 0 ||
         ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
          (used_region().start() + n_tasks*task_size >= used_region().end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}

// Set up the space's par_seq_tasks structure for work claiming
// for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_marking(int n_threads,
                                           HeapWord* low) {
  // The "size" of each task is fixed according to marking_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = marking_task_size();
  assert(task_size > CardTableModRefBS::card_size_in_words &&
         (task_size %  CardTableModRefBS::card_size_in_words == 0),
         "Otherwise arithmetic below would be incorrect");
  MemRegion span = _gen->reserved();
  if (low != NULL) {
    if (span.contains(low)) {
      // Align low down to a card boundary so that
      // we can use block_offset_careful() on span boundaries.
      HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
                                 CardTableModRefBS::card_size);
      // Clip span prefix at aligned_low
      span = span.intersection(MemRegion(aligned_low, span.end()));
    } else if (low > span.end()) {
      span = MemRegion(low, low);  // Null region
    } // else use entire span
  }
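  // At this point span is either the generation's reserved region, its
  // suffix starting at the card-aligned low, or an empty region if low lies
  // beyond the end of the reserved region.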
  assert(span.is_empty() ||
         ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
        "span should start at a card boundary");
  size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  assert(n_tasks == 0 ||
         ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
          (span.start() + n_tasks*task_size >= span.end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}
