src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author:      mikael
date:        Tue, 27 Nov 2012 07:57:57 -0800
changeset:   4290:7c15faa95ce7
parent:      4196:685df3c6f84b
child:       4384:b735136e0d82
child:       4465:203f64878aab
permissions: -rw-r--r--

8003879: Duplicate definitions in vmStructs
Summary: Removed duplicate entries
Reviewed-by: dholmes, sspitsyn
     1 /*
     2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
    27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
    28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
    29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
    30 #include "gc_implementation/shared/liveRange.hpp"
    31 #include "gc_implementation/shared/spaceDecorator.hpp"
    32 #include "gc_interface/collectedHeap.inline.hpp"
    33 #include "memory/allocation.inline.hpp"
    34 #include "memory/blockOffsetTable.inline.hpp"
    35 #include "memory/resourceArea.hpp"
    36 #include "memory/universe.inline.hpp"
    37 #include "oops/oop.inline.hpp"
    38 #include "runtime/globals.hpp"
    39 #include "runtime/handles.inline.hpp"
    40 #include "runtime/init.hpp"
    41 #include "runtime/java.hpp"
    42 #include "runtime/vmThread.hpp"
    43 #include "utilities/copy.hpp"
    45 /////////////////////////////////////////////////////////////////////////
    46 //// CompactibleFreeListSpace
    47 /////////////////////////////////////////////////////////////////////////
    49 // highest ranked free list lock rank
    50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
    52 // Defaults are 0 so things will break badly if incorrectly initialized.
    53 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
    54 size_t CompactibleFreeListSpace::IndexSetStride = 0;
    56 size_t MinChunkSize = 0;
    58 void CompactibleFreeListSpace::set_cms_values() {
    59   // Set CMS global values
    60   assert(MinChunkSize == 0, "already set");
    62   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
    63   // for chunks to contain a FreeChunk.
    64   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
    65   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
    67   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
    68   IndexSetStart  = MinChunkSize;
    69   IndexSetStride = MinObjAlignment;
    70 }
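       // From here on MinChunkSize is the smallest block (in heap words) that
       // the space will ever carve out: large enough to hold a FreeChunk header
       // and rounded up to the object alignment.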
    72 // Constructor
    73 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
    74   MemRegion mr, bool use_adaptive_freelists,
    75   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
    76   _dictionaryChoice(dictionaryChoice),
    77   _adaptive_freelists(use_adaptive_freelists),
    78   _bt(bs, mr),
    79   // free list locks are in the range of values taken by _lockRank
    80   // This range currently is [_leaf+2, _leaf+3]
    81   // Note: this requires that CFLspace c'tors
    82   // are called serially in the order in which the locks are
    83   // acquired in the program text. This is true today.
    84   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
    85   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
    86                           "CompactibleFreeListSpace._dict_par_lock", true),
    87   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    88                     CMSRescanMultiple),
    89   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    90                     CMSConcMarkMultiple),
    91   _collector(NULL)
    92 {
    93   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
    94          "FreeChunk is larger than expected");
    95   _bt.set_space(this);
    96   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    97   // We have all of "mr", all of which we place in the dictionary
    98   // as one big chunk. We'll need to decide here which of several
    99   // possible alternative dictionary implementations to use. For
   100   // now the choice is easy, since we have only one working
   101   // implementation, namely, the simple binary tree (splaying
   102   // temporarily disabled).
   103   switch (dictionaryChoice) {
   104     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
   105       _dictionary = new BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>(mr);
   106       break;
   107     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
   108     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
   109     default:
   110       warning("dictionaryChoice: selected option not understood; using"
   111               " default BinaryTreeDictionary implementation instead.");
   112   }
   113   assert(_dictionary != NULL, "CMS dictionary initialization");
   114   // The indexed free lists are initially all empty and are lazily
   115   // filled in on demand. Initialize the array elements to NULL.
   116   initializeIndexedFreeListArray();
   118   // Not using adaptive free lists assumes that allocation is first
   119   // from the linAB's.  Also a cms perm gen which can be compacted
   120   // has to have the klass's klassKlass allocated at a lower
   121   // address in the heap than the klass so that the klassKlass is
   122   // moved to its new location before the klass is moved.
   123   // Set the _refillSize for the linear allocation blocks
   124   if (!use_adaptive_freelists) {
   125     FreeChunk* fc = _dictionary->get_chunk(mr.word_size());
   126     // The small linAB initially has all the space and will allocate
   127     // a chunk of any size.
   128     HeapWord* addr = (HeapWord*) fc;
   129     _smallLinearAllocBlock.set(addr, fc->size() ,
   130       1024*SmallForLinearAlloc, fc->size());
   131     // Note that _unallocated_block is not updated here.
   132     // Allocations from the linear allocation block should
   133     // update it.
   134   } else {
   135     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
   136                                SmallForLinearAlloc);
   137   }
   138   // CMSIndexedFreeListReplenish should be at least 1
   139   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
   140   _promoInfo.setSpace(this);
   141   if (UseCMSBestFit) {
   142     _fitStrategy = FreeBlockBestFitFirst;
   143   } else {
   144     _fitStrategy = FreeBlockStrategyNone;
   145   }
   146   check_free_list_consistency();
   148   // Initialize locks for parallel case.
   150   if (CollectedHeap::use_parallel_gc_threads()) {
   151     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   152       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
   153                                               "a freelist par lock",
   154                                               true);
   155       if (_indexedFreeListParLocks[i] == NULL)
   156         vm_exit_during_initialization("Could not allocate a par lock");
   157       DEBUG_ONLY(
   158         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
   159       )
   160     }
   161     _dictionary->set_par_lock(&_parDictionaryAllocLock);
   162   }
   163 }
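       // At this point the space is fully set up: chunks smaller than
       // IndexSetSize words are kept on the per-size indexed free lists,
       // larger chunks in the dictionary, and the linear allocation block
       // satisfies small requests from a contiguous chunk set aside for it.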
   165 // Like CompactibleSpace forward() but always calls cross_threshold() to
   166 // update the block offset table.  Removed initialize_threshold call because
   167 // CFLS does not use a block offset array for contiguous spaces.
   168 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
   169                                     CompactPoint* cp, HeapWord* compact_top) {
   170   // q is alive
   171   // First check if we should switch compaction space
   172   assert(this == cp->space, "'this' should be current compaction space.");
   173   size_t compaction_max_size = pointer_delta(end(), compact_top);
   174   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
   175     "virtual adjustObjectSize_v() method is not correct");
   176   size_t adjusted_size = adjustObjectSize(size);
   177   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
   178          "no small fragments allowed");
   179   assert(minimum_free_block_size() == MinChunkSize,
   180          "for de-virtualized reference below");
   181   // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
   182   if (adjusted_size + MinChunkSize > compaction_max_size &&
   183       adjusted_size != compaction_max_size) {
   184     do {
   185       // switch to next compaction space
   186       cp->space->set_compaction_top(compact_top);
   187       cp->space = cp->space->next_compaction_space();
   188       if (cp->space == NULL) {
   189         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
   190         assert(cp->gen != NULL, "compaction must succeed");
   191         cp->space = cp->gen->first_compaction_space();
   192         assert(cp->space != NULL, "generation must have a first compaction space");
   193       }
   194       compact_top = cp->space->bottom();
   195       cp->space->set_compaction_top(compact_top);
   196       // The correct adjusted_size may not be the same as that for this method
   197   // (i.e., cp->space may no longer be "this") so adjust the size again.
   198       // Use the virtual method which is not used above to save the virtual
   199       // dispatch.
   200       adjusted_size = cp->space->adjust_object_size_v(size);
   201       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
   202       assert(cp->space->minimum_free_block_size() == 0, "just checking");
   203     } while (adjusted_size > compaction_max_size);
   204   }
   206   // store the forwarding pointer into the mark word
   207   if ((HeapWord*)q != compact_top) {
   208     q->forward_to(oop(compact_top));
   209     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
   210   } else {
   211     // if the object isn't moving we can just set the mark to the default
   212     // mark and handle it specially later on.
   213     q->init_mark();
   214     assert(q->forwardee() == NULL, "should be forwarded to NULL");
   215   }
   217   VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
   218   compact_top += adjusted_size;
   220   // we need to update the offset table so that the beginnings of objects can be
   221   // found during scavenge.  Note that we are updating the offset table based on
   222   // where the object will be once the compaction phase finishes.
   224   // Always call cross_threshold().  A contiguous space can only call it when
   225   // the compaction_top exceeds the current threshold, but not for a
   226   // non-contiguous space.
   227   cp->threshold =
   228     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
   229   return compact_top;
   230 }
   232 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
   233 // and use of single_block instead of alloc_block.  The name here is not really
   234 // appropriate - maybe a more general name could be invented for both the
   235 // contiguous and noncontiguous spaces.
   237 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
   238   _bt.single_block(start, the_end);
   239   return end();
   240 }
   242 // Initialize them to NULL.
   243 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
   244   for (size_t i = 0; i < IndexSetSize; i++) {
   245     // Note that on platforms where objects are double word aligned,
   246     // the odd array elements are not used.  It is convenient, however,
   247     // to map directly from the object size to the array element.
   248     _indexedFreeList[i].reset(IndexSetSize);
   249     _indexedFreeList[i].set_size(i);
   250     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   251     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   252     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   253     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   254   }
   255 }
   257 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
   258   for (size_t i = 1; i < IndexSetSize; i++) {
   259     assert(_indexedFreeList[i].size() == (size_t) i,
   260       "Indexed free list sizes are incorrect");
   261     _indexedFreeList[i].reset(IndexSetSize);
   262     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   263     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   264     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   265     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   266   }
   267 }
   269 void CompactibleFreeListSpace::reset(MemRegion mr) {
   270   resetIndexedFreeListArray();
   271   dictionary()->reset();
   272   if (BlockOffsetArrayUseUnallocatedBlock) {
   273     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
   274     // Everything's allocated until proven otherwise.
   275     _bt.set_unallocated_block(end());
   276   }
   277   if (!mr.is_empty()) {
   278     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
   279     _bt.single_block(mr.start(), mr.word_size());
   280     FreeChunk* fc = (FreeChunk*) mr.start();
   281     fc->set_size(mr.word_size());
   282     if (mr.word_size() >= IndexSetSize ) {
   283       returnChunkToDictionary(fc);
   284     } else {
   285       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
   286       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
   287     }
   288   }
   289   _promoInfo.reset();
   290   _smallLinearAllocBlock._ptr = NULL;
   291   _smallLinearAllocBlock._word_size = 0;
   292 }
   294 void CompactibleFreeListSpace::reset_after_compaction() {
   295   // Reset the space to the new reality - one free chunk.
   296   MemRegion mr(compaction_top(), end());
   297   reset(mr);
   298   // Now refill the linear allocation block(s) if possible.
   299   if (_adaptive_freelists) {
   300     refillLinearAllocBlocksIfNeeded();
   301   } else {
   302     // Place as much of mr in the linAB as we can get,
   303     // provided it was big enough to go into the dictionary.
   304     FreeChunk* fc = dictionary()->find_largest_dict();
   305     if (fc != NULL) {
   306       assert(fc->size() == mr.word_size(),
   307              "Why was the chunk broken up?");
   308       removeChunkFromDictionary(fc);
   309       HeapWord* addr = (HeapWord*) fc;
   310       _smallLinearAllocBlock.set(addr, fc->size() ,
   311         1024*SmallForLinearAlloc, fc->size());
   312       // Note that _unallocated_block is not updated here.
   313     }
   314   }
   315 }
   317 // Walks the entire dictionary, returning a coterminal
   318 // chunk, if it exists. Use with caution since it involves
   319 // a potentially complete walk of a potentially large tree.
   320 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
   322   assert_lock_strong(&_freelistLock);
   324   return dictionary()->find_chunk_ends_at(end());
   325 }
   328 #ifndef PRODUCT
   329 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
   330   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   331     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
   332   }
   333 }
   335 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
   336   size_t sum = 0;
   337   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   338     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
   339   }
   340   return sum;
   341 }
   343 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
   344   size_t count = 0;
   345   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
   346     debug_only(
   347       ssize_t total_list_count = 0;
   348       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   349          fc = fc->next()) {
   350         total_list_count++;
   351       }
   352       assert(total_list_count ==  _indexedFreeList[i].count(),
   353         "Count in list is incorrect");
   354     )
   355     count += _indexedFreeList[i].count();
   356   }
   357   return count;
   358 }
   360 size_t CompactibleFreeListSpace::totalCount() {
   361   size_t num = totalCountInIndexedFreeLists();
   362   num +=  dictionary()->total_count();
   363   if (_smallLinearAllocBlock._word_size != 0) {
   364     num++;
   365   }
   366   return num;
   367 }
   368 #endif
   370 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
   371   FreeChunk* fc = (FreeChunk*) p;
   372   return fc->is_free();
   373 }
   375 size_t CompactibleFreeListSpace::used() const {
   376   return capacity() - free();
   377 }
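       // Note that used() is derived from free(), so it inherits the
       // "MT-safe but not MT-precise" caveat documented for free() below.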
   379 size_t CompactibleFreeListSpace::free() const {
   380   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   381   // if you do this while the structures are in flux you
   382   // may get an approximate answer only; for instance
   383   // because there is concurrent allocation either
   384   // directly by mutators or for promotion during a GC.
   385   // It's "MT-safe", however, in the sense that you are guaranteed
   386   // not to crash and burn, for instance, because of walking
   387   // pointers that could disappear as you were walking them.
   388   // The approximation is because the various components
   389   // that are read below are not read atomically (and
   390   // further the computation of totalSizeInIndexedFreeLists()
   391   // is itself a non-atomic computation). The normal use of
   392   // this is during a resize operation at the end of GC
   393   // and at that time you are guaranteed to get the
   394   // correct actual value. However, for instance, this is
   395   // also read completely asynchronously by the "perf-sampler"
   396   // that supports jvmstat, and you are apt to see the values
   397   // flicker in such cases.
   398   assert(_dictionary != NULL, "No _dictionary?");
   399   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
   400           totalSizeInIndexedFreeLists() +
   401           _smallLinearAllocBlock._word_size) * HeapWordSize;
   402 }
   404 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
   405   assert(_dictionary != NULL, "No _dictionary?");
   406   assert_locked();
   407   size_t res = _dictionary->max_chunk_size();
   408   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
   409                        (size_t) SmallForLinearAlloc - 1));
   410   // XXX the following could potentially be pretty slow;
   411   // should one, pessimally for the rare cases when res
   412   // calculated above is less than IndexSetSize,
   413   // just return res calculated above? My reasoning was that
   414   // those cases will be so rare that the extra time spent doesn't
   415   // really matter....
   416   // Note: do not change the loop test i >= res + IndexSetStride
   417   // to i > res below, because i is unsigned and res may be zero.
   418   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
   419        i -= IndexSetStride) {
   420     if (_indexedFreeList[i].head() != NULL) {
   421       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   422       return i;
   423     }
   424   }
   425   return res;
   426 }
   428 void LinearAllocBlock::print_on(outputStream* st) const {
   429   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
   430             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
   431             _ptr, _word_size, _refillSize, _allocation_size_limit);
   432 }
   434 void CompactibleFreeListSpace::print_on(outputStream* st) const {
   435   st->print_cr("COMPACTIBLE FREELIST SPACE");
   436   st->print_cr(" Space:");
   437   Space::print_on(st);
   439   st->print_cr("promoInfo:");
   440   _promoInfo.print_on(st);
   442   st->print_cr("_smallLinearAllocBlock");
   443   _smallLinearAllocBlock.print_on(st);
   445   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
   447   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
   448                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
   449 }
   451 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
   452 const {
   453   reportIndexedFreeListStatistics();
   454   gclog_or_tty->print_cr("Layout of Indexed Freelists");
   455   gclog_or_tty->print_cr("---------------------------");
   456   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
   457   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   458     _indexedFreeList[i].print_on(gclog_or_tty);
   459     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   460          fc = fc->next()) {
   461       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
   462                           fc, (HeapWord*)fc + i,
   463                           fc->cantCoalesce() ? "\t CC" : "");
   464     }
   465   }
   466 }
   468 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
   469 const {
   470   _promoInfo.print_on(st);
   471 }
   473 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
   474 const {
   475   _dictionary->report_statistics();
   476   st->print_cr("Layout of Freelists in Tree");
   477   st->print_cr("---------------------------");
   478   _dictionary->print_free_lists(st);
   479 }
   481 class BlkPrintingClosure: public BlkClosure {
   482   const CMSCollector*             _collector;
   483   const CompactibleFreeListSpace* _sp;
   484   const CMSBitMap*                _live_bit_map;
   485   const bool                      _post_remark;
   486   outputStream*                   _st;
   487 public:
   488   BlkPrintingClosure(const CMSCollector* collector,
   489                      const CompactibleFreeListSpace* sp,
   490                      const CMSBitMap* live_bit_map,
   491                      outputStream* st):
   492     _collector(collector),
   493     _sp(sp),
   494     _live_bit_map(live_bit_map),
   495     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
   496     _st(st) { }
   497   size_t do_blk(HeapWord* addr);
   498 };
   500 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
   501   size_t sz = _sp->block_size_no_stall(addr, _collector);
   502   assert(sz != 0, "Should always be able to compute a size");
   503   if (_sp->block_is_obj(addr)) {
   504     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
   505     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
   506       addr,
   507       dead ? "dead" : "live",
   508       sz,
   509       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
   510     if (CMSPrintObjectsInDump && !dead) {
   511       oop(addr)->print_on(_st);
   512       _st->print_cr("--------------------------------------");
   513     }
   514   } else { // free block
   515     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
   516       addr, sz, CMSPrintChunksInDump ? ":" : ".");
   517     if (CMSPrintChunksInDump) {
   518       ((FreeChunk*)addr)->print_on(_st);
   519       _st->print_cr("--------------------------------------");
   520     }
   521   }
   522   return sz;
   523 }
   525 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
   526   outputStream* st) {
   527   st->print_cr("\n=========================");
   528   st->print_cr("Block layout in CMS Heap:");
   529   st->print_cr("=========================");
   530   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
   531   blk_iterate(&bpcl);
   533   st->print_cr("\n=======================================");
   534   st->print_cr("Order & Layout of Promotion Info Blocks");
   535   st->print_cr("=======================================");
   536   print_promo_info_blocks(st);
   538   st->print_cr("\n===========================");
   539   st->print_cr("Order of Indexed Free Lists");
   540   st->print_cr("===========================");
   541   print_indexed_free_lists(st);
   543   st->print_cr("\n=================================");
   544   st->print_cr("Order of Free Lists in Dictionary");
   545   st->print_cr("=================================");
   546   print_dictionary_free_lists(st);
   547 }
   550 void CompactibleFreeListSpace::reportFreeListStatistics() const {
   551   assert_lock_strong(&_freelistLock);
   552   assert(PrintFLSStatistics != 0, "Reporting error");
   553   _dictionary->report_statistics();
   554   if (PrintFLSStatistics > 1) {
   555     reportIndexedFreeListStatistics();
   556     size_t total_size = totalSizeInIndexedFreeLists() +
   557                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
   558     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
   559   }
   560 }
   562 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
   563   assert_lock_strong(&_freelistLock);
   564   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
   565                       "--------------------------------\n");
   566   size_t total_size = totalSizeInIndexedFreeLists();
   567   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
   568   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
   569   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
   570   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
   571   if (free_blocks != 0) {
   572     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
   573   }
   574 }
   576 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
   577   size_t res = 0;
   578   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   579     debug_only(
   580       ssize_t recount = 0;
   581       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   582          fc = fc->next()) {
   583         recount += 1;
   584       }
   585       assert(recount == _indexedFreeList[i].count(),
   586         "Incorrect count in list");
   587     )
   588     res += _indexedFreeList[i].count();
   589   }
   590   return res;
   591 }
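       // Return the size (in heap words) of the largest non-empty indexed free
       // list, scanning from the largest indexed size downwards, or 0 if all of
       // the indexed lists are empty.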
   593 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
   594   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
   595     if (_indexedFreeList[i].head() != NULL) {
   596       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   597       return (size_t)i;
   598     }
   599   }
   600   return 0;
   601 }
   603 void CompactibleFreeListSpace::set_end(HeapWord* value) {
   604   HeapWord* prevEnd = end();
   605   assert(prevEnd != value, "unnecessary set_end call");
   606   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   607         "New end is below unallocated block");
   608   _end = value;
   609   if (prevEnd != NULL) {
   610     // Resize the underlying block offset table.
   611     _bt.resize(pointer_delta(value, bottom()));
   612     if (value <= prevEnd) {
   613       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   614              "New end is below unallocated block");
   615     } else {
   616       // Now, take this new chunk and add it to the free blocks.
   617       // Note that the BOT has not yet been updated for this block.
   618       size_t newFcSize = pointer_delta(value, prevEnd);
   619       // XXX This is REALLY UGLY and should be fixed up. XXX
   620       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
   621         // Mark the boundary of the new block in BOT
   622         _bt.mark_block(prevEnd, value);
   623         // put it all in the linAB
   624         if (ParallelGCThreads == 0) {
   625           _smallLinearAllocBlock._ptr = prevEnd;
   626           _smallLinearAllocBlock._word_size = newFcSize;
   627           repairLinearAllocBlock(&_smallLinearAllocBlock);
   628         } else { // ParallelGCThreads > 0
   629           MutexLockerEx x(parDictionaryAllocLock(),
   630                           Mutex::_no_safepoint_check_flag);
   631           _smallLinearAllocBlock._ptr = prevEnd;
   632           _smallLinearAllocBlock._word_size = newFcSize;
   633           repairLinearAllocBlock(&_smallLinearAllocBlock);
   634         }
   635         // Births of chunks put into a LinAB are not recorded.  Births
   636         // of chunks as they are allocated out of a LinAB are.
   637       } else {
   638         // Add the block to the free lists, if possible coalescing it
   639         // with the last free block, and update the BOT and census data.
   640         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
   641       }
   642     }
   643   }
   644 }
   646 class FreeListSpace_DCTOC : public Filtering_DCTOC {
   647   CompactibleFreeListSpace* _cfls;
   648   CMSCollector* _collector;
   649 protected:
   650   // Override.
   651 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
   652   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
   653                                        HeapWord* bottom, HeapWord* top, \
   654                                        ClosureType* cl);                \
   655       void walk_mem_region_with_cl_par(MemRegion mr,                    \
   656                                        HeapWord* bottom, HeapWord* top, \
   657                                        ClosureType* cl);                \
   658     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
   659                                        HeapWord* bottom, HeapWord* top, \
   660                                        ClosureType* cl)
   661   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
   662   walk_mem_region_with_cl_DECL(FilteringClosure);
   664 public:
   665   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
   666                       CMSCollector* collector,
   667                       ExtendedOopClosure* cl,
   668                       CardTableModRefBS::PrecisionStyle precision,
   669                       HeapWord* boundary) :
   670     Filtering_DCTOC(sp, cl, precision, boundary),
   671     _cfls(sp), _collector(collector) {}
   672 };
   674 // We de-virtualize the block-related calls below, since we know that our
   675 // space is a CompactibleFreeListSpace.
   677 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
   678 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
   679                                                  HeapWord* bottom,              \
   680                                                  HeapWord* top,                 \
   681                                                  ClosureType* cl) {             \
   682    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
   683    if (is_par) {                                                                \
   684      assert(SharedHeap::heap()->n_par_threads() ==                              \
   685             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
   686      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   687    } else {                                                                     \
   688      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   689    }                                                                            \
   690 }                                                                               \
   691 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
   692                                                       HeapWord* bottom,         \
   693                                                       HeapWord* top,            \
   694                                                       ClosureType* cl) {        \
   695   /* Skip parts that are before "mr", in case "block_start" sent us             \
   696      back too far. */                                                           \
   697   HeapWord* mr_start = mr.start();                                              \
   698   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
   699   HeapWord* next = bottom + bot_size;                                           \
   700   while (next < mr_start) {                                                     \
   701     bottom = next;                                                              \
   702     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
   703     next = bottom + bot_size;                                                   \
   704   }                                                                             \
   705                                                                                 \
   706   while (bottom < top) {                                                        \
   707     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
   708         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   709                     oop(bottom)) &&                                             \
   710         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   711       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   712       bottom += _cfls->adjustObjectSize(word_sz);                               \
   713     } else {                                                                    \
   714       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
   715     }                                                                           \
   716   }                                                                             \
   717 }                                                                               \
   718 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
   719                                                         HeapWord* bottom,       \
   720                                                         HeapWord* top,          \
   721                                                         ClosureType* cl) {      \
   722   /* Skip parts that are before "mr", in case "block_start" sent us             \
   723      back too far. */                                                           \
   724   HeapWord* mr_start = mr.start();                                              \
   725   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
   726   HeapWord* next = bottom + bot_size;                                           \
   727   while (next < mr_start) {                                                     \
   728     bottom = next;                                                              \
   729     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
   730     next = bottom + bot_size;                                                   \
   731   }                                                                             \
   732                                                                                 \
   733   while (bottom < top) {                                                        \
   734     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
   735         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   736                     oop(bottom)) &&                                             \
   737         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   738       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   739       bottom += _cfls->adjustObjectSize(word_sz);                               \
   740     } else {                                                                    \
   741       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
   742     }                                                                           \
   743   }                                                                             \
   744 }
   746 // (There are only two of these, rather than N, because the split is due
   747 // only to the introduction of the FilteringClosure, a local part of the
   748 // impl of this abstraction.)
   749 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
   750 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
   752 DirtyCardToOopClosure*
   753 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
   754                                       CardTableModRefBS::PrecisionStyle precision,
   755                                       HeapWord* boundary) {
   756   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
   757 }
   760 // Note on locking for the space iteration functions:
   761 // since the collector's iteration activities are concurrent with
   762 // allocation activities by mutators, absent a suitable mutual exclusion
   763 // mechanism the iterators may go awry. For instance a block being iterated
   764 // may suddenly be allocated or divided up and part of it allocated and
   765 // so on.
   767 // Apply the given closure to each block in the space.
   768 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
   769   assert_lock_strong(freelistLock());
   770   HeapWord *cur, *limit;
   771   for (cur = bottom(), limit = end(); cur < limit;
   772        cur += cl->do_blk_careful(cur));
   773 }
   775 // Apply the given closure to each block in the space.
   776 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
   777   assert_lock_strong(freelistLock());
   778   HeapWord *cur, *limit;
   779   for (cur = bottom(), limit = end(); cur < limit;
   780        cur += cl->do_blk(cur));
   781 }
   783 // Apply the given closure to each oop in the space.
   784 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
   785   assert_lock_strong(freelistLock());
   786   HeapWord *cur, *limit;
   787   size_t curSize;
   788   for (cur = bottom(), limit = end(); cur < limit;
   789        cur += curSize) {
   790     curSize = block_size(cur);
   791     if (block_is_obj(cur)) {
   792       oop(cur)->oop_iterate(cl);
   793     }
   794   }
   795 }
   797 // Apply the given closure to each oop in the space \intersect memory region.
   798 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
   799   assert_lock_strong(freelistLock());
   800   if (is_empty()) {
   801     return;
   802   }
   803   MemRegion cur = MemRegion(bottom(), end());
   804   mr = mr.intersection(cur);
   805   if (mr.is_empty()) {
   806     return;
   807   }
   808   if (mr.equals(cur)) {
   809     oop_iterate(cl);
   810     return;
   811   }
   812   assert(mr.end() <= end(), "just took an intersection above");
   813   HeapWord* obj_addr = block_start(mr.start());
   814   HeapWord* t = mr.end();
   816   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
   817   if (block_is_obj(obj_addr)) {
   818     // Handle first object specially.
   819     oop obj = oop(obj_addr);
   820     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
   821   } else {
   822     FreeChunk* fc = (FreeChunk*)obj_addr;
   823     obj_addr += fc->size();
   824   }
   825   while (obj_addr < t) {
   826     HeapWord* obj = obj_addr;
   827     obj_addr += block_size(obj_addr);
   828     // If "obj_addr" is not greater than top, then the
   829     // entire object "obj" is within the region.
   830     if (obj_addr <= t) {
   831       if (block_is_obj(obj)) {
   832         oop(obj)->oop_iterate(cl);
   833       }
   834     } else {
   835       // "obj" extends beyond end of region
   836       if (block_is_obj(obj)) {
   837         oop(obj)->oop_iterate(&smr_blk);
   838       }
   839       break;
   840     }
   841   }
   842 }
   844 // NOTE: In the following methods, in order to safely be able to
   845 // apply the closure to an object, we need to be sure that the
   846 // object has been initialized. We are guaranteed that an object
   847 // is initialized if we are holding the Heap_lock with the
   848 // world stopped.
   849 void CompactibleFreeListSpace::verify_objects_initialized() const {
   850   if (is_init_completed()) {
   851     assert_locked_or_safepoint(Heap_lock);
   852     if (Universe::is_fully_initialized()) {
   853       guarantee(SafepointSynchronize::is_at_safepoint(),
   854                 "Required for objects to be initialized");
   855     }
   856   } // else make a concession at vm start-up
   857 }
   859 // Apply the given closure to each object in the space
   860 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
   861   assert_lock_strong(freelistLock());
   862   NOT_PRODUCT(verify_objects_initialized());
   863   HeapWord *cur, *limit;
   864   size_t curSize;
   865   for (cur = bottom(), limit = end(); cur < limit;
   866        cur += curSize) {
   867     curSize = block_size(cur);
   868     if (block_is_obj(cur)) {
   869       blk->do_object(oop(cur));
   870     }
   871   }
   872 }
   874 // Apply the given closure to each live object in the space
   875 //   The usage of CompactibleFreeListSpace
   876 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
   877 // objects in the space to hold references to objects that are no longer
   878 // valid.  For example, an object may reference another object
   879 // that has already been swept up (collected).  This method uses
   880 // obj_is_alive() to determine whether it is safe to apply the closure to
   881 // an object.  See obj_is_alive() for details on how liveness of an
   882 // object is decided.
   884 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
   885   assert_lock_strong(freelistLock());
   886   NOT_PRODUCT(verify_objects_initialized());
   887   HeapWord *cur, *limit;
   888   size_t curSize;
   889   for (cur = bottom(), limit = end(); cur < limit;
   890        cur += curSize) {
   891     curSize = block_size(cur);
   892     if (block_is_obj(cur) && obj_is_alive(cur)) {
   893       blk->do_object(oop(cur));
   894     }
   895   }
   896 }
   898 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
   899                                                   UpwardsObjectClosure* cl) {
   900   assert_locked(freelistLock());
   901   NOT_PRODUCT(verify_objects_initialized());
   902   Space::object_iterate_mem(mr, cl);
   903 }
   905 // Callers of this iterator beware: The closure application should
   906 // be robust in the face of uninitialized objects and should (always)
   907 // return a correct size so that the next addr + size below gives us a
   908 // valid block boundary. [See for instance,
   909 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   910 // in ConcurrentMarkSweepGeneration.cpp.]
   911 HeapWord*
   912 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
   913   assert_lock_strong(freelistLock());
   914   HeapWord *addr, *last;
   915   size_t size;
   916   for (addr = bottom(), last  = end();
   917        addr < last; addr += size) {
   918     FreeChunk* fc = (FreeChunk*)addr;
   919     if (fc->is_free()) {
   920       // Since we hold the free list lock, which protects direct
   921       // allocation in this generation by mutators, a free object
   922       // will remain free throughout this iteration code.
   923       size = fc->size();
   924     } else {
   925       // Note that the object need not necessarily be initialized,
   926       // because (for instance) the free list lock does NOT protect
   927       // object initialization. The closure application below must
   928       // therefore be correct in the face of uninitialized objects.
   929       size = cl->do_object_careful(oop(addr));
   930       if (size == 0) {
   931         // An unparsable object found. Signal early termination.
   932         return addr;
   933       }
   934     }
   935   }
   936   return NULL;
   937 }
   939 // Callers of this iterator beware: The closure application should
   940 // be robust in the face of uninitialized objects and should (always)
   941 // return a correct size so that the next addr + size below gives us a
   942 // valid block boundary. [See for instance,
   943 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   944 // in ConcurrentMarkSweepGeneration.cpp.]
   945 HeapWord*
   946 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
   947   ObjectClosureCareful* cl) {
   948   assert_lock_strong(freelistLock());
   949   // Can't use used_region() below because it may not necessarily
   950   // be the same as [bottom(),end()); although we could
   951   // use [used_region().start(),round_to(used_region().end(),CardSize)),
   952   // that appears too cumbersome, so we just do the simpler check
   953   // in the assertion below.
   954   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
   955          "mr should be non-empty and within used space");
   956   HeapWord *addr, *end;
   957   size_t size;
   958   for (addr = block_start_careful(mr.start()), end  = mr.end();
   959        addr < end; addr += size) {
   960     FreeChunk* fc = (FreeChunk*)addr;
   961     if (fc->is_free()) {
   962       // Since we hold the free list lock, which protects direct
   963       // allocation in this generation by mutators, a free object
   964       // will remain free throughout this iteration code.
   965       size = fc->size();
   966     } else {
   967       // Note that the object need not necessarily be initialized,
   968       // because (for instance) the free list lock does NOT protect
   969       // object initialization. The closure application below must
   970       // therefore be correct in the face of uninitialized objects.
   971       size = cl->do_object_careful_m(oop(addr), mr);
   972       if (size == 0) {
   973         // An unparsable object found. Signal early termination.
   974         return addr;
   975       }
   976     }
   977   }
   978   return NULL;
   979 }
   982 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
   983   NOT_PRODUCT(verify_objects_initialized());
   984   return _bt.block_start(p);
   985 }
   987 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
   988   return _bt.block_start_careful(p);
   989 }
   991 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   992   NOT_PRODUCT(verify_objects_initialized());
   993   // This must be volatile, or else there is a danger that the compiler
   994   // will compile the code below into a sometimes-infinite loop, by keeping
   995   // the value read the first time in a register.
   996   while (true) {
   997     // We must do this until we get a consistent view of the object.
   998     if (FreeChunk::indicatesFreeChunk(p)) {
   999       volatile FreeChunk* fc = (volatile FreeChunk*)p;
  1000       size_t res = fc->size();
  1001       // If the object is still a free chunk, return the size, else it
  1002       // has been allocated so try again.
  1003       if (FreeChunk::indicatesFreeChunk(p)) {
  1004         assert(res != 0, "Block size should not be 0");
  1005         return res;
  1006       }
  1007     } else {
  1008       // must read from what 'p' points to in each loop.
  1009       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
  1010       if (k != NULL) {
  1011         assert(k->is_klass(), "Should really be klass oop.");
  1012         oop o = (oop)p;
  1013         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
  1014         size_t res = o->size_given_klass(k);
  1015         res = adjustObjectSize(res);
  1016         assert(res != 0, "Block size should not be 0");
  1017         return res;
  1018       }
  1019     }
  1020   }
  1021 }
  1023 // TODO: Now that is_parsable is gone, we should combine these two functions.
  1024 // A variant of the above that uses the Printezis bits for
  1025 // unparsable but allocated objects. This avoids any possible
  1026 // stalls waiting for mutators to initialize objects, and is
  1027 // thus potentially faster than the variant above. However,
  1028 // this variant may return a zero size for a block that is
  1029 // under mutation and for which a consistent size cannot be
  1030 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
  1031 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
  1032                                                      const CMSCollector* c)
  1033 const {
  1034   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1035   // This must be volatile, or else there is a danger that the compiler
  1036   // will compile the code below into a sometimes-infinite loop, by keeping
  1037   // the value read the first time in a register.
  1038   DEBUG_ONLY(uint loops = 0;)
  1039   while (true) {
  1040     // We must do this until we get a consistent view of the object.
  1041     if (FreeChunk::indicatesFreeChunk(p)) {
  1042       volatile FreeChunk* fc = (volatile FreeChunk*)p;
  1043       size_t res = fc->size();
  1044       if (FreeChunk::indicatesFreeChunk(p)) {
  1045         assert(res != 0, "Block size should not be 0");
  1046         assert(loops == 0, "Should be 0");
  1047         return res;
  1048       }
  1049     } else {
  1050       // must read from what 'p' points to in each loop.
  1051       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
  1052       // We trust the size of any object that has a non-NULL
  1053       // klass and (for those in the perm gen) is parsable
  1054       // -- irrespective of its conc_safe-ty.
  1055       if (k != NULL) {
  1056         assert(k->is_klass(), "Should really be klass oop.");
  1057         oop o = (oop)p;
  1058         assert(o->is_oop(), "Should be an oop");
  1059         size_t res = o->size_given_klass(k);
  1060         res = adjustObjectSize(res);
  1061         assert(res != 0, "Block size should not be 0");
  1062         return res;
  1063       } else {
  1064         // May return 0 if P-bits not present.
  1065         return c->block_size_if_printezis_bits(p);
  1066       }
  1067     }
  1068     assert(loops == 0, "Can loop at most once");
  1069     DEBUG_ONLY(loops++;)
  1070   }
  1071 }
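       // A simpler variant of block_size() for use when no parallel GC threads
       // can concurrently be converting the block between free chunk and object,
       // so a single read of the free bit suffices.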
  1073 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
  1074   NOT_PRODUCT(verify_objects_initialized());
  1075   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1076   FreeChunk* fc = (FreeChunk*)p;
  1077   if (fc->is_free()) {
  1078     return fc->size();
  1079   } else {
  1080     // Ignore mark word because this may be a recently promoted
  1081     // object whose mark word is used to chain together grey
  1082     // objects (the last one would have a null value).
  1083     assert(oop(p)->is_oop(true), "Should be an oop");
  1084     return adjustObjectSize(oop(p)->size());
  1085   }
  1086 }
  1088 // This implementation assumes that the property of "being an object" is
  1089 // stable.  But being a free chunk may not be (because of parallel
  1090 // promotion.)
  1091 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
  1092   FreeChunk* fc = (FreeChunk*)p;
  1093   assert(is_in_reserved(p), "Should be in space");
  1094   // When doing a mark-sweep-compact of the CMS generation, this
  1095   // assertion may fail because prepare_for_compaction() uses
  1096   // space that is garbage to maintain information on ranges of
  1097   // live objects so that these live ranges can be moved as a whole.
  1098   // Comment out this assertion until that problem can be solved
  1099   // (i.e., that the block start calculation may look at objects
  1100   // at addresses below "p" in finding the object that contains "p",
  1101   // and those objects (if garbage) may have been modified to hold
  1102   // live range information.)
  1103   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
  1104   //        "Should be a block boundary");
  1105   if (FreeChunk::indicatesFreeChunk(p)) return false;
  1106   Klass* k = oop(p)->klass_or_null();
  1107   if (k != NULL) {
  1108     // Ignore mark word because it may have been used to
  1109     // chain together promoted objects (the last one
  1110     // would have a null value).
  1111     assert(oop(p)->is_oop(true), "Should be an oop");
  1112     return true;
  1113   } else {
  1114     return false;  // Was not an object at the start of collection.
  1115   }
  1116 }
  1118 // Check if the object is alive. This fact is checked either by consulting
  1119 // the main marking bitmap in the sweeping phase or, if it's a permanent
  1120 // generation and we're not in the sweeping phase, by checking the
  1121 // perm_gen_verify_bit_map where we store the "deadness" information if
  1122 // we did not sweep the perm gen in the most recent previous GC cycle.
  1123 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
  1124   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
  1125          "Else races are possible");
  1126   assert(block_is_obj(p), "The address should point to an object");
  1128   // If we're sweeping, we use object liveness information from the main bit map
  1129   // for both perm gen and old gen.
  1130   // We don't need to lock the bitmap (live_map or dead_map below), because
  1131   // EITHER we are in the middle of the sweeping phase, and the
  1132   // main marking bit map (live_map below) is locked,
  1133   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
  1134   // is stable, because it's mutated only in the sweeping phase.
  1135   // NOTE: This method is also used by jmap where, if class unloading is
  1136   // off, the results can return "false" for legitimate perm objects,
  1137   // when we are not in the midst of a sweeping phase, which can result
  1138   // in jmap not reporting certain perm gen objects. This will be moot
  1139   // if/when the perm gen goes away in the future.
  1140   if (_collector->abstract_state() == CMSCollector::Sweeping) {
  1141     CMSBitMap* live_map = _collector->markBitMap();
  1142     return live_map->par_isMarked((HeapWord*) p);
  1143   }
  1144   return true;
  1145 }
  1147 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
  1148   FreeChunk* fc = (FreeChunk*)p;
  1149   assert(is_in_reserved(p), "Should be in space");
  1150   assert(_bt.block_start(p) == p, "Should be a block boundary");
  1151   if (!fc->is_free()) {
  1152     // Ignore mark word because it may have been used to
  1153     // chain together promoted objects (the last one
  1154     // would have a null value).
  1155     assert(oop(p)->is_oop(true), "Should be an oop");
  1156     return true;
  1158   return false;
  1161 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
  1162 // approximate answer if you don't hold the freelistLock() when you call this.
  1163 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
  1164   size_t size = 0;
  1165   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  1166     debug_only(
  1167       // We may be calling here without the lock in which case we
  1168       // won't do this modest sanity check.
  1169       if (freelistLock()->owned_by_self()) {
  1170         size_t total_list_size = 0;
  1171         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
  1172           fc = fc->next()) {
  1173           total_list_size += i;
  1175         assert(total_list_size == i * _indexedFreeList[i].count(),
  1176                "Count in list is incorrect");
  1179     size += i * _indexedFreeList[i].count();
  1181   return size;
  1184 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  1185   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  1186   return allocate(size);
  1189 HeapWord*
  1190 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
  1191   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
  1194 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  1195   assert_lock_strong(freelistLock());
  1196   HeapWord* res = NULL;
  1197   assert(size == adjustObjectSize(size),
  1198          "use adjustObjectSize() before calling into allocate()");
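         // An illustrative call sequence (promote() further below uses the
         // same pattern): callers adjust the raw word size first, e.g.
         //   HeapWord* res = allocate(adjustObjectSize(obj_size));
         // so that the assertion above holds on every entry into allocate().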
  1200   if (_adaptive_freelists) {
  1201     res = allocate_adaptive_freelists(size);
  1202   } else {  // non-adaptive free lists
  1203     res = allocate_non_adaptive_freelists(size);
  1206   if (res != NULL) {
  1207     // check that res does lie in this space!
  1208     assert(is_in_reserved(res), "Not in this space!");
  1209     assert(is_aligned((void*)res), "alignment check");
  1211     FreeChunk* fc = (FreeChunk*)res;
  1212     fc->markNotFree();
  1213     assert(!fc->is_free(), "shouldn't be marked free");
  1214     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
  1215     // Verify that the block offset table shows this to
  1216     // be a single block, but not one which is unallocated.
  1217     _bt.verify_single_block(res, size);
  1218     _bt.verify_not_unallocated(res, size);
  1219     // mangle a just allocated object with a distinct pattern.
  1220     debug_only(fc->mangleAllocated(size));
  1223   return res;
  1226 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
  1227   HeapWord* res = NULL;
  1228   // try and use linear allocation for smaller blocks
  1229   if (size < _smallLinearAllocBlock._allocation_size_limit) {
  1230     // if successful, the following also adjusts block offset table
  1231     res = getChunkFromSmallLinearAllocBlock(size);
  1233   // Else triage to indexed lists for smaller sizes
  1234   if (res == NULL) {
  1235     if (size < SmallForDictionary) {
  1236       res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1237     } else {
  1238       // else get it from the big dictionary; if even this doesn't
  1239       // work we are out of luck.
  1240       res = (HeapWord*)getChunkFromDictionaryExact(size);
  1244   return res;
  1247 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
  1248   assert_lock_strong(freelistLock());
  1249   HeapWord* res = NULL;
  1250   assert(size == adjustObjectSize(size),
  1251          "use adjustObjectSize() before calling into allocate()");
  1253   // Strategy
  1254   //   if small
  1255   //     exact size from small object indexed list if small
  1256   //     small or large linear allocation block (linAB) as appropriate
  1257   //     take from lists of greater sized chunks
  1258   //   else
  1259   //     dictionary
  1260   //     small or large linear allocation block if it has the space
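         // Illustrative example (word sizes chosen only for illustration):
         // a 24-word request is served from _indexedFreeList[24] if possible,
         // then from the small linAB (if under its size limit), and then by
         // splitting a larger indexed chunk via getChunkFromGreater(); a
         // request of IndexSetSize words or more goes straight to the
         // dictionary, falling back to the small linAB remainder if the
         // dictionary cannot satisfy it.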
  1261   // Try allocating exact size from indexTable first
  1262   if (size < IndexSetSize) {
  1263     res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1264     if(res != NULL) {
  1265       assert(res != (HeapWord*)_indexedFreeList[size].head(),
  1266         "Not removed from free list");
  1267       // no block offset table adjustment is necessary on blocks in
  1268       // the indexed lists.
  1270     // Try allocating from the small LinAB
  1271     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
  1272         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
  1273         // if successful, the above also adjusts block offset table
  1274         // Note that this call will refill the LinAB to
  1275         // satisfy the request.  This is different than
  1276         // evm.
  1277         // Don't record chunk off a LinAB?  smallSplitBirth(size);
  1278     } else {
  1279       // Raid the exact free lists larger than size, even if they are not
  1280       // overpopulated.
  1281       res = (HeapWord*) getChunkFromGreater(size);
  1283   } else {
  1284     // Big objects get allocated directly from the dictionary.
  1285     res = (HeapWord*) getChunkFromDictionaryExact(size);
  1286     if (res == NULL) {
  1287       // Try hard not to fail since an allocation failure will likely
  1288       // trigger a synchronous GC.  Try to get the space from the
  1289       // allocation blocks.
  1290       res = getChunkFromSmallLinearAllocBlockRemainder(size);
  1294   return res;
  1297 // A worst-case estimate of the space required (in HeapWords) to expand the heap
  1298 // when promoting obj.
  1299 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  1300   // Depending on the object size, expansion may require refilling either a
  1301   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  2 * MinChunkSize
  1302   // is added because the dictionary may over-allocate to avoid fragmentation.
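         // Worked example with purely illustrative numbers: for obj_size == 100,
         // _promoInfo.refillSize() == 64 and MinChunkSize == 3, the adaptive
         // freelist case yields 100 + 64 + 2*3 = 170 words, while the
         // non-adaptive case with _refillSize == 256 yields 256 + 64 + 6 = 326.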
  1303   size_t space = obj_size;
  1304   if (!_adaptive_freelists) {
  1305     space = MAX2(space, _smallLinearAllocBlock._refillSize);
  1307   space += _promoInfo.refillSize() + 2 * MinChunkSize;
  1308   return space;
  1311 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  1312   FreeChunk* ret;
  1314   assert(numWords >= MinChunkSize, "Size is less than minimum");
  1315   assert(linearAllocationWouldFail() || bestFitFirst(),
  1316     "Should not be here");
  1318   size_t i;
  1319   size_t currSize = numWords + MinChunkSize;
  1320   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  1321   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
  1322     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
  1323     if (fl->head()) {
  1324       ret = getFromListGreater(fl, numWords);
  1325       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
  1326       return ret;
  1330   currSize = MAX2((size_t)SmallForDictionary,
  1331                   (size_t)(numWords + MinChunkSize));
  1333   /* Try to get a chunk that satisfies request, while avoiding
  1334      fragmentation that can't be handled. */
  1336     ret =  dictionary()->get_chunk(currSize);
  1337     if (ret != NULL) {
  1338       assert(ret->size() - numWords >= MinChunkSize,
  1339              "Chunk is too small");
  1340       _bt.allocated((HeapWord*)ret, ret->size());
  1341       /* Carve returned chunk. */
  1342       (void) splitChunkAndReturnRemainder(ret, numWords);
  1343       /* Label this as no longer a free chunk. */
  1344       assert(ret->is_free(), "This chunk should be free");
  1345       ret->link_prev(NULL);
  1347     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
  1348     return ret;
  1350   ShouldNotReachHere();
  1353 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
  1354   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  1355   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
  1358 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
  1359   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
  1360          (_smallLinearAllocBlock._word_size == fc->size()),
  1361          "Linear allocation block shows incorrect size");
  1362   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
  1363           (_smallLinearAllocBlock._word_size == fc->size()));
  1366 // Check whether the purported free chunk is present either as a linear
  1367 // allocation block, in the size-indexed table of (smaller) free blocks,
  1368 // or among the larger free blocks kept in the binary tree dictionary.
  1369 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
  1370   if (verify_chunk_is_linear_alloc_block(fc)) {
  1371     return true;
  1372   } else if (fc->size() < IndexSetSize) {
  1373     return verifyChunkInIndexedFreeLists(fc);
  1374   } else {
  1375     return dictionary()->verify_chunk_in_free_list(fc);
  1379 #ifndef PRODUCT
  1380 void CompactibleFreeListSpace::assert_locked() const {
  1381   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
  1384 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
  1385   CMSLockVerifier::assert_locked(lock);
  1387 #endif
  1389 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
  1390   // In the parallel case, the main thread holds the free list lock
  1391   // on behalf of the parallel threads.
  1392   FreeChunk* fc;
  1394     // If GC is parallel, this might be called by several threads.
  1395     // This should be rare enough that the locking overhead won't affect
  1396     // the sequential code.
  1397     MutexLockerEx x(parDictionaryAllocLock(),
  1398                     Mutex::_no_safepoint_check_flag);
  1399     fc = getChunkFromDictionary(size);
  1401   if (fc != NULL) {
  1402     fc->dontCoalesce();
  1403     assert(fc->is_free(), "Should be free, but not coalescable");
  1404     // Verify that the block offset table shows this to
  1405     // be a single block, but not one which is unallocated.
  1406     _bt.verify_single_block((HeapWord*)fc, fc->size());
  1407     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  1409   return fc;
  1412 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
  1413   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  1414   assert_locked();
  1416   // If we are tracking promotions, then first ensure space for
  1417   // promotion (including spooling space for saving the header if necessary),
  1418   // then allocate and copy, then track promoted info if needed.
  1419   // When tracking (see PromotionInfo::track()), the mark word may
  1420   // be displaced and in this case restoration of the mark word
  1421   // occurs in the (oop_since_save_marks_)iterate phase.
  1422   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
  1423     return NULL;
  1425   // Call the allocate(size_t, bool) form directly to avoid the
  1426   // additional call through the allocate(size_t) form.  Having
  1427   // the compiler inline the call is problematic because allocate(size_t)
  1428   // is a virtual method.
  1429   HeapWord* res = allocate(adjustObjectSize(obj_size));
  1430   if (res != NULL) {
  1431     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
  1432     // if we should be tracking promotions, do so.
  1433     if (_promoInfo.tracking()) {
  1434         _promoInfo.track((PromotedObject*)res);
  1437   return oop(res);
  1440 HeapWord*
  1441 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
  1442   assert_locked();
  1443   assert(size >= MinChunkSize, "minimum chunk size");
  1444   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
  1445     "maximum from smallLinearAllocBlock");
  1446   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
  1449 HeapWord*
  1450 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
  1451                                                        size_t size) {
  1452   assert_locked();
  1453   assert(size >= MinChunkSize, "too small");
  1454   HeapWord* res = NULL;
  1455   // Try to do linear allocation from blk, bailing out if the block is empty.
  1456   if (blk->_word_size == 0) {
  1457     // We have probably been unable to fill this either in the prologue or
  1458     // when it was exhausted at the last linear allocation. Bail out until
  1459     // next time.
  1460     assert(blk->_ptr == NULL, "consistency check");
  1461     return NULL;
  1463   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
  1464   res = getChunkFromLinearAllocBlockRemainder(blk, size);
  1465   if (res != NULL) return res;
  1467   // about to exhaust this linear allocation block
  1468   if (blk->_word_size == size) { // exactly satisfied
  1469     res = blk->_ptr;
  1470     _bt.allocated(res, blk->_word_size);
  1471   } else if (size + MinChunkSize <= blk->_refillSize) {
  1472     size_t sz = blk->_word_size;
  1473     // Update _unallocated_block if the size is such that the chunk would be
  1474     // returned to the indexed free list.  All other chunks in the indexed
  1475     // free lists are allocated from the dictionary, so _unallocated_block
  1476     // has already been adjusted for them.  Do it here so that the cost is
  1477     // incurred for all chunks added back to the indexed free lists.
  1478     if (sz < SmallForDictionary) {
  1479       _bt.allocated(blk->_ptr, sz);
  1481     // Return the chunk that isn't big enough, and then refill below.
  1482     addChunkToFreeLists(blk->_ptr, sz);
  1483     split_birth(sz);
  1484     // Don't keep statistics on adding back chunk from a LinAB.
  1485   } else {
  1486     // A refilled block would not satisfy the request.
  1487     return NULL;
  1490   blk->_ptr = NULL; blk->_word_size = 0;
  1491   refillLinearAllocBlock(blk);
  1492   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
  1493          "block was replenished");
  1494   if (res != NULL) {
  1495     split_birth(size);
  1496     repairLinearAllocBlock(blk);
  1497   } else if (blk->_ptr != NULL) {
  1498     res = blk->_ptr;
  1499     size_t blk_size = blk->_word_size;
  1500     blk->_word_size -= size;
  1501     blk->_ptr  += size;
  1502     split_birth(size);
  1503     repairLinearAllocBlock(blk);
  1504     // Update BOT last so that other (parallel) GC threads see a consistent
  1505     // view of the BOT and free blocks.
  1506     // Above must occur before BOT is updated below.
  1507     OrderAccess::storestore();
  1508     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1510   return res;
  1513 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
  1514                                         LinearAllocBlock* blk,
  1515                                         size_t size) {
  1516   assert_locked();
  1517   assert(size >= MinChunkSize, "too small");
  1519   HeapWord* res = NULL;
  1520   // This is the common case.  Keep it simple.
  1521   if (blk->_word_size >= size + MinChunkSize) {
  1522     assert(blk->_ptr != NULL, "consistency check");
  1523     res = blk->_ptr;
  1524     // Note that the BOT is up-to-date for the linAB before allocation.  It
  1525     // indicates the start of the linAB.  The split_block() updates the
  1526     // BOT for the linAB after the allocation (indicates the start of the
  1527     // next chunk to be allocated).
  1528     size_t blk_size = blk->_word_size;
  1529     blk->_word_size -= size;
  1530     blk->_ptr  += size;
  1531     split_birth(size);
  1532     repairLinearAllocBlock(blk);
  1533     // Update BOT last so that other (parallel) GC threads see a consistent
  1534     // view of the BOT and free blocks.
  1535     // Above must occur before BOT is updated below.
  1536     OrderAccess::storestore();
  1537     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1538     _bt.allocated(res, size);
  1540   return res;
  1543 FreeChunk*
  1544 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
  1545   assert_locked();
  1546   assert(size < SmallForDictionary, "just checking");
  1547   FreeChunk* res;
  1548   res = _indexedFreeList[size].get_chunk_at_head();
  1549   if (res == NULL) {
  1550     res = getChunkFromIndexedFreeListHelper(size);
  1552   _bt.verify_not_unallocated((HeapWord*) res, size);
  1553   assert(res == NULL || res->size() == size, "Incorrect block size");
  1554   return res;
  1557 FreeChunk*
  1558 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
  1559   bool replenish) {
  1560   assert_locked();
  1561   FreeChunk* fc = NULL;
  1562   if (size < SmallForDictionary) {
  1563     assert(_indexedFreeList[size].head() == NULL ||
  1564       _indexedFreeList[size].surplus() <= 0,
  1565       "List for this size should be empty or under populated");
  1566     // Try best fit in exact lists before replenishing the list
  1567     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
  1568       // Replenish list.
  1569       //
  1570       // Things tried that failed.
  1571       //   Tried allocating out of the two LinAB's first before
  1572       // replenishing lists.
  1573       //   Tried small linAB of size 256 (size in indexed list)
  1574       // and replenishing indexed lists from the small linAB.
  1575       //
  1576       FreeChunk* newFc = NULL;
  1577       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
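             // Illustrative example (flag value assumed, not a stated default):
             // with CMSIndexedFreeListReplenish == 4 and size == 8, replenish_size
             // is 32; if a 32-word chunk is obtained below, the carving loop puts
             // three 8-word blocks on _indexedFreeList[8] and returns the fourth
             // to the caller as fc.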
  1578       if (replenish_size < SmallForDictionary) {
  1579         // Do not replenish from an underpopulated size.
  1580         if (_indexedFreeList[replenish_size].surplus() > 0 &&
  1581             _indexedFreeList[replenish_size].head() != NULL) {
  1582           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
  1583         } else if (bestFitFirst()) {
  1584           newFc = bestFitSmall(replenish_size);
  1587       if (newFc == NULL && replenish_size > size) {
  1588         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
  1589         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
  1591       // Note: The stats update regarding the split-death of the block obtained
  1592       // above will be recorded below, precisely when we know we are
  1593       // actually going to split it into more than one piece.
  1594       if (newFc != NULL) {
  1595         if  (replenish || CMSReplenishIntermediate) {
  1596           // Replenish this list and return one block to caller.
  1597           size_t i;
  1598           FreeChunk *curFc, *nextFc;
  1599           size_t num_blk = newFc->size() / size;
  1600           assert(num_blk >= 1, "Smaller than requested?");
  1601           assert(newFc->size() % size == 0, "Should be integral multiple of request");
  1602           if (num_blk > 1) {
  1603             // we are sure we will be splitting the block just obtained
  1604             // into multiple pieces; record the split-death of the original
  1605             splitDeath(replenish_size);
  1607           // carve up and link blocks 0, ..., num_blk - 2
  1608           // The last chunk is not added to the lists but is returned as the
  1609           // free chunk.
  1610           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
  1611                i = 0;
  1612                i < (num_blk - 1);
  1613                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
  1614                i++) {
  1615             curFc->set_size(size);
  1616             // Don't record this as a return in order to try and
  1617             // determine the "returns" from a GC.
  1618             _bt.verify_not_unallocated((HeapWord*) curFc, size);
  1619             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
  1620             _bt.mark_block((HeapWord*)curFc, size);
  1621             split_birth(size);
  1622             // Don't record the initial population of the indexed list
  1623             // as a split birth.
  1626           // check that the arithmetic was OK above
  1627           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
  1628             "inconsistency in carving newFc");
  1629           curFc->set_size(size);
  1630           _bt.mark_block((HeapWord*)curFc, size);
  1631           split_birth(size);
  1632           fc = curFc;
  1633         } else {
  1634           // Return entire block to caller
  1635           fc = newFc;
  1639   } else {
  1640     // Get a free chunk from the free chunk dictionary to be returned to
  1641     // replenish the indexed free list.
  1642     fc = getChunkFromDictionaryExact(size);
  1644   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
  1645   return fc;
  1648 FreeChunk*
  1649 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
  1650   assert_locked();
  1651   FreeChunk* fc = _dictionary->get_chunk(size);
  1652   if (fc == NULL) {
  1653     return NULL;
  1655   _bt.allocated((HeapWord*)fc, fc->size());
  1656   if (fc->size() >= size + MinChunkSize) {
  1657     fc = splitChunkAndReturnRemainder(fc, size);
  1659   assert(fc->size() >= size, "chunk too small");
  1660   assert(fc->size() < size + MinChunkSize, "chunk too big");
  1661   _bt.verify_single_block((HeapWord*)fc, fc->size());
  1662   return fc;
  1665 FreeChunk*
  1666 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
  1667   assert_locked();
  1668   FreeChunk* fc = _dictionary->get_chunk(size);
  1669   if (fc == NULL) {
  1670     return fc;
  1672   _bt.allocated((HeapWord*)fc, fc->size());
  1673   if (fc->size() == size) {
  1674     _bt.verify_single_block((HeapWord*)fc, size);
  1675     return fc;
  1677   assert(fc->size() > size, "get_chunk() guarantee");
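         // The chunk is larger than requested; if the excess is smaller than
         // MinChunkSize, splitting would leave a remainder too small to be a
         // valid FreeChunk, so return the chunk and ask the dictionary for one
         // of at least size + MinChunkSize, which can always be split safely.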
  1678   if (fc->size() < size + MinChunkSize) {
  1679     // Return the chunk to the dictionary and go get a bigger one.
  1680     returnChunkToDictionary(fc);
  1681     fc = _dictionary->get_chunk(size + MinChunkSize);
  1682     if (fc == NULL) {
  1683       return NULL;
  1685     _bt.allocated((HeapWord*)fc, fc->size());
  1687   assert(fc->size() >= size + MinChunkSize, "tautology");
  1688   fc = splitChunkAndReturnRemainder(fc, size);
  1689   assert(fc->size() == size, "chunk is wrong size");
  1690   _bt.verify_single_block((HeapWord*)fc, size);
  1691   return fc;
  1694 void
  1695 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
  1696   assert_locked();
  1698   size_t size = chunk->size();
  1699   _bt.verify_single_block((HeapWord*)chunk, size);
  1700   // adjust _unallocated_block downward, as necessary
  1701   _bt.freed((HeapWord*)chunk, size);
  1702   _dictionary->return_chunk(chunk);
  1703 #ifndef PRODUCT
  1704   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1705     TreeChunk<FreeChunk, AdaptiveFreeList>* tc = TreeChunk<FreeChunk, AdaptiveFreeList>::as_TreeChunk(chunk);
  1706     TreeList<FreeChunk, AdaptiveFreeList>* tl = tc->list();
  1707     tl->verify_stats();
  1709 #endif // PRODUCT
  1712 void
  1713 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
  1714   assert_locked();
  1715   size_t size = fc->size();
  1716   _bt.verify_single_block((HeapWord*) fc, size);
  1717   _bt.verify_not_unallocated((HeapWord*) fc, size);
  1718   if (_adaptive_freelists) {
  1719     _indexedFreeList[size].return_chunk_at_tail(fc);
  1720   } else {
  1721     _indexedFreeList[size].return_chunk_at_head(fc);
  1723 #ifndef PRODUCT
  1724   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1725      _indexedFreeList[size].verify_stats();
  1727 #endif // PRODUCT
  1730 // Add chunk to end of last block -- if it's the largest
  1731 // block -- and update BOT and census data. We would
  1732 // of course have preferred to coalesce it with the
  1733 // last block, but it's currently less expensive to find the
  1734 // largest block than it is to find the last.
  1735 void
  1736 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
  1737   HeapWord* chunk, size_t     size) {
  1738   // check that the chunk does lie in this space!
  1739   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1740   // One of the parallel gc task threads may be here
  1741   // whilst others are allocating.
  1742   Mutex* lock = NULL;
  1743   if (ParallelGCThreads != 0) {
  1744     lock = &_parDictionaryAllocLock;
  1746   FreeChunk* ec;
  1748     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1749     ec = dictionary()->find_largest_dict();  // get largest block
  1750     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
  1751       // It's a coterminal block - we can coalesce.
  1752       size_t old_size = ec->size();
  1753       coalDeath(old_size);
  1754       removeChunkFromDictionary(ec);
  1755       size += old_size;
  1756     } else {
  1757       ec = (FreeChunk*)chunk;
  1760   ec->set_size(size);
  1761   debug_only(ec->mangleFreed(size));
  1762   if (size < SmallForDictionary) {
  1763     lock = _indexedFreeListParLocks[size];
  1765   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1766   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  1767   // record the birth under the lock since the recording involves
  1768   // manipulation of the list on which the chunk lives and
  1769   // if the chunk is allocated and is the last on the list,
  1770   // the list can go away.
  1771   coalBirth(size);
  1774 void
  1775 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
  1776                                               size_t     size) {
  1777   // check that the chunk does lie in this space!
  1778   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1779   assert_locked();
  1780   _bt.verify_single_block(chunk, size);
  1782   FreeChunk* fc = (FreeChunk*) chunk;
  1783   fc->set_size(size);
  1784   debug_only(fc->mangleFreed(size));
  1785   if (size < SmallForDictionary) {
  1786     returnChunkToFreeList(fc);
  1787   } else {
  1788     returnChunkToDictionary(fc);
  1792 void
  1793 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
  1794   size_t size, bool coalesced) {
  1795   assert_locked();
  1796   assert(chunk != NULL, "null chunk");
  1797   if (coalesced) {
  1798     // repair BOT
  1799     _bt.single_block(chunk, size);
  1801   addChunkToFreeLists(chunk, size);
  1804 // We _must_ find the purported chunk on our free lists;
  1805 // we assert if we don't.
  1806 void
  1807 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
  1808   size_t size = fc->size();
  1809   assert_locked();
  1810   debug_only(verifyFreeLists());
  1811   if (size < SmallForDictionary) {
  1812     removeChunkFromIndexedFreeList(fc);
  1813   } else {
  1814     removeChunkFromDictionary(fc);
  1816   _bt.verify_single_block((HeapWord*)fc, size);
  1817   debug_only(verifyFreeLists());
  1820 void
  1821 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
  1822   size_t size = fc->size();
  1823   assert_locked();
  1824   assert(fc != NULL, "null chunk");
  1825   _bt.verify_single_block((HeapWord*)fc, size);
  1826   _dictionary->remove_chunk(fc);
  1827   // adjust _unallocated_block upward, as necessary
  1828   _bt.allocated((HeapWord*)fc, size);
  1831 void
  1832 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
  1833   assert_locked();
  1834   size_t size = fc->size();
  1835   _bt.verify_single_block((HeapWord*)fc, size);
  1836   NOT_PRODUCT(
  1837     if (FLSVerifyIndexTable) {
  1838       verifyIndexedFreeList(size);
  1841   _indexedFreeList[size].remove_chunk(fc);
  1842   NOT_PRODUCT(
  1843     if (FLSVerifyIndexTable) {
  1844       verifyIndexedFreeList(size);
  1849 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
  1850   /* A hint is the next larger size that has a surplus.
  1851      Start search at a size large enough to guarantee that
  1852      the excess is >= MIN_CHUNK. */
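         // Illustrative walk (the concrete numbers depend on MinChunkSize and
         // object alignment): for numWords == 10 and MinChunkSize == 3, start
         // is align_object_size(13); we read _indexedFreeList[start].hint() and
         // follow the hint chain upward until a list with a surplus and a
         // non-NULL head is found, record that size as the new hint for
         // 'start', and split a chunk off it with getFromListGreater().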
  1853   size_t start = align_object_size(numWords + MinChunkSize);
  1854   if (start < IndexSetSize) {
  1855     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
  1856     size_t    hint = _indexedFreeList[start].hint();
  1857     while (hint < IndexSetSize) {
  1858       assert(hint % MinObjAlignment == 0, "hint should be aligned");
  1859       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
  1860       if (fl->surplus() > 0 && fl->head() != NULL) {
  1861         // Found a list with surplus, reset original hint
  1862         // and split out a free chunk which is returned.
  1863         _indexedFreeList[start].set_hint(hint);
  1864         FreeChunk* res = getFromListGreater(fl, numWords);
  1865         assert(res == NULL || res->is_free(),
  1866           "Should be returning a free chunk");
  1867         return res;
  1869       hint = fl->hint(); /* keep looking */
  1871     /* None found. */
  1872     it[start].set_hint(IndexSetSize);
  1874   return NULL;
  1877 /* Requires fl->size >= numWords + MinChunkSize */
  1878 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
  1879   size_t numWords) {
  1880   FreeChunk *curr = fl->head();
  1881   assert(curr != NULL, "List is empty");
  1882   assert(numWords >= MinChunkSize, "Word size is too small");
  1883   size_t oldNumWords = curr->size();
  1884   assert(oldNumWords >= numWords + MinChunkSize,
  1885         "Size of chunks in the list is too small");
  1887   fl->remove_chunk(curr);
  1888   // recorded indirectly by splitChunkAndReturnRemainder -
  1889   // smallSplit(oldNumWords, numWords);
  1890   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
  1891   // Does anything have to be done for the remainder in terms of
  1892   // fixing the card table?
  1893   assert(new_chunk == NULL || new_chunk->is_free(),
  1894     "Should be returning a free chunk");
  1895   return new_chunk;
  1898 FreeChunk*
  1899 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
  1900   size_t new_size) {
  1901   assert_locked();
  1902   size_t size = chunk->size();
  1903   assert(size > new_size, "Split from a smaller block?");
  1904   assert(is_aligned(chunk), "alignment problem");
  1905   assert(size == adjustObjectSize(size), "alignment problem");
  1906   size_t rem_size = size - new_size;
  1907   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
  1908   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
  1909   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
  1910   assert(is_aligned(ffc), "alignment problem");
  1911   ffc->set_size(rem_size);
  1912   ffc->link_next(NULL);
  1913   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
  1914   // Above must occur before BOT is updated below.
  1915   // adjust block offset table
  1916   OrderAccess::storestore();
  1917   assert(chunk->is_free() && ffc->is_free(), "Error");
  1918   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
  1919   if (rem_size < SmallForDictionary) {
  1920     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  1921     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
  1922     assert(!is_par ||
  1923            (SharedHeap::heap()->n_par_threads() ==
  1924             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  1925     returnChunkToFreeList(ffc);
  1926     split(size, rem_size);
  1927     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
  1928   } else {
  1929     returnChunkToDictionary(ffc);
  1930     split(size, rem_size);
  1932   chunk->set_size(new_size);
  1933   return chunk;
  1936 void
  1937 CompactibleFreeListSpace::sweep_completed() {
  1938   // Now that space is probably plentiful, refill linear
  1939   // allocation blocks as needed.
  1940   refillLinearAllocBlocksIfNeeded();
  1943 void
  1944 CompactibleFreeListSpace::gc_prologue() {
  1945   assert_locked();
  1946   if (PrintFLSStatistics != 0) {
  1947     gclog_or_tty->print("Before GC:\n");
  1948     reportFreeListStatistics();
  1950   refillLinearAllocBlocksIfNeeded();
  1953 void
  1954 CompactibleFreeListSpace::gc_epilogue() {
  1955   assert_locked();
  1956   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
  1957     if (_smallLinearAllocBlock._word_size == 0)
  1958       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
  1960   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1961   _promoInfo.stopTrackingPromotions();
  1962   repairLinearAllocationBlocks();
  1963   // Print Space's stats
  1964   if (PrintFLSStatistics != 0) {
  1965     gclog_or_tty->print("After GC:\n");
  1966     reportFreeListStatistics();
  1970 // Iteration support, mostly delegated from a CMS generation
  1972 void CompactibleFreeListSpace::save_marks() {
  1973   assert(Thread::current()->is_VM_thread(),
  1974          "Global variable should only be set when single-threaded");
  1975   // Mark the "end" of the used space at the time of this call;
  1976   // note, however, that promoted objects from this point
  1977   // on are tracked in the _promoInfo below.
  1978   set_saved_mark_word(unallocated_block());
  1979 #ifdef ASSERT
  1980   // Check the sanity of save_marks() etc.
  1981   MemRegion ur    = used_region();
  1982   MemRegion urasm = used_region_at_save_marks();
  1983   assert(ur.contains(urasm),
  1984          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
  1985                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
  1986                  ur.start(), ur.end(), urasm.start(), urasm.end()));
  1987 #endif
  1988   // inform allocator that promotions should be tracked.
  1989   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1990   _promoInfo.startTrackingPromotions();
  1993 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
  1994   assert(_promoInfo.tracking(), "No preceding save_marks?");
  1995   assert(SharedHeap::heap()->n_par_threads() == 0,
  1996          "Shouldn't be called if using parallel gc.");
  1997   return _promoInfo.noPromotions();
  2000 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
  2002 void CompactibleFreeListSpace::                                             \
  2003 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  2004   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
  2005          "Shouldn't be called (yet) during parallel part of gc.");          \
  2006   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
  2007   /*                                                                        \
  2008    * This also restores any displaced headers and removes the elements from \
  2009    * the iteration set as they are processed, so that we have a clean slate \
  2010    * at the end of the iteration. Note, thus, that if new objects are       \
  2011    * promoted as a result of the iteration they are iterated over as well.  \
  2012    */                                                                       \
  2013   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
  2016 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
  2019 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
  2020   // ugghh... how would one do this efficiently for a non-contiguous space?
  2021   guarantee(false, "NYI");
  2024 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  2025   return _smallLinearAllocBlock._word_size == 0;
  2028 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
  2029   // Fix up linear allocation blocks to look like free blocks
  2030   repairLinearAllocBlock(&_smallLinearAllocBlock);
  2033 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
  2034   assert_locked();
  2035   if (blk->_ptr != NULL) {
  2036     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
  2037            "Minimum block size requirement");
  2038     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
  2039     fc->set_size(blk->_word_size);
  2040     fc->link_prev(NULL);   // mark as free
  2041     fc->dontCoalesce();
  2042     assert(fc->is_free(), "just marked it free");
  2043     assert(fc->cantCoalesce(), "just marked it uncoalescable");
  2047 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
  2048   assert_locked();
  2049   if (_smallLinearAllocBlock._ptr == NULL) {
  2050     assert(_smallLinearAllocBlock._word_size == 0,
  2051       "Size of linAB should be zero if the ptr is NULL");
  2052     // Reset the linAB refill and allocation size limit.
  2053     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
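           // The arguments correspond to the linAB's _ptr, _word_size,
           // _refillSize and _allocation_size_limit fields: the block is
           // refilled in chunks of 1024*SmallForLinearAlloc words but, as in
           // allocate_non_adaptive_freelists() above, only serves requests
           // smaller than SmallForLinearAlloc words.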
  2055   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
  2058 void
  2059 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
  2060   assert_locked();
  2061   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
  2062          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
  2063          "blk invariant");
  2064   if (blk->_ptr == NULL) {
  2065     refillLinearAllocBlock(blk);
  2067   if (PrintMiscellaneous && Verbose) {
  2068     if (blk->_word_size == 0) {
  2069       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
  2074 void
  2075 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  2076   assert_locked();
  2077   assert(blk->_word_size == 0 && blk->_ptr == NULL,
  2078          "linear allocation block should be empty");
  2079   FreeChunk* fc;
  2080   if (blk->_refillSize < SmallForDictionary &&
  2081       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
  2082     // A linAB's strategy might be to use small sizes to reduce
  2083     // fragmentation but still get the benefits of allocation from a
  2084     // linAB.
  2085   } else {
  2086     fc = getChunkFromDictionary(blk->_refillSize);
  2088   if (fc != NULL) {
  2089     blk->_ptr  = (HeapWord*)fc;
  2090     blk->_word_size = fc->size();
  2091     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
  2095 // Support for concurrent collection policy decisions.
  2096 bool CompactibleFreeListSpace::should_concurrent_collect() const {
  2097   // In the future we might want to add in fragmentation stats --
  2098   // including erosion of the "mountain" into this decision as well.
  2099   return !adaptive_freelists() && linearAllocationWouldFail();
  2102 // Support for compaction
  2104 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  2105   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  2106   // prepare_for_compaction() uses the space between live objects
  2107   // so that a later phase can skip dead space quickly.  As a result,
  2108   // verification of the free lists doesn't work afterwards.
  2111 #define obj_size(q) adjustObjectSize(oop(q)->size())
  2112 #define adjust_obj_size(s) adjustObjectSize(s)
  2114 void CompactibleFreeListSpace::adjust_pointers() {
  2115   // In other versions of adjust_pointers(), a bail out
  2116   // based on the amount of live data in the generation
  2117   // (i.e., if 0, bail out) may be used.
  2118   // Cannot test used() == 0 here because the free lists have already
  2119   // been mangled by the compaction.
  2121   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
  2122   // See note about verification in prepare_for_compaction().
  2125 void CompactibleFreeListSpace::compact() {
  2126   SCAN_AND_COMPACT(obj_size);
  2129 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
  2130 // where fbs is free block sizes
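       // Worked example (illustrative sizes only): four free blocks of 2 words
       // each give 1 - 16/64 = 0.75, while a single free block of 8 words gives
       // 1 - 64/64 = 0.0; higher values thus indicate free space fragmented
       // into many small pieces.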
  2131 double CompactibleFreeListSpace::flsFrag() const {
  2132   size_t itabFree = totalSizeInIndexedFreeLists();
  2133   double frag = 0.0;
  2134   size_t i;
  2136   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2137     double sz  = i;
  2138     frag      += _indexedFreeList[i].count() * (sz * sz);
  2141   double totFree = itabFree +
  2142                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
  2143   if (totFree > 0) {
  2144     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
  2145             (totFree * totFree));
  2146     frag = (double)1.0  - frag;
  2147   } else {
  2148     assert(frag == 0.0, "Follows from totFree == 0");
  2150   return frag;
  2153 void CompactibleFreeListSpace::beginSweepFLCensus(
  2154   float inter_sweep_current,
  2155   float inter_sweep_estimate,
  2156   float intra_sweep_estimate) {
  2157   assert_locked();
  2158   size_t i;
  2159   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2160     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
  2161     if (PrintFLSStatistics > 1) {
  2162       gclog_or_tty->print("size[%d] : ", i);
  2164     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
  2165     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
  2166     fl->set_before_sweep(fl->count());
  2167     fl->set_bfr_surp(fl->surplus());
  2169   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
  2170                                     inter_sweep_current,
  2171                                     inter_sweep_estimate,
  2172                                     intra_sweep_estimate);
  2175 void CompactibleFreeListSpace::setFLSurplus() {
  2176   assert_locked();
  2177   size_t i;
  2178   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2179     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2180     fl->set_surplus(fl->count() -
  2181                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
  2185 void CompactibleFreeListSpace::setFLHints() {
  2186   assert_locked();
  2187   size_t i;
  2188   size_t h = IndexSetSize;
  2189   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
  2190     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2191     fl->set_hint(h);
  2192     if (fl->surplus() > 0) {
  2193       h = i;
  2198 void CompactibleFreeListSpace::clearFLCensus() {
  2199   assert_locked();
  2200   size_t i;
  2201   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2202     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2203     fl->set_prev_sweep(fl->count());
  2204     fl->set_coal_births(0);
  2205     fl->set_coal_deaths(0);
  2206     fl->set_split_births(0);
  2207     fl->set_split_deaths(0);
  2211 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  2212   if (PrintFLSStatistics > 0) {
  2213     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
  2214     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
  2215                            largestAddr);
  2217   setFLSurplus();
  2218   setFLHints();
  2219   if (PrintGC && PrintFLSCensus > 0) {
  2220     printFLCensus(sweep_count);
  2222   clearFLCensus();
  2223   assert_locked();
  2224   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
  2227 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
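         // A size class is treated as over-populated for coalescing purposes
         // when its current chunk count already exceeds the coal_desired()
         // target computed by the most recent sweep census (or when no sane
         // target exists); the sweeper can then prefer to coalesce such chunks
         // rather than return them to an already crowded list.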
  2228   if (size < SmallForDictionary) {
  2229     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2230     return (fl->coal_desired() < 0) ||
  2231            ((int)fl->count() > fl->coal_desired());
  2232   } else {
  2233     return dictionary()->coal_dict_over_populated(size);
  2237 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
  2238   assert(size < SmallForDictionary, "Size too large for indexed list");
  2239   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2240   fl->increment_coal_births();
  2241   fl->increment_surplus();
  2244 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
  2245   assert(size < SmallForDictionary, "Size too large for indexed list");
  2246   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2247   fl->increment_coal_deaths();
  2248   fl->decrement_surplus();
  2251 void CompactibleFreeListSpace::coalBirth(size_t size) {
  2252   if (size  < SmallForDictionary) {
  2253     smallCoalBirth(size);
  2254   } else {
  2255     dictionary()->dict_census_update(size,
  2256                                    false /* split */,
  2257                                    true /* birth */);
  2261 void CompactibleFreeListSpace::coalDeath(size_t size) {
  2262   if(size  < SmallForDictionary) {
  2263     smallCoalDeath(size);
  2264   } else {
  2265     dictionary()->dict_census_update(size,
  2266                                    false /* split */,
  2267                                    false /* birth */);
  2271 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
  2272   assert(size < SmallForDictionary, "Size too large for indexed list");
  2273   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2274   fl->increment_split_births();
  2275   fl->increment_surplus();
  2278 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
  2279   assert(size < SmallForDictionary, "Size too large for indexed list");
  2280   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2281   fl->increment_split_deaths();
  2282   fl->decrement_surplus();
  2285 void CompactibleFreeListSpace::split_birth(size_t size) {
  2286   if (size  < SmallForDictionary) {
  2287     smallSplitBirth(size);
  2288   } else {
  2289     dictionary()->dict_census_update(size,
  2290                                    true /* split */,
  2291                                    true /* birth */);
  2295 void CompactibleFreeListSpace::splitDeath(size_t size) {
  2296   if (size  < SmallForDictionary) {
  2297     smallSplitDeath(size);
  2298   } else {
  2299     dictionary()->dict_census_update(size,
  2300                                    true /* split */,
  2301                                    false /* birth */);
  2305 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
  2306   size_t to2 = from - to1;
  2307   splitDeath(from);
  2308   split_birth(to1);
  2309   split_birth(to2);
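         // E.g. split(100, 60) records a split-death for size 100 and
         // split-births for sizes 60 and 40.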
  2312 void CompactibleFreeListSpace::print() const {
  2313   print_on(tty);
  2316 void CompactibleFreeListSpace::prepare_for_verify() {
  2317   assert_locked();
  2318   repairLinearAllocationBlocks();
  2319   // Verify that the SpoolBlocks look like free blocks of
  2320   // appropriate sizes... To be done ...
  2323 class VerifyAllBlksClosure: public BlkClosure {
  2324  private:
  2325   const CompactibleFreeListSpace* _sp;
  2326   const MemRegion                 _span;
  2327   HeapWord*                       _last_addr;
  2328   size_t                          _last_size;
  2329   bool                            _last_was_obj;
  2330   bool                            _last_was_live;
  2332  public:
  2333   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
  2334     MemRegion span) :  _sp(sp), _span(span),
  2335                        _last_addr(NULL), _last_size(0),
  2336                        _last_was_obj(false), _last_was_live(false) { }
  2338   virtual size_t do_blk(HeapWord* addr) {
  2339     size_t res;
  2340     bool   was_obj  = false;
  2341     bool   was_live = false;
  2342     if (_sp->block_is_obj(addr)) {
  2343       was_obj = true;
  2344       oop p = oop(addr);
  2345       guarantee(p->is_oop(), "Should be an oop");
  2346       res = _sp->adjustObjectSize(p->size());
  2347       if (_sp->obj_is_alive(addr)) {
  2348         was_live = true;
  2349         p->verify();
  2351     } else {
  2352       FreeChunk* fc = (FreeChunk*)addr;
  2353       res = fc->size();
  2354       if (FLSVerifyLists && !fc->cantCoalesce()) {
  2355         guarantee(_sp->verify_chunk_in_free_list(fc),
  2356                   "Chunk should be on a free list");
  2359     if (res == 0) {
  2360       gclog_or_tty->print_cr("Livelock: no rank reduction!");
  2361       gclog_or_tty->print_cr(
  2362         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
  2363         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
  2364         addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
  2365         _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
  2366       _sp->print_on(gclog_or_tty);
  2367       guarantee(false, "Seppuku!");
  2369     _last_addr = addr;
  2370     _last_size = res;
  2371     _last_was_obj  = was_obj;
  2372     _last_was_live = was_live;
  2373     return res;
  2375 };
  2377 class VerifyAllOopsClosure: public OopClosure {
  2378  private:
  2379   const CMSCollector*             _collector;
  2380   const CompactibleFreeListSpace* _sp;
  2381   const MemRegion                 _span;
  2382   const bool                      _past_remark;
  2383   const CMSBitMap*                _bit_map;
  2385  protected:
  2386   void do_oop(void* p, oop obj) {
  2387     if (_span.contains(obj)) { // the interior oop points into CMS heap
  2388       if (!_span.contains(p)) { // reference from outside CMS heap
  2389         // Should be a valid object; the first disjunct below allows
  2390         // us to sidestep an assertion in block_is_obj() that insists
  2391         // that p be in _sp. Note that several generations (and spaces)
  2392         // are spanned by _span (CMS heap) above.
  2393         guarantee(!_sp->is_in_reserved(obj) ||
  2394                   _sp->block_is_obj((HeapWord*)obj),
  2395                   "Should be an object");
  2396         guarantee(obj->is_oop(), "Should be an oop");
  2397         obj->verify();
  2398         if (_past_remark) {
  2399           // Remark has been completed, the object should be marked
  2400           _bit_map->isMarked((HeapWord*)obj);
  2402       } else { // reference within CMS heap
  2403         if (_past_remark) {
  2404           // Remark has been completed -- so the referent should have
  2405           // been marked, if referring object is.
  2406           if (_bit_map->isMarked(_collector->block_start(p))) {
  2407             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
  2411     } else if (_sp->is_in_reserved(p)) {
  2412       // the reference is from FLS, and points out of FLS
  2413       guarantee(obj->is_oop(), "Should be an oop");
  2414       obj->verify();
  2418   template <class T> void do_oop_work(T* p) {
  2419     T heap_oop = oopDesc::load_heap_oop(p);
  2420     if (!oopDesc::is_null(heap_oop)) {
  2421       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2422       do_oop(p, obj);
  2426  public:
  2427   VerifyAllOopsClosure(const CMSCollector* collector,
  2428     const CompactibleFreeListSpace* sp, MemRegion span,
  2429     bool past_remark, CMSBitMap* bit_map) :
  2430     _collector(collector), _sp(sp), _span(span),
  2431     _past_remark(past_remark), _bit_map(bit_map) { }
  2433   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
  2434   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
  2435 };
  2437 void CompactibleFreeListSpace::verify() const {
  2438   assert_lock_strong(&_freelistLock);
  2439   verify_objects_initialized();
  2440   MemRegion span = _collector->_span;
  2441   bool past_remark = (_collector->abstract_state() ==
  2442                       CMSCollector::Sweeping);
  2444   ResourceMark rm;
  2445   HandleMark  hm;
  2447   // Check integrity of CFL data structures
  2448   _promoInfo.verify();
  2449   _dictionary->verify();
  2450   if (FLSVerifyIndexTable) {
  2451     verifyIndexedFreeLists();
  2453   // Check integrity of all objects and free blocks in space
  2455     VerifyAllBlksClosure cl(this, span);
  2456     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
  2458   // Check that all references in the heap to FLS
  2459   // are to valid objects in FLS or that references in
  2460   // FLS are to valid objects elsewhere in the heap
  2461   if (FLSVerifyAllHeapReferences)
  2463     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
  2464       _collector->markBitMap());
  2465     CollectedHeap* ch = Universe::heap();
  2467     // Iterate over all oops in the heap. Uses the _no_header version
  2468     // since we are not interested in following the klass pointers.
  2469     ch->oop_iterate_no_header(&cl);
  2472   if (VerifyObjectStartArray) {
  2473     // Verify the block offset table
  2474     _bt.verify();
  2478 #ifndef PRODUCT
  2479 void CompactibleFreeListSpace::verifyFreeLists() const {
  2480   if (FLSVerifyLists) {
  2481     _dictionary->verify();
  2482     verifyIndexedFreeLists();
  2483   } else {
  2484     if (FLSVerifyDictionary) {
  2485       _dictionary->verify();
  2487     if (FLSVerifyIndexTable) {
  2488       verifyIndexedFreeLists();
  2492 #endif
  2494 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
  2495   size_t i = 0;
  2496   for (; i < IndexSetStart; i++) {
  2497     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
  2499   for (; i < IndexSetSize; i++) {
  2500     verifyIndexedFreeList(i);
  2504 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
  2505   FreeChunk* fc   =  _indexedFreeList[size].head();
  2506   FreeChunk* tail =  _indexedFreeList[size].tail();
  2507   size_t    num = _indexedFreeList[size].count();
  2508   size_t      n = 0;
  2509   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
  2510             "Slot should have been empty");
  2511   for (; fc != NULL; fc = fc->next(), n++) {
  2512     guarantee(fc->size() == size, "Size inconsistency");
  2513     guarantee(fc->is_free(), "!free?");
  2514     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
  2515     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
  2517   guarantee(n == num, "Incorrect count");
  2520 #ifndef PRODUCT
  2521 void CompactibleFreeListSpace::check_free_list_consistency() const {
  2522   assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size() <= IndexSetSize),
  2523     "Some sizes can't be allocated without recourse to"
  2524     " linear allocation buffers");
  2525   assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList>)),
  2526     "else MIN_TREE_CHUNK_SIZE is wrong");
  2527   assert(IndexSetStart != 0, "IndexSetStart not initialized");
  2528   assert(IndexSetStride != 0, "IndexSetStride not initialized");
  2530 #endif
  2532 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  2533   assert_lock_strong(&_freelistLock);
  2534   AdaptiveFreeList<FreeChunk> total;
  2535   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  2536   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
  2537   size_t total_free = 0;
  2538   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2539     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2540     total_free += fl->count() * fl->size();
  2541     if (i % (40*IndexSetStride) == 0) {
  2542       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
  2544     fl->print_on(gclog_or_tty);
  2545     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
  2546     total.set_surplus(    total.surplus()     + fl->surplus()    );
  2547     total.set_desired(    total.desired()     + fl->desired()    );
  2548     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
  2549     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
  2550     total.set_count(      total.count()       + fl->count()      );
  2551     total.set_coal_births( total.coal_births()  + fl->coal_births() );
  2552     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
  2553     total.set_split_births(total.split_births() + fl->split_births());
  2554     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
  2555   }
  2556   total.print_on(gclog_or_tty, "TOTAL");
  2557   gclog_or_tty->print_cr("Total free in indexed lists "
  2558                          SIZE_FORMAT " words", total_free);
  2559   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
  2560     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
  2561             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
  2562     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
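  // A worked illustration of the two ratios printed above (numbers invented
  // for the example): if the indexed lists recorded 120 births (split plus
  // coalesced) and 40 deaths this cycle with prev_sweep = 200, growth is
  // (120 - 40) / 200 = 0.40; if desired = 500 and count = 450, the deficit is
  // (500 - 450) / 500 = 0.10.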
  2563   _dictionary->print_dict_census();
  2564 }
  2565
  2566 ///////////////////////////////////////////////////////////////////////////
  2567 // CFLS_LAB
  2568 ///////////////////////////////////////////////////////////////////////////
  2570 #define VECTOR_257(x)                                                                                  \
  2571   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
  2572   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2573      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2574      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2575      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2576      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2577      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2578      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2579      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2580      x }
  2582 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
  2583 // OldPLABSize, whose static default is different; if overridden at the
  2584 // command-line, this will get reinitialized via a call to
  2585 // modify_initialization() below.
  2586 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
  2587   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
  2588 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
  2589 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
  2591 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
  2592   _cfls(cfls)
  2593 {
  2594   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
  2595   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2596        i < CompactibleFreeListSpace::IndexSetSize;
  2597        i += CompactibleFreeListSpace::IndexSetStride) {
  2598     _indexedFreeList[i].set_size(i);
  2599     _num_blocks[i] = 0;
  2600   }
  2601 }
  2602
  2603 static bool _CFLS_LAB_modified = false;
  2605 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
  2606   assert(!_CFLS_LAB_modified, "Call only once");
  2607   _CFLS_LAB_modified = true;
  2608   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2609        i < CompactibleFreeListSpace::IndexSetSize;
  2610        i += CompactibleFreeListSpace::IndexSetStride) {
  2611     _blocks_to_claim[i].modify(n, wt, true /* force */);
  2612   }
  2613 }
  2614
  2615 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  2616   FreeChunk* res;
  2617   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
  2618   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
  2619     // This locking manages sync with other large object allocations.
  2620     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
  2621                     Mutex::_no_safepoint_check_flag);
  2622     res = _cfls->getChunkFromDictionaryExact(word_sz);
  2623     if (res == NULL) return NULL;
  2624   } else {
  2625     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
  2626     if (fl->count() == 0) {
  2627       // Attempt to refill this local free list.
  2628       get_from_global_pool(word_sz, fl);
  2629       // If it didn't work, give up.
  2630       if (fl->count() == 0) return NULL;
  2631     }
  2632     res = fl->get_chunk_at_head();
  2633     assert(res != NULL, "Why was count non-zero?");
  2634   }
  2635   res->markNotFree();
  2636   assert(!res->is_free(), "shouldn't be marked free");
  2637   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  2638   // mangle a just allocated object with a distinct pattern.
  2639   debug_only(res->mangleAllocated(word_sz));
  2640   return (HeapWord*)res;
  2641 }
  2642
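// A minimal usage sketch (not taken from this file; cms_space, word_size and
// worker_id are placeholder names): each GC worker promoting into the CMS
// generation owns one of these LABs and allocates from it, and the buffer is
// retired single-threaded at the end of the scavenge:
//
//   CFLS_LAB lab(cms_space);                // one per worker thread
//   HeapWord* p = lab.alloc(word_size);     // word_size already adjusted;
//                                           // NULL means fall back elsewhere
//   ...
//   lab.retire(worker_id);                  // return unused blocks, update stats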
  2643 // Get a chunk of blocks of the right size and update related
  2644 // book-keeping stats
  2645 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
  2646   // Get the #blocks we want to claim
  2647   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
  2648   assert(n_blks > 0, "Error");
  2649   assert(ResizePLAB || n_blks == OldPLABSize, "Error");
  2650   // In some cases, when the application has a phase change,
  2651   // there may be a sudden and sharp shift in the object survival
  2652   // profile, and updating the counts at the end of a scavenge
  2653   // may not be quick enough, giving rise to large scavenge pauses
  2654   // during these phase changes. It is beneficial to detect such
  2655   // changes on-the-fly during a scavenge and avoid such a phase-change
  2656   // pothole. The following code is a heuristic attempt to do that.
  2657   // It is protected by a product flag until we have gained
  2658   // enough experience with this heuristic and fine-tuned its behaviour.
  2659   // WARNING: This might increase fragmentation if we overreact to
  2660   // small spikes, so some kind of historical smoothing based on
  2661   // previous experience with the greater reactivity might be useful.
  2662   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
  2663   // default.
  2664   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
  2665     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
  2666     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
  2667     n_blks = MIN2(n_blks, CMSOldPLABMax);
  2668   }
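  // A worked example of the adjustment above, with flag values chosen purely
  // for illustration: suppose n_blks = 8 and this LAB has already taken
  // _num_blocks[word_sz] = 512 blocks of this size. With
  // CMSOldPLABToleranceFactor = 4 and CMSOldPLABNumRefills = 4,
  // multiple = 512 / (4 * 4 * 8) = 4; with CMSOldPLABReactivityFactor = 2,
  // n_blks grows by 2 * 4 * 8 = 64 to 72, subject to the CMSOldPLABMax clip.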
  2669   assert(n_blks > 0, "Error");
  2670   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
  2671   // Update stats table entry for this block size
  2672   _num_blocks[word_sz] += fl->count();
  2673 }
  2674
  2675 void CFLS_LAB::compute_desired_plab_size() {
  2676   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2677        i < CompactibleFreeListSpace::IndexSetSize;
  2678        i += CompactibleFreeListSpace::IndexSetStride) {
  2679     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
  2680            "Counter inconsistency");
  2681     if (_global_num_workers[i] > 0) {
  2682       // Need to smooth wrt historical average
  2683       if (ResizeOldPLAB) {
  2684         _blocks_to_claim[i].sample(
  2685           MAX2((size_t)CMSOldPLABMin,
  2686           MIN2((size_t)CMSOldPLABMax,
  2687                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
  2688       }
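      // A worked example of the sample above (numbers invented for the
      // example): if 8 workers together consumed _global_num_blocks[i] = 1024
      // blocks of this size and CMSOldPLABNumRefills = 4, the new per-refill
      // estimate is 1024 / (8 * 4) = 32 blocks, clamped to the
      // [CMSOldPLABMin, CMSOldPLABMax] range.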
  2689       // Reset counters for next round
  2690       _global_num_workers[i] = 0;
  2691       _global_num_blocks[i] = 0;
  2692       if (PrintOldPLAB) {
  2693         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
  2694       }
  2695     }
  2696   }
  2697 }
  2698
  2699 // If this is changed in the future to allow parallel
  2700 // access, one would need to take the FL locks and,
  2701 // depending on how it is used, stagger access from
  2702 // parallel threads to reduce contention.
  2703 void CFLS_LAB::retire(int tid) {
  2704   // We run this single threaded with the world stopped;
  2705   // so no need for locks and such.
  2706   NOT_PRODUCT(Thread* t = Thread::current();)
  2707   assert(Thread::current()->is_VM_thread(), "Error");
  2708   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2709        i < CompactibleFreeListSpace::IndexSetSize;
  2710        i += CompactibleFreeListSpace::IndexSetStride) {
  2711     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
  2712            "Can't retire more than what we obtained");
  2713     if (_num_blocks[i] > 0) {
  2714       size_t num_retire =  _indexedFreeList[i].count();
  2715       assert(_num_blocks[i] > num_retire, "Should have used at least one");
  2716       {
  2717         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
  2718         //                Mutex::_no_safepoint_check_flag);
  2720         // Update globals stats for num_blocks used
  2721         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
  2722         _global_num_workers[i]++;
  2723         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
  2724         if (num_retire > 0) {
  2725           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
  2726           // Reset this list.
  2727           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
  2728           _indexedFreeList[i].set_size(i);
  2729         }
  2730       }
  2731       if (PrintOldPLAB) {
  2732         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
  2733                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
  2734       }
  2735       // Reset stats for next round
  2736       _num_blocks[i]         = 0;
  2737     }
  2738   }
  2739 }
  2740
  2741 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
  2742   assert(fl->count() == 0, "Precondition.");
  2743   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
  2744          "Precondition");
  2746   // We'll try all multiples of word_sz in the indexed set, starting with
  2747   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
  2748   // then try getting a big chunk and splitting it.
  2749   {
  2750     bool found;
  2751     int  k;
  2752     size_t cur_sz;
  2753     for (k = 1, cur_sz = k * word_sz, found = false;
  2754          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
  2755          (CMSSplitIndexedFreeListBlocks || k <= 1);
  2756          k++, cur_sz = k * word_sz) {
  2757       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
  2758       fl_for_cur_sz.set_size(cur_sz);
  2759       {
  2760         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
  2761                         Mutex::_no_safepoint_check_flag);
  2762         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
  2763         if (gfl->count() != 0) {
  2764           // nn is the number of chunks of size cur_sz that
  2765           // we'd need to split k-ways each, in order to create
  2766           // "n" chunks of size word_sz each.
  2767           const size_t nn = MAX2(n/k, (size_t)1);
  2768           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
  2769           found = true;
  2770           if (k > 1) {
  2771             // Update split death stats for the cur_sz-size blocks list:
  2772             // we increment the split death count by the number of blocks
  2773             // we just took from the cur_sz-size blocks list and which
  2774             // we will be splitting below.
  2775             ssize_t deaths = gfl->split_deaths() +
  2776                              fl_for_cur_sz.count();
  2777             gfl->set_split_deaths(deaths);
  2778           }
  2779         }
  2780       }
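      // A worked example of the bookkeeping above (sizes invented for the
      // example): a request for n = 16 chunks of word_sz = 8 that reaches the
      // k = 3 pass (cur_sz = 24) computes nn = MAX2(16/3, 1) = 5, so up to five
      // 24-word chunks are unlinked here; each is split three ways below,
      // yielding 15 chunks of 8 words, and the chunks actually taken are
      // charged as split deaths against the 24-word list.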
  2781       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
  2782       if (found) {
  2783         if (k == 1) {
  2784           fl->prepend(&fl_for_cur_sz);
  2785         } else {
  2786           // Divide each block on fl_for_cur_sz up k ways.
  2787           FreeChunk* fc;
  2788           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
  2789             // Must do this in reverse order, so that anybody attempting to
  2790             // access the main chunk sees it as a single free block until we
  2791             // change it.
  2792             size_t fc_size = fc->size();
  2793             assert(fc->is_free(), "Error");
  2794             for (int i = k-1; i >= 0; i--) {
  2795               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2796               assert((i != 0) ||
  2797                         ((fc == ffc) && ffc->is_free() &&
  2798                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
  2799                         "Counting error");
  2800               ffc->set_size(word_sz);
  2801               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
  2802               ffc->link_next(NULL);
  2803               // Above must occur before BOT is updated below.
  2804               OrderAccess::storestore();
  2805               // splitting from the right, fc_size == i * word_sz
  2806               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
  2807               fc_size -= word_sz;
  2808               assert(fc_size == i*word_sz, "Error");
  2809               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
  2810               _bt.verify_single_block((HeapWord*)fc, fc_size);
  2811               _bt.verify_single_block((HeapWord*)ffc, word_sz);
  2812               // Push this on "fl".
  2813               fl->return_chunk_at_head(ffc);
  2814             }
  2815             // TRAP
  2816             assert(fl->tail()->next() == NULL, "List invariant.");
  2817           }
  2818         }
  2819         // Update birth stats for this block size.
  2820         size_t num = fl->count();
  2821         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  2822                         Mutex::_no_safepoint_check_flag);
  2823         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
  2824         _indexedFreeList[word_sz].set_split_births(births);
  2825         return;
  2826       }
  2827     }
  2828   }
  2829   // Otherwise, we'll split a block from the dictionary.
  2830   FreeChunk* fc = NULL;
  2831   FreeChunk* rem_fc = NULL;
  2832   size_t rem;
  2833   {
  2834     MutexLockerEx x(parDictionaryAllocLock(),
  2835                     Mutex::_no_safepoint_check_flag);
  2836     while (n > 0) {
  2837       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
  2838                                   FreeBlockDictionary<FreeChunk>::atLeast);
  2839       if (fc != NULL) {
  2840         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
  2841         dictionary()->dict_census_update(fc->size(),
  2842                                        true /*split*/,
  2843                                        false /*birth*/);
  2844         break;
  2845       } else {
  2846         n--;
  2847       }
  2848     }
  2849     if (fc == NULL) return;
  2850     // Otherwise, split up that block.
  2851     assert((ssize_t)n >= 1, "Control point invariant");
  2852     assert(fc->is_free(), "Error: should be a free block");
  2853     _bt.verify_single_block((HeapWord*)fc, fc->size());
  2854     const size_t nn = fc->size() / word_sz;
  2855     n = MIN2(nn, n);
  2856     assert((ssize_t)n >= 1, "Control point invariant");
  2857     rem = fc->size() - n * word_sz;
  2858     // If there is a remainder, and it's too small, allocate one fewer.
  2859     if (rem > 0 && rem < MinChunkSize) {
  2860       n--; rem += word_sz;
  2861     }
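    // A worked example of the adjustment above, with MinChunkSize = 4 words
    // assumed purely for illustration: for word_sz = 6 and n = 10, a 62-word
    // dictionary chunk leaves rem = 62 - 60 = 2 < 4, so n drops to 9 and rem
    // becomes 2 + 6 = 8, large enough to stand as a free chunk of its own.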
  2862     // Note that at this point we may have n == 0.
  2863     assert((ssize_t)n >= 0, "Control point invariant");
  2865     // If n is 0, the chunk fc that was found is not large
  2866     // enough to leave a viable remainder.  We are unable to
  2867     // allocate even one block.  Return fc to the
  2868     // dictionary and return, leaving "fl" empty.
  2869     if (n == 0) {
  2870       returnChunkToDictionary(fc);
  2871       assert(fl->count() == 0, "We never allocated any blocks");
  2872       return;
  2873     }
  2874
  2875     // First return the remainder, if any.
  2876     // Note that we hold the lock until we decide if we're going to give
  2877     // back the remainder to the dictionary, since a concurrent allocation
  2878     // may otherwise see the heap as empty.  (We're willing to take that
  2879     // hit if the block is a small block.)
  2880     if (rem > 0) {
  2881       size_t prefix_size = n * word_sz;
  2882       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
  2883       rem_fc->set_size(rem);
  2884       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
  2885       rem_fc->link_next(NULL);
  2886       // Above must occur before BOT is updated below.
  2887       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
  2888       OrderAccess::storestore();
  2889       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
  2890       assert(fc->is_free(), "Error");
  2891       fc->set_size(prefix_size);
  2892       if (rem >= IndexSetSize) {
  2893         returnChunkToDictionary(rem_fc);
  2894         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
  2895         rem_fc = NULL;
  2896       }
  2897       // Otherwise, return it to the small list below.
  2898     }
  2899   }
  2900   if (rem_fc != NULL) {
  2901     MutexLockerEx x(_indexedFreeListParLocks[rem],
  2902                     Mutex::_no_safepoint_check_flag);
  2903     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
  2904     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
  2905     smallSplitBirth(rem);
  2906   }
  2907   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  2908   // Now do the splitting up.
  2909   // Must do this in reverse order, so that anybody attempting to
  2910   // access the main chunk sees it as a single free block until we
  2911   // change it.
  2912   size_t fc_size = n * word_sz;
  2913   // All but first chunk in this loop
  2914   for (ssize_t i = n-1; i > 0; i--) {
  2915     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2916     ffc->set_size(word_sz);
  2917     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
  2918     ffc->link_next(NULL);
  2919     // Above must occur before BOT is updated below.
  2920     OrderAccess::storestore();
  2921     // splitting from the right, fc_size == i * word_sz
  2922     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
  2923     fc_size -= word_sz;
  2924     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
  2925     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
  2926     _bt.verify_single_block((HeapWord*)fc, fc_size);
  2927     // Push this on "fl".
  2928     fl->return_chunk_at_head(ffc);
  2929   }
  2930   // First chunk
  2931   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
  2932   // The blocks above should show their new sizes before the first block below
  2933   fc->set_size(word_sz);
  2934   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
  2935   fc->link_next(NULL);
  2936   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  2937   _bt.verify_single_block((HeapWord*)fc, fc->size());
  2938   fl->return_chunk_at_head(fc);
  2940   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  2941   {
  2942     // Update the stats for this block size.
  2943     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  2944                     Mutex::_no_safepoint_check_flag);
  2945     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
  2946     _indexedFreeList[word_sz].set_split_births(births);
  2947     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
  2948     // _indexedFreeList[word_sz].set_surplus(new_surplus);
  2949   }
  2950
  2951   // TRAP
  2952   assert(fl->tail()->next() == NULL, "List invariant.");
  2953 }
  2954
  2955 // Set up the space's par_seq_tasks structure for work claiming
  2956 // for parallel rescan. See CMSParRemarkTask where this is currently used.
  2957 // XXX Need to suitably abstract and generalize this and the next
  2958 // method into one.
  2959 void
  2960 CompactibleFreeListSpace::
  2961 initialize_sequential_subtasks_for_rescan(int n_threads) {
  2962   // The "size" of each task is fixed according to rescan_task_size.
  2963   assert(n_threads > 0, "Unexpected n_threads argument");
  2964   const size_t task_size = rescan_task_size();
  2965   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
  2966   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  2967   assert(n_tasks == 0 ||
  2968          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
  2969           (used_region().start() + n_tasks*task_size >= used_region().end())),
  2970          "n_tasks calculation incorrect");
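  // For illustration (both figures assumed, not actual defaults): a used
  // region of 10000 words with a task_size of 4096 words yields
  // n_tasks = (10000 + 4095) / 4096 = 3, and indeed 2 * 4096 < 10000 while
  // 3 * 4096 >= 10000, which is what the assertion above checks.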
  2971   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  2972   assert(!pst->valid(), "Clobbering existing data?");
  2973   // Sets the condition for completion of the subtask (how many threads
  2974   // need to finish in order to be done).
  2975   pst->set_n_threads(n_threads);
  2976   pst->set_n_tasks((int)n_tasks);
  2977 }
  2978
  2979 // Set up the space's par_seq_tasks structure for work claiming
  2980 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
  2981 void
  2982 CompactibleFreeListSpace::
  2983 initialize_sequential_subtasks_for_marking(int n_threads,
  2984                                            HeapWord* low) {
  2985   // The "size" of each task is fixed according to marking_task_size.
  2986   assert(n_threads > 0, "Unexpected n_threads argument");
  2987   const size_t task_size = marking_task_size();
  2988   assert(task_size > CardTableModRefBS::card_size_in_words &&
  2989          (task_size %  CardTableModRefBS::card_size_in_words == 0),
  2990          "Otherwise arithmetic below would be incorrect");
  2991   MemRegion span = _gen->reserved();
  2992   if (low != NULL) {
  2993     if (span.contains(low)) {
  2994       // Align low down to  a card boundary so that
  2995       // we can use block_offset_careful() on span boundaries.
  2996       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
  2997                                  CardTableModRefBS::card_size);
  2998       // Clip span prefix at aligned_low
  2999       span = span.intersection(MemRegion(aligned_low, span.end()));
  3000     } else if (low > span.end()) {
  3001       span = MemRegion(low, low);  // Null region
  3002     } // else use entire span
  3003   }
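  // For illustration, assuming a card covers 64 words: if low lies 70 words
  // past a card-aligned address, align_size_down moves it back to the 64-word
  // boundary, so the clipped span still starts on a card boundary as the
  // assertion below requires.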
  3004   assert(span.is_empty() ||
  3005          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
  3006         "span should start at a card boundary");
  3007   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  3008   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  3009   assert(n_tasks == 0 ||
  3010          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
  3011           (span.start() + n_tasks*task_size >= span.end())),
  3012          "n_tasks calculation incorrect");
  3013   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  3014   assert(!pst->valid(), "Clobbering existing data?");
  3015   // Sets the condition for completion of the subtask (how many threads
  3016   // need to finish in order to be done).
  3017   pst->set_n_threads(n_threads);
  3018   pst->set_n_tasks((int)n_tasks);
  3019 }
