src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author      goetz
date        Tue, 29 Apr 2014 15:17:27 +0200
changeset   6911:ce8f6bb717c9
parent      6680:78bbf4d43a14
child       6912:c49dcaf78a65
permissions -rw-r--r--

8042195: Introduce umbrella header orderAccess.inline.hpp.
Reviewed-by: dholmes, kvn, stefank, twisti

     1 /*
     2  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
    27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
    28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
    29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
    30 #include "gc_implementation/shared/liveRange.hpp"
    31 #include "gc_implementation/shared/spaceDecorator.hpp"
    32 #include "gc_interface/collectedHeap.inline.hpp"
    33 #include "memory/allocation.inline.hpp"
    34 #include "memory/blockOffsetTable.inline.hpp"
    35 #include "memory/resourceArea.hpp"
    36 #include "memory/universe.inline.hpp"
    37 #include "oops/oop.inline.hpp"
    38 #include "runtime/globals.hpp"
    39 #include "runtime/handles.inline.hpp"
    40 #include "runtime/init.hpp"
    41 #include "runtime/java.hpp"
    42 #include "runtime/orderAccess.inline.hpp"
    43 #include "runtime/vmThread.hpp"
    44 #include "utilities/copy.hpp"
    46 /////////////////////////////////////////////////////////////////////////
    47 //// CompactibleFreeListSpace
    48 /////////////////////////////////////////////////////////////////////////
    50 // highest ranked free list lock rank
    51 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
    53 // Defaults are 0 so things will break badly if incorrectly initialized.
    54 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
    55 size_t CompactibleFreeListSpace::IndexSetStride = 0;
    57 size_t MinChunkSize = 0;
    59 void CompactibleFreeListSpace::set_cms_values() {
    60   // Set CMS global values
    61   assert(MinChunkSize == 0, "already set");
    63   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
    64   // for chunks to contain a FreeChunk.
    65   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
    66   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
    68   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
    69   IndexSetStart  = MinChunkSize;
    70   IndexSetStride = MinObjAlignment;
    71 }
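// Illustration only -- a minimal, self-contained sketch of the rounding done in
// set_cms_values() above. The helpers below and the concrete numbers in the
// trailing comment are hypothetical stand-ins for the VM's align_size_up(),
// sizeof(FreeChunk), MinObjAlignmentInBytes and BytesPerWord.
#include <cstddef>

static size_t illustrative_align_up(size_t size, size_t alignment) {
  // alignment is assumed to be a power of two, as MinObjAlignmentInBytes is.
  return (size + alignment - 1) & ~(alignment - 1);
}

static size_t illustrative_min_chunk_words(size_t chunk_header_bytes,
                                           size_t min_obj_alignment_bytes,
                                           size_t bytes_per_word) {
  size_t bytes = illustrative_align_up(chunk_header_bytes, min_obj_alignment_bytes);
  return bytes / bytes_per_word;
}
// E.g., a hypothetical 24-byte chunk header with 8-byte alignment and 8-byte
// words gives illustrative_min_chunk_words(24, 8, 8) == 3 words.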
    73 // Constructor
    74 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
    75   MemRegion mr, bool use_adaptive_freelists,
    76   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
    77   _dictionaryChoice(dictionaryChoice),
    78   _adaptive_freelists(use_adaptive_freelists),
    79   _bt(bs, mr),
    80   // free list locks are in the range of values taken by _lockRank
    81   // This range currently is [_leaf+2, _leaf+3]
    82   // Note: this requires that CFLspace c'tors
    83   // are called serially in the order in which the locks
    84   // are acquired in the program text. This is true today.
    85   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
    86   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
    87                           "CompactibleFreeListSpace._dict_par_lock", true),
    88   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    89                     CMSRescanMultiple),
    90   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    91                     CMSConcMarkMultiple),
    92   _collector(NULL)
    93 {
    94   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
    95          "FreeChunk is larger than expected");
    96   _bt.set_space(this);
    97   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    98   // We have all of "mr", all of which we place in the dictionary
    99   // as one big chunk. We'll need to decide here which of several
   100   // possible alternative dictionary implementations to use. For
   101   // now the choice is easy, since we have only one working
   102   // implementation, namely, the simple binary tree (splaying
   103   // temporarily disabled).
   104   switch (dictionaryChoice) {
   105     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
   106       _dictionary = new AFLBinaryTreeDictionary(mr);
   107       break;
   108     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
   109     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
   110     default:
   111       warning("dictionaryChoice: selected option not understood; using"
   112               " default BinaryTreeDictionary implementation instead.");
   113   }
   114   assert(_dictionary != NULL, "CMS dictionary initialization");
   115   // The indexed free lists are initially all empty and are lazily
   116   // filled in on demand. Initialize the array elements to NULL.
   117   initializeIndexedFreeListArray();
   119   // Not using adaptive free lists assumes that allocation is first
   120   // from the linAB's.  Also a cms perm gen which can be compacted
   121   // has to have the klass's klassKlass allocated at a lower
   122   // address in the heap than the klass so that the klassKlass is
   123   // moved to its new location before the klass is moved.
   124   // Set the _refillSize for the linear allocation blocks
   125   if (!use_adaptive_freelists) {
   126     FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
   127                                            FreeBlockDictionary<FreeChunk>::atLeast);
   128     // The small linAB initially has all the space and will allocate
   129     // a chunk of any size.
   130     HeapWord* addr = (HeapWord*) fc;
   131     _smallLinearAllocBlock.set(addr, fc->size() ,
   132       1024*SmallForLinearAlloc, fc->size());
   133     // Note that _unallocated_block is not updated here.
   134     // Allocations from the linear allocation block should
   135     // update it.
   136   } else {
   137     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
   138                                SmallForLinearAlloc);
   139   }
   140   // CMSIndexedFreeListReplenish should be at least 1
   141   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
   142   _promoInfo.setSpace(this);
   143   if (UseCMSBestFit) {
   144     _fitStrategy = FreeBlockBestFitFirst;
   145   } else {
   146     _fitStrategy = FreeBlockStrategyNone;
   147   }
   148   check_free_list_consistency();
   150   // Initialize locks for parallel case.
   152   if (CollectedHeap::use_parallel_gc_threads()) {
   153     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   154       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
   155                                               "a freelist par lock",
   156                                               true);
   157       DEBUG_ONLY(
   158         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
   159       )
   160     }
   161     _dictionary->set_par_lock(&_parDictionaryAllocLock);
   162   }
   163 }
   165 // Like CompactibleSpace forward() but always calls cross_threshold() to
   166 // update the block offset table.  Removed initialize_threshold call because
   167 // CFLS does not use a block offset array for contiguous spaces.
   168 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
   169                                     CompactPoint* cp, HeapWord* compact_top) {
   170   // q is alive
   171   // First check if we should switch compaction space
   172   assert(this == cp->space, "'this' should be current compaction space.");
   173   size_t compaction_max_size = pointer_delta(end(), compact_top);
   174   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
   175     "virtual adjustObjectSize_v() method is not correct");
   176   size_t adjusted_size = adjustObjectSize(size);
   177   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
   178          "no small fragments allowed");
   179   assert(minimum_free_block_size() == MinChunkSize,
   180          "for de-virtualized reference below");
   181   // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
   182   if (adjusted_size + MinChunkSize > compaction_max_size &&
   183       adjusted_size != compaction_max_size) {
   184     do {
   185       // switch to next compaction space
   186       cp->space->set_compaction_top(compact_top);
   187       cp->space = cp->space->next_compaction_space();
   188       if (cp->space == NULL) {
   189         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
   190         assert(cp->gen != NULL, "compaction must succeed");
   191         cp->space = cp->gen->first_compaction_space();
   192         assert(cp->space != NULL, "generation must have a first compaction space");
   193       }
   194       compact_top = cp->space->bottom();
   195       cp->space->set_compaction_top(compact_top);
   196       // The correct adjusted_size may not be the same as that for this method
   197       // (i.e., cp->space may no longer be "this"), so adjust the size again.
   198       // Use the virtual method which is not used above to save the virtual
   199       // dispatch.
   200       adjusted_size = cp->space->adjust_object_size_v(size);
   201       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
   202       assert(cp->space->minimum_free_block_size() == 0, "just checking");
   203     } while (adjusted_size > compaction_max_size);
   204   }
   206   // store the forwarding pointer into the mark word
   207   if ((HeapWord*)q != compact_top) {
   208     q->forward_to(oop(compact_top));
   209     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
   210   } else {
   211     // if the object isn't moving we can just set the mark to the default
   212     // mark and handle it specially later on.
   213     q->init_mark();
   214     assert(q->forwardee() == NULL, "should be forwarded to NULL");
   215   }
   217   compact_top += adjusted_size;
   219   // we need to update the offset table so that the beginnings of objects can be
   220   // found during scavenge.  Note that we are updating the offset table based on
   221   // where the object will be once the compaction phase finishes.
   223   // Always call cross_threshold().  A contiguous space can only call it when
   224   // the compaction_top exceeds the current threshold but not for a
   225   // non-contiguous space.
   226   cp->threshold =
   227     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
   228   return compact_top;
   229 }
   231 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
   232 // and use of single_block instead of alloc_block.  The name here is not really
   233 // appropriate - maybe a more general name could be invented for both the
   234 // contiguous and noncontiguous spaces.
   236 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
   237   _bt.single_block(start, the_end);
   238   return end();
   239 }
   241 // Initialize them to NULL.
   242 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
   243   for (size_t i = 0; i < IndexSetSize; i++) {
   244     // Note that on platforms where objects are double word aligned,
   245     // the odd array elements are not used.  It is convenient, however,
   246     // to map directly from the object size to the array element.
   247     _indexedFreeList[i].reset(IndexSetSize);
   248     _indexedFreeList[i].set_size(i);
   249     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   250     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   251     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   252     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   253   }
   254 }
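// Illustration only -- a minimal sketch (not the VM's data structure) of the
// direct size-to-index mapping described in the comment above: slot i holds
// free blocks of exactly i words, so with an alignment stride of 2 the odd
// slots simply stay empty. All names and the bound below are hypothetical.
#include <cstddef>
#include <vector>

struct SketchChunk { SketchChunk* next; };

class SketchSizeIndexedFreeLists {
  static const size_t kIndexSetSize = 257;   // hypothetical upper bound
  std::vector<SketchChunk*> _head;
public:
  SketchSizeIndexedFreeLists() : _head(kIndexSetSize, NULL) {}

  // The block size *is* the array index, so exact-size lookups are O(1).
  void add(SketchChunk* c, size_t size_in_words) {
    c->next = _head[size_in_words];
    _head[size_in_words] = c;
  }
  SketchChunk* remove(size_t size_in_words) {
    SketchChunk* c = _head[size_in_words];
    if (c != NULL) _head[size_in_words] = c->next;
    return c;
  }
};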
   256 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
   257   for (size_t i = 1; i < IndexSetSize; i++) {
   258     assert(_indexedFreeList[i].size() == (size_t) i,
   259       "Indexed free list sizes are incorrect");
   260     _indexedFreeList[i].reset(IndexSetSize);
   261     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   262     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   263     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   264     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   265   }
   266 }
   268 void CompactibleFreeListSpace::reset(MemRegion mr) {
   269   resetIndexedFreeListArray();
   270   dictionary()->reset();
   271   if (BlockOffsetArrayUseUnallocatedBlock) {
   272     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
   273     // Everything's allocated until proven otherwise.
   274     _bt.set_unallocated_block(end());
   275   }
   276   if (!mr.is_empty()) {
   277     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
   278     _bt.single_block(mr.start(), mr.word_size());
   279     FreeChunk* fc = (FreeChunk*) mr.start();
   280     fc->set_size(mr.word_size());
   281     if (mr.word_size() >= IndexSetSize ) {
   282       returnChunkToDictionary(fc);
   283     } else {
   284       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
   285       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
   286     }
   287     coalBirth(mr.word_size());
   288   }
   289   _promoInfo.reset();
   290   _smallLinearAllocBlock._ptr = NULL;
   291   _smallLinearAllocBlock._word_size = 0;
   292 }
   294 void CompactibleFreeListSpace::reset_after_compaction() {
   295   // Reset the space to the new reality - one free chunk.
   296   MemRegion mr(compaction_top(), end());
   297   reset(mr);
   298   // Now refill the linear allocation block(s) if possible.
   299   if (_adaptive_freelists) {
   300     refillLinearAllocBlocksIfNeeded();
   301   } else {
   302     // Place as much of mr in the linAB as we can get,
   303     // provided it was big enough to go into the dictionary.
   304     FreeChunk* fc = dictionary()->find_largest_dict();
   305     if (fc != NULL) {
   306       assert(fc->size() == mr.word_size(),
   307              "Why was the chunk broken up?");
   308       removeChunkFromDictionary(fc);
   309       HeapWord* addr = (HeapWord*) fc;
   310       _smallLinearAllocBlock.set(addr, fc->size() ,
   311         1024*SmallForLinearAlloc, fc->size());
   312       // Note that _unallocated_block is not updated here.
   313     }
   314   }
   315 }
   317 // Walks the entire dictionary, returning a coterminal
   318 // chunk, if it exists. Use with caution since it involves
   319 // a potentially complete walk of a potentially large tree.
   320 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
   322   assert_lock_strong(&_freelistLock);
   324   return dictionary()->find_chunk_ends_at(end());
   325 }
   328 #ifndef PRODUCT
   329 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
   330   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   331     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
   332   }
   333 }
   335 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
   336   size_t sum = 0;
   337   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   338     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
   339   }
   340   return sum;
   341 }
   343 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
   344   size_t count = 0;
   345   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
   346     debug_only(
   347       ssize_t total_list_count = 0;
   348       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   349          fc = fc->next()) {
   350         total_list_count++;
   351       }
   352       assert(total_list_count ==  _indexedFreeList[i].count(),
   353         "Count in list is incorrect");
   354     )
   355     count += _indexedFreeList[i].count();
   356   }
   357   return count;
   358 }
   360 size_t CompactibleFreeListSpace::totalCount() {
   361   size_t num = totalCountInIndexedFreeLists();
   362   num +=  dictionary()->total_count();
   363   if (_smallLinearAllocBlock._word_size != 0) {
   364     num++;
   365   }
   366   return num;
   367 }
   368 #endif
   370 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
   371   FreeChunk* fc = (FreeChunk*) p;
   372   return fc->is_free();
   373 }
   375 size_t CompactibleFreeListSpace::used() const {
   376   return capacity() - free();
   377 }
   379 size_t CompactibleFreeListSpace::free() const {
   380   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   381   // if you do this while the structures are in flux you
   382   // may get an approximate answer only; for instance
   383   // because there is concurrent allocation either
   384   // directly by mutators or for promotion during a GC.
   385   // It's "MT-safe", however, in the sense that you are guaranteed
   386   // not to crash and burn, for instance, because of walking
   387   // pointers that could disappear as you were walking them.
   388   // The approximation is because the various components
   389   // that are read below are not read atomically (and
   390   // further the computation of totalSizeInIndexedFreeLists()
   391   // is itself a non-atomic computation). The normal use of
   392   // this is during a resize operation at the end of GC
   393   // and at that time you are guaranteed to get the
   394   // correct actual value. However, for instance, this is
   395   // also read completely asynchronously by the "perf-sampler"
   396   // that supports jvmstat, and you are apt to see the values
   397   // flicker in such cases.
   398   assert(_dictionary != NULL, "No _dictionary?");
   399   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
   400           totalSizeInIndexedFreeLists() +
   401           _smallLinearAllocBlock._word_size) * HeapWordSize;
   402 }
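// Illustration only -- a minimal sketch of the "MT-safe but not MT-precise"
// reading pattern described in the comment above: each component is read
// safely, but the components are read at slightly different times, so the sum
// may not correspond to any single instant. All names are hypothetical.
#include <atomic>
#include <cstddef>

struct SketchFreeSpaceCounters {
  std::atomic<size_t> dictionary_words{0};
  std::atomic<size_t> indexed_lists_words{0};
  std::atomic<size_t> linear_alloc_words{0};

  // Safe to call concurrently with updates (no torn reads, no crashes), but
  // only approximate because the three loads are not one atomic snapshot.
  size_t free_words_approx() const {
    return dictionary_words.load(std::memory_order_relaxed) +
           indexed_lists_words.load(std::memory_order_relaxed) +
           linear_alloc_words.load(std::memory_order_relaxed);
  }
};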
   404 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
   405   assert(_dictionary != NULL, "No _dictionary?");
   406   assert_locked();
   407   size_t res = _dictionary->max_chunk_size();
   408   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
   409                        (size_t) SmallForLinearAlloc - 1));
   410   // XXX the following could potentially be pretty slow;
   411   // should one, pessimally for the rare cases when res
   412   // calculated above is less than IndexSetSize,
   413   // just return res calculated above? My reasoning was that
   414   // those cases will be so rare that the extra time spent doesn't
   415   // really matter....
   416   // Note: do not change the loop test i >= res + IndexSetStride
   417   // to i > res below, because i is unsigned and res may be zero.
   418   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
   419        i -= IndexSetStride) {
   420     if (_indexedFreeList[i].head() != NULL) {
   421       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   422       return i;
   423     }
   424   }
   425   return res;
   426 }
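// Illustration only -- a minimal sketch of the unsigned wrap-around pitfall
// that the "do not change the loop test" comment above guards against: with a
// size_t index, "i -= stride" wraps to a huge value instead of going negative,
// so the loop must stop while the subtraction is still safe. The function and
// its arguments are hypothetical.
#include <cstddef>

// Visits strided slots scanning down from 'hi' while i >= stop + stride.
// Writing the test as "i > stop" instead would let i underflow when stop == 0
// and i < stride at the last iteration (e.g., hi odd, stride 2), turning this
// into an endless, out-of-bounds scan.
static size_t sketch_count_strided_down(size_t hi, size_t stop, size_t stride) {
  size_t visited = 0;
  for (size_t i = hi; i >= stop + stride; i -= stride) {
    ++visited;
  }
  return visited;
}
// E.g., sketch_count_strided_down(255, 0, 2) == 127, and i never wraps.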
   428 void LinearAllocBlock::print_on(outputStream* st) const {
   429   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
   430             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
   431             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
   432 }
   434 void CompactibleFreeListSpace::print_on(outputStream* st) const {
   435   st->print_cr("COMPACTIBLE FREELIST SPACE");
   436   st->print_cr(" Space:");
   437   Space::print_on(st);
   439   st->print_cr("promoInfo:");
   440   _promoInfo.print_on(st);
   442   st->print_cr("_smallLinearAllocBlock");
   443   _smallLinearAllocBlock.print_on(st);
   445   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
   447   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
   448                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
   449 }
   451 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
   452 const {
   453   reportIndexedFreeListStatistics();
   454   gclog_or_tty->print_cr("Layout of Indexed Freelists");
   455   gclog_or_tty->print_cr("---------------------------");
   456   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
   457   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   458     _indexedFreeList[i].print_on(gclog_or_tty);
   459     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   460          fc = fc->next()) {
   461       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
   462                           p2i(fc), p2i((HeapWord*)fc + i),
   463                           fc->cantCoalesce() ? "\t CC" : "");
   464     }
   465   }
   466 }
   468 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
   469 const {
   470   _promoInfo.print_on(st);
   471 }
   473 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
   474 const {
   475   _dictionary->report_statistics();
   476   st->print_cr("Layout of Freelists in Tree");
   477   st->print_cr("---------------------------");
   478   _dictionary->print_free_lists(st);
   479 }
   481 class BlkPrintingClosure: public BlkClosure {
   482   const CMSCollector*             _collector;
   483   const CompactibleFreeListSpace* _sp;
   484   const CMSBitMap*                _live_bit_map;
   485   const bool                      _post_remark;
   486   outputStream*                   _st;
   487 public:
   488   BlkPrintingClosure(const CMSCollector* collector,
   489                      const CompactibleFreeListSpace* sp,
   490                      const CMSBitMap* live_bit_map,
   491                      outputStream* st):
   492     _collector(collector),
   493     _sp(sp),
   494     _live_bit_map(live_bit_map),
   495     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
   496     _st(st) { }
   497   size_t do_blk(HeapWord* addr);
   498 };
   500 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
   501   size_t sz = _sp->block_size_no_stall(addr, _collector);
   502   assert(sz != 0, "Should always be able to compute a size");
   503   if (_sp->block_is_obj(addr)) {
   504     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
   505     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
   506       p2i(addr),
   507       dead ? "dead" : "live",
   508       sz,
   509       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
   510     if (CMSPrintObjectsInDump && !dead) {
   511       oop(addr)->print_on(_st);
   512       _st->print_cr("--------------------------------------");
   513     }
   514   } else { // free block
   515     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
   516       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
   517     if (CMSPrintChunksInDump) {
   518       ((FreeChunk*)addr)->print_on(_st);
   519       _st->print_cr("--------------------------------------");
   520     }
   521   }
   522   return sz;
   523 }
   525 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
   526   outputStream* st) {
   527   st->print_cr("\n=========================");
   528   st->print_cr("Block layout in CMS Heap:");
   529   st->print_cr("=========================");
   530   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
   531   blk_iterate(&bpcl);
   533   st->print_cr("\n=======================================");
   534   st->print_cr("Order & Layout of Promotion Info Blocks");
   535   st->print_cr("=======================================");
   536   print_promo_info_blocks(st);
   538   st->print_cr("\n===========================");
   539   st->print_cr("Order of Indexed Free Lists");
   540   st->print_cr("=========================");
   541   print_indexed_free_lists(st);
   543   st->print_cr("\n=================================");
   544   st->print_cr("Order of Free Lists in Dictionary");
   545   st->print_cr("=================================");
   546   print_dictionary_free_lists(st);
   547 }
   550 void CompactibleFreeListSpace::reportFreeListStatistics() const {
   551   assert_lock_strong(&_freelistLock);
   552   assert(PrintFLSStatistics != 0, "Reporting error");
   553   _dictionary->report_statistics();
   554   if (PrintFLSStatistics > 1) {
   555     reportIndexedFreeListStatistics();
   556     size_t total_size = totalSizeInIndexedFreeLists() +
   557                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
   558     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
   559   }
   560 }
   562 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
   563   assert_lock_strong(&_freelistLock);
   564   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
   565                       "--------------------------------\n");
   566   size_t total_size = totalSizeInIndexedFreeLists();
   567   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
   568   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
   569   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
   570   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
   571   if (free_blocks != 0) {
   572     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
   573   }
   574 }
   576 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
   577   size_t res = 0;
   578   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   579     debug_only(
   580       ssize_t recount = 0;
   581       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   582          fc = fc->next()) {
   583         recount += 1;
   584       }
   585       assert(recount == _indexedFreeList[i].count(),
   586         "Incorrect count in list");
   587     )
   588     res += _indexedFreeList[i].count();
   589   }
   590   return res;
   591 }
   593 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
   594   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
   595     if (_indexedFreeList[i].head() != NULL) {
   596       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   597       return (size_t)i;
   598     }
   599   }
   600   return 0;
   601 }
   603 void CompactibleFreeListSpace::set_end(HeapWord* value) {
   604   HeapWord* prevEnd = end();
   605   assert(prevEnd != value, "unnecessary set_end call");
   606   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   607         "New end is below unallocated block");
   608   _end = value;
   609   if (prevEnd != NULL) {
   610     // Resize the underlying block offset table.
   611     _bt.resize(pointer_delta(value, bottom()));
   612     if (value <= prevEnd) {
   613       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   614              "New end is below unallocated block");
   615     } else {
   616       // Now, take this new chunk and add it to the free blocks.
   617       // Note that the BOT has not yet been updated for this block.
   618       size_t newFcSize = pointer_delta(value, prevEnd);
   619       // XXX This is REALLY UGLY and should be fixed up. XXX
   620       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
   621         // Mark the boundary of the new block in BOT
   622         _bt.mark_block(prevEnd, value);
   623         // put it all in the linAB
   624         if (ParallelGCThreads == 0) {
   625           _smallLinearAllocBlock._ptr = prevEnd;
   626           _smallLinearAllocBlock._word_size = newFcSize;
   627           repairLinearAllocBlock(&_smallLinearAllocBlock);
   628         } else { // ParallelGCThreads > 0
   629           MutexLockerEx x(parDictionaryAllocLock(),
   630                           Mutex::_no_safepoint_check_flag);
   631           _smallLinearAllocBlock._ptr = prevEnd;
   632           _smallLinearAllocBlock._word_size = newFcSize;
   633           repairLinearAllocBlock(&_smallLinearAllocBlock);
   634         }
   635         // Births of chunks put into a LinAB are not recorded.  Births
   636         // of chunks as they are allocated out of a LinAB are.
   637       } else {
   638         // Add the block to the free lists, if possible coalescing it
   639         // with the last free block, and update the BOT and census data.
   640         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
   641       }
   642     }
   643   }
   644 }
   646 class FreeListSpace_DCTOC : public Filtering_DCTOC {
   647   CompactibleFreeListSpace* _cfls;
   648   CMSCollector* _collector;
   649 protected:
   650   // Override.
   651 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
   652   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
   653                                        HeapWord* bottom, HeapWord* top, \
   654                                        ClosureType* cl);                \
   655       void walk_mem_region_with_cl_par(MemRegion mr,                    \
   656                                        HeapWord* bottom, HeapWord* top, \
   657                                        ClosureType* cl);                \
   658     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
   659                                        HeapWord* bottom, HeapWord* top, \
   660                                        ClosureType* cl)
   661   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
   662   walk_mem_region_with_cl_DECL(FilteringClosure);
   664 public:
   665   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
   666                       CMSCollector* collector,
   667                       ExtendedOopClosure* cl,
   668                       CardTableModRefBS::PrecisionStyle precision,
   669                       HeapWord* boundary) :
   670     Filtering_DCTOC(sp, cl, precision, boundary),
   671     _cfls(sp), _collector(collector) {}
   672 };
   674 // We de-virtualize the block-related calls below, since we know that our
   675 // space is a CompactibleFreeListSpace.
   677 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
   678 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
   679                                                  HeapWord* bottom,              \
   680                                                  HeapWord* top,                 \
   681                                                  ClosureType* cl) {             \
   682    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
   683    if (is_par) {                                                                \
   684      assert(SharedHeap::heap()->n_par_threads() ==                              \
   685             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
   686      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   687    } else {                                                                     \
   688      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   689    }                                                                            \
   690 }                                                                               \
   691 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
   692                                                       HeapWord* bottom,         \
   693                                                       HeapWord* top,            \
   694                                                       ClosureType* cl) {        \
   695   /* Skip parts that are before "mr", in case "block_start" sent us             \
   696      back too far. */                                                           \
   697   HeapWord* mr_start = mr.start();                                              \
   698   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
   699   HeapWord* next = bottom + bot_size;                                           \
   700   while (next < mr_start) {                                                     \
   701     bottom = next;                                                              \
   702     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
   703     next = bottom + bot_size;                                                   \
   704   }                                                                             \
   705                                                                                 \
   706   while (bottom < top) {                                                        \
   707     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
   708         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   709                     oop(bottom)) &&                                             \
   710         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   711       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   712       bottom += _cfls->adjustObjectSize(word_sz);                               \
   713     } else {                                                                    \
   714       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
   715     }                                                                           \
   716   }                                                                             \
   717 }                                                                               \
   718 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
   719                                                         HeapWord* bottom,       \
   720                                                         HeapWord* top,          \
   721                                                         ClosureType* cl) {      \
   722   /* Skip parts that are before "mr", in case "block_start" sent us             \
   723      back too far. */                                                           \
   724   HeapWord* mr_start = mr.start();                                              \
   725   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
   726   HeapWord* next = bottom + bot_size;                                           \
   727   while (next < mr_start) {                                                     \
   728     bottom = next;                                                              \
   729     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
   730     next = bottom + bot_size;                                                   \
   731   }                                                                             \
   732                                                                                 \
   733   while (bottom < top) {                                                        \
   734     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
   735         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   736                     oop(bottom)) &&                                             \
   737         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   738       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   739       bottom += _cfls->adjustObjectSize(word_sz);                               \
   740     } else {                                                                    \
   741       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
   742     }                                                                           \
   743   }                                                                             \
   744 }
   746 // (There are only two of these, rather than N, because the split is due
   747 // only to the introduction of the FilteringClosure, a local part of the
   748 // impl of this abstraction.)
   749 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
   750 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
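// Illustration only -- a minimal sketch of the de-virtualization idiom the
// comment above relies on: a call qualified with the class name is bound at
// compile time, so no virtual dispatch happens even through a pointer. The
// types below are hypothetical, not the VM's.
#include <cstddef>

struct SketchSpace {
  virtual ~SketchSpace() {}
  virtual size_t block_size(const char* p) const { return 1; }
};

struct SketchFreeListSpace : public SketchSpace {
  virtual size_t block_size(const char* p) const { return 2; }
};

static size_t sketch_sum_two_lookups(SketchFreeListSpace* sp, const char* p) {
  size_t via_vtable   = sp->block_size(p);                        // virtual dispatch
  size_t devirtualized = sp->SketchFreeListSpace::block_size(p);  // statically bound
  return via_vtable + devirtualized;
}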
   752 DirtyCardToOopClosure*
   753 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
   754                                       CardTableModRefBS::PrecisionStyle precision,
   755                                       HeapWord* boundary) {
   756   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
   757 }
   760 // Note on locking for the space iteration functions:
   761 // since the collector's iteration activities are concurrent with
   762 // allocation activities by mutators, absent a suitable mutual exclusion
   763 // mechanism the iterators may go awry. For instance a block being iterated
   764 // may suddenly be allocated or divided up and part of it allocated and
   765 // so on.
   767 // Apply the given closure to each block in the space.
   768 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
   769   assert_lock_strong(freelistLock());
   770   HeapWord *cur, *limit;
   771   for (cur = bottom(), limit = end(); cur < limit;
   772        cur += cl->do_blk_careful(cur));
   773 }
   775 // Apply the given closure to each block in the space.
   776 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
   777   assert_lock_strong(freelistLock());
   778   HeapWord *cur, *limit;
   779   for (cur = bottom(), limit = end(); cur < limit;
   780        cur += cl->do_blk(cur));
   781 }
   783 // Apply the given closure to each oop in the space.
   784 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
   785   assert_lock_strong(freelistLock());
   786   HeapWord *cur, *limit;
   787   size_t curSize;
   788   for (cur = bottom(), limit = end(); cur < limit;
   789        cur += curSize) {
   790     curSize = block_size(cur);
   791     if (block_is_obj(cur)) {
   792       oop(cur)->oop_iterate(cl);
   793     }
   794   }
   795 }
   797 // Apply the given closure to each oop in the space \intersect memory region.
   798 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
   799   assert_lock_strong(freelistLock());
   800   if (is_empty()) {
   801     return;
   802   }
   803   MemRegion cur = MemRegion(bottom(), end());
   804   mr = mr.intersection(cur);
   805   if (mr.is_empty()) {
   806     return;
   807   }
   808   if (mr.equals(cur)) {
   809     oop_iterate(cl);
   810     return;
   811   }
   812   assert(mr.end() <= end(), "just took an intersection above");
   813   HeapWord* obj_addr = block_start(mr.start());
   814   HeapWord* t = mr.end();
   816   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
   817   if (block_is_obj(obj_addr)) {
   818     // Handle first object specially.
   819     oop obj = oop(obj_addr);
   820     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
   821   } else {
   822     FreeChunk* fc = (FreeChunk*)obj_addr;
   823     obj_addr += fc->size();
   824   }
   825   while (obj_addr < t) {
   826     HeapWord* obj = obj_addr;
   827     obj_addr += block_size(obj_addr);
   828     // If "obj_addr" is not greater than top, then the
   829     // entire object "obj" is within the region.
   830     if (obj_addr <= t) {
   831       if (block_is_obj(obj)) {
   832         oop(obj)->oop_iterate(cl);
   833       }
   834     } else {
   835       // "obj" extends beyond end of region
   836       if (block_is_obj(obj)) {
   837         oop(obj)->oop_iterate(&smr_blk);
   838       }
   839       break;
   840     }
   841   }
   842 }
   844 // NOTE: In the following methods, in order to safely be able to
   845 // apply the closure to an object, we need to be sure that the
   846 // object has been initialized. We are guaranteed that an object
   847 // is initialized if we are holding the Heap_lock with the
   848 // world stopped.
   849 void CompactibleFreeListSpace::verify_objects_initialized() const {
   850   if (is_init_completed()) {
   851     assert_locked_or_safepoint(Heap_lock);
   852     if (Universe::is_fully_initialized()) {
   853       guarantee(SafepointSynchronize::is_at_safepoint(),
   854                 "Required for objects to be initialized");
   855     }
   856   } // else make a concession at vm start-up
   857 }
   859 // Apply the given closure to each object in the space
   860 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
   861   assert_lock_strong(freelistLock());
   862   NOT_PRODUCT(verify_objects_initialized());
   863   HeapWord *cur, *limit;
   864   size_t curSize;
   865   for (cur = bottom(), limit = end(); cur < limit;
   866        cur += curSize) {
   867     curSize = block_size(cur);
   868     if (block_is_obj(cur)) {
   869       blk->do_object(oop(cur));
   870     }
   871   }
   872 }
   874 // Apply the given closure to each live object in the space
   875 //   The usage of CompactibleFreeListSpace
   876 // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
   877 // objects in the space to hold references to objects that are no longer
   878 // valid.  For example, an object may reference another object
   879 // that has already been swept up (collected).  This method uses
   880 // obj_is_alive() to determine whether it is safe to apply the closure to
   881 // an object.  See obj_is_alive() for details on how liveness of an
   882 // object is decided.
   884 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
   885   assert_lock_strong(freelistLock());
   886   NOT_PRODUCT(verify_objects_initialized());
   887   HeapWord *cur, *limit;
   888   size_t curSize;
   889   for (cur = bottom(), limit = end(); cur < limit;
   890        cur += curSize) {
   891     curSize = block_size(cur);
   892     if (block_is_obj(cur) && obj_is_alive(cur)) {
   893       blk->do_object(oop(cur));
   894     }
   895   }
   896 }
   898 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
   899                                                   UpwardsObjectClosure* cl) {
   900   assert_locked(freelistLock());
   901   NOT_PRODUCT(verify_objects_initialized());
   902   Space::object_iterate_mem(mr, cl);
   903 }
   905 // Callers of this iterator beware: The closure application should
   906 // be robust in the face of uninitialized objects and should (always)
   907 // return a correct size so that the next addr + size below gives us a
   908 // valid block boundary. [See for instance,
   909 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   910 // in ConcurrentMarkSweepGeneration.cpp.]
   911 HeapWord*
   912 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
   913   assert_lock_strong(freelistLock());
   914   HeapWord *addr, *last;
   915   size_t size;
   916   for (addr = bottom(), last  = end();
   917        addr < last; addr += size) {
   918     FreeChunk* fc = (FreeChunk*)addr;
   919     if (fc->is_free()) {
   920       // Since we hold the free list lock, which protects direct
   921       // allocation in this generation by mutators, a free object
   922       // will remain free throughout this iteration code.
   923       size = fc->size();
   924     } else {
   925       // Note that the object need not necessarily be initialized,
   926       // because (for instance) the free list lock does NOT protect
   927       // object initialization. The closure application below must
   928       // therefore be correct in the face of uninitialized objects.
   929       size = cl->do_object_careful(oop(addr));
   930       if (size == 0) {
   931         // An unparsable object found. Signal early termination.
   932         return addr;
   933       }
   934     }
   935   }
   936   return NULL;
   937 }
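// Illustration only -- a minimal sketch of the iteration contract described in
// the comment above: the callback must return the size of the block it was
// handed so the scan can advance, and a return value of 0 means "unparsable
// here, stop and report this address". The callback type and the function are
// hypothetical.
#include <cstddef>

typedef size_t (*SketchCarefulBlockFn)(char* addr);

// Returns NULL if the whole range was walked, or the address at which the
// callback requested early termination by returning 0.
static char* sketch_walk_blocks_carefully(char* bottom, char* end,
                                          SketchCarefulBlockFn fn) {
  for (char* addr = bottom; addr < end; ) {
    size_t size_in_bytes = fn(addr);
    if (size_in_bytes == 0) {
      return addr;              // unparsable block: signal early termination
    }
    addr += size_in_bytes;      // a correct size keeps addr on a block boundary
  }
  return NULL;
}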
   939 // Callers of this iterator beware: The closure application should
   940 // be robust in the face of uninitialized objects and should (always)
   941 // return a correct size so that the next addr + size below gives us a
   942 // valid block boundary. [See for instance,
   943 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   944 // in ConcurrentMarkSweepGeneration.cpp.]
   945 HeapWord*
   946 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
   947   ObjectClosureCareful* cl) {
   948   assert_lock_strong(freelistLock());
   949   // Can't use used_region() below because it may not necessarily
   950   // be the same as [bottom(),end()); although we could
   951   // use [used_region().start(),round_to(used_region().end(),CardSize)),
   952   // that appears too cumbersome, so we just do the simpler check
   953   // in the assertion below.
   954   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
   955          "mr should be non-empty and within used space");
   956   HeapWord *addr, *end;
   957   size_t size;
   958   for (addr = block_start_careful(mr.start()), end  = mr.end();
   959        addr < end; addr += size) {
   960     FreeChunk* fc = (FreeChunk*)addr;
   961     if (fc->is_free()) {
   962       // Since we hold the free list lock, which protects direct
   963       // allocation in this generation by mutators, a free object
   964       // will remain free throughout this iteration code.
   965       size = fc->size();
   966     } else {
   967       // Note that the object need not necessarily be initialized,
   968       // because (for instance) the free list lock does NOT protect
   969       // object initialization. The closure application below must
   970       // therefore be correct in the face of uninitialized objects.
   971       size = cl->do_object_careful_m(oop(addr), mr);
   972       if (size == 0) {
   973         // An unparsable object found. Signal early termination.
   974         return addr;
   975       }
   976     }
   977   }
   978   return NULL;
   979 }
   982 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
   983   NOT_PRODUCT(verify_objects_initialized());
   984   return _bt.block_start(p);
   985 }
   987 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
   988   return _bt.block_start_careful(p);
   989 }
   991 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   992   NOT_PRODUCT(verify_objects_initialized());
   993   // This must be volatile, or else there is a danger that the compiler
   994   // will compile the code below into a sometimes-infinite loop, by keeping
   995   // the value read the first time in a register.
   996   while (true) {
   997     // We must do this until we get a consistent view of the object.
   998     if (FreeChunk::indicatesFreeChunk(p)) {
   999       volatile FreeChunk* fc = (volatile FreeChunk*)p;
  1000       size_t res = fc->size();
  1002       // Bugfix for systems with weak memory model (PPC64/IA64). The
  1003       // block's free bit was set and we have read the size of the
  1004       // block. Acquire and check the free bit again. If the block is
  1005       // still free, the read size is correct.
  1006       OrderAccess::acquire();
  1008       // If the object is still a free chunk, return the size, else it
  1009       // has been allocated so try again.
  1010       if (FreeChunk::indicatesFreeChunk(p)) {
  1011         assert(res != 0, "Block size should not be 0");
   1012         return res;
   1013       }
  1014     } else {
  1015       // must read from what 'p' points to in each loop.
  1016       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
  1017       if (k != NULL) {
  1018         assert(k->is_klass(), "Should really be klass oop.");
  1019         oop o = (oop)p;
  1020         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
  1022         // Bugfix for systems with weak memory model (PPC64/IA64).
  1023         // The object o may be an array. Acquire to make sure that the array
  1024         // size (third word) is consistent.
  1025         OrderAccess::acquire();
  1027         size_t res = o->size_given_klass(k);
  1028         res = adjustObjectSize(res);
  1029         assert(res != 0, "Block size should not be 0");
   1030         return res;
   1031       }
   1032     }
   1033   }
   1034 }
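// Illustration only -- a minimal std::atomic sketch of the "read, acquire,
// re-check" validation used in block_size() above for weakly ordered CPUs: the
// size read is trusted only if the free bit is still set when it is re-checked
// after an acquire fence; otherwise the caller must size the block another
// way. The types and names are hypothetical, and the real code relies on the
// VM's OrderAccess primitives rather than <atomic>.
#include <atomic>
#include <cstddef>

struct SketchChunkHeader {
  std::atomic<bool>   is_free;
  std::atomic<size_t> size_in_words;
};

// Returns the size if it could be read consistently from a free chunk, or 0 to
// tell the caller to determine the size some other way (e.g., via the klass).
static size_t sketch_free_chunk_size_if_still_free(const SketchChunkHeader* c) {
  if (!c->is_free.load(std::memory_order_relaxed)) return 0;
  size_t sz = c->size_in_words.load(std::memory_order_relaxed);
  // Keep the size read above from being reordered past the re-check below.
  std::atomic_thread_fence(std::memory_order_acquire);
  if (c->is_free.load(std::memory_order_relaxed)) {
    return sz;                  // still free on both sides of the fence: trust sz
  }
  return 0;                     // concurrently allocated: caller must retry
}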
  1036 // TODO: Now that is_parsable is gone, we should combine these two functions.
  1037 // A variant of the above that uses the Printezis bits for
  1038 // unparsable but allocated objects. This avoids any possible
  1039 // stalls waiting for mutators to initialize objects, and is
  1040 // thus potentially faster than the variant above. However,
  1041 // this variant may return a zero size for a block that is
  1042 // under mutation and for which a consistent size cannot be
  1043 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
  1044 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
  1045                                                      const CMSCollector* c)
  1046 const {
  1047   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1048   // This must be volatile, or else there is a danger that the compiler
  1049   // will compile the code below into a sometimes-infinite loop, by keeping
  1050   // the value read the first time in a register.
  1051   DEBUG_ONLY(uint loops = 0;)
  1052   while (true) {
  1053     // We must do this until we get a consistent view of the object.
  1054     if (FreeChunk::indicatesFreeChunk(p)) {
  1055       volatile FreeChunk* fc = (volatile FreeChunk*)p;
  1056       size_t res = fc->size();
  1058       // Bugfix for systems with weak memory model (PPC64/IA64). The
  1059       // free bit of the block was set and we have read the size of
  1060       // the block. Acquire and check the free bit again. If the
  1061       // block is still free, the read size is correct.
  1062       OrderAccess::acquire();
  1064       if (FreeChunk::indicatesFreeChunk(p)) {
  1065         assert(res != 0, "Block size should not be 0");
  1066         assert(loops == 0, "Should be 0");
   1067         return res;
   1068       }
  1069     } else {
  1070       // must read from what 'p' points to in each loop.
  1071       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
  1072       // We trust the size of any object that has a non-NULL
  1073       // klass and (for those in the perm gen) is parsable
  1074       // -- irrespective of its conc_safe-ty.
  1075       if (k != NULL) {
  1076         assert(k->is_klass(), "Should really be klass oop.");
  1077         oop o = (oop)p;
  1078         assert(o->is_oop(), "Should be an oop");
  1080         // Bugfix for systems with weak memory model (PPC64/IA64).
  1081         // The object o may be an array. Acquire to make sure that the array
  1082         // size (third word) is consistent.
  1083         OrderAccess::acquire();
  1085         size_t res = o->size_given_klass(k);
  1086         res = adjustObjectSize(res);
  1087         assert(res != 0, "Block size should not be 0");
  1088         return res;
  1089       } else {
  1090         // May return 0 if P-bits not present.
   1091         return c->block_size_if_printezis_bits(p);
   1092       }
   1093     }
  1094     assert(loops == 0, "Can loop at most once");
   1095     DEBUG_ONLY(loops++;)
   1096   }
   1097 }
  1099 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
  1100   NOT_PRODUCT(verify_objects_initialized());
  1101   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1102   FreeChunk* fc = (FreeChunk*)p;
  1103   if (fc->is_free()) {
  1104     return fc->size();
  1105   } else {
  1106     // Ignore mark word because this may be a recently promoted
  1107     // object whose mark word is used to chain together grey
  1108     // objects (the last one would have a null value).
  1109     assert(oop(p)->is_oop(true), "Should be an oop");
   1110     return adjustObjectSize(oop(p)->size());
   1111   }
   1112 }
  1114 // This implementation assumes that the property of "being an object" is
  1115 // stable.  But being a free chunk may not be (because of parallel
  1116 // promotion.)
  1117 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
  1118   FreeChunk* fc = (FreeChunk*)p;
  1119   assert(is_in_reserved(p), "Should be in space");
  1120   // When doing a mark-sweep-compact of the CMS generation, this
  1121   // assertion may fail because prepare_for_compaction() uses
  1122   // space that is garbage to maintain information on ranges of
  1123   // live objects so that these live ranges can be moved as a whole.
  1124   // Comment out this assertion until that problem can be solved
  1125   // (i.e., that the block start calculation may look at objects
   1126   // at addresses below "p" in finding the object that contains "p",
   1127   // and those objects (if garbage) may have been modified to hold
   1128   // live range information).
  1129   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
  1130   //        "Should be a block boundary");
  1131   if (FreeChunk::indicatesFreeChunk(p)) return false;
  1132   Klass* k = oop(p)->klass_or_null();
  1133   if (k != NULL) {
  1134     // Ignore mark word because it may have been used to
  1135     // chain together promoted objects (the last one
  1136     // would have a null value).
  1137     assert(oop(p)->is_oop(true), "Should be an oop");
  1138     return true;
  1139   } else {
   1140     return false;  // Was not an object at the start of collection.
   1141   }
   1142 }
  1144 // Check if the object is alive. This fact is checked either by consulting
  1145 // the main marking bitmap in the sweeping phase or, if it's a permanent
  1146 // generation and we're not in the sweeping phase, by checking the
  1147 // perm_gen_verify_bit_map where we store the "deadness" information if
  1148 // we did not sweep the perm gen in the most recent previous GC cycle.
  1149 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
  1150   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
  1151          "Else races are possible");
  1152   assert(block_is_obj(p), "The address should point to an object");
  1154   // If we're sweeping, we use object liveness information from the main bit map
  1155   // for both perm gen and old gen.
  1156   // We don't need to lock the bitmap (live_map or dead_map below), because
  1157   // EITHER we are in the middle of the sweeping phase, and the
  1158   // main marking bit map (live_map below) is locked,
  1159   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
  1160   // is stable, because it's mutated only in the sweeping phase.
  1161   // NOTE: This method is also used by jmap where, if class unloading is
  1162   // off, it can return "false" for legitimate perm objects,
  1163   // when we are not in the midst of a sweeping phase, which can result
  1164   // in jmap not reporting certain perm gen objects. This will be moot
  1165   // if/when the perm gen goes away in the future.
  1166   if (_collector->abstract_state() == CMSCollector::Sweeping) {
  1167     CMSBitMap* live_map = _collector->markBitMap();
  1168     return live_map->par_isMarked((HeapWord*) p);
  1170   return true;
  1173 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
  1174   FreeChunk* fc = (FreeChunk*)p;
  1175   assert(is_in_reserved(p), "Should be in space");
  1176   assert(_bt.block_start(p) == p, "Should be a block boundary");
  1177   if (!fc->is_free()) {
  1178     // Ignore mark word because it may have been used to
  1179     // chain together promoted objects (the last one
  1180     // would have a null value).
  1181     assert(oop(p)->is_oop(true), "Should be an oop");
  1182     return true;
  1184   return false;
  1187 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
  1188 // approximate answer if you don't hold the freelistLock when you call this.
  1189 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
  1190   size_t size = 0;
  1191   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  1192     debug_only(
  1193       // We may be calling here without the lock in which case we
  1194       // won't do this modest sanity check.
  1195       if (freelistLock()->owned_by_self()) {
  1196         size_t total_list_size = 0;
  1197         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
  1198           fc = fc->next()) {
  1199           total_list_size += i;
  1201         assert(total_list_size == i * _indexedFreeList[i].count(),
  1202                "Count in list is incorrect");
  1205     size += i * _indexedFreeList[i].count();
  1207   return size;
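       // For example, with three 8-word chunks and two 16-word chunks on their
       // respective indexed lists (hypothetical counts), the sum above is
       // 8 * 3 + 16 * 2 = 56 words of free space held in the indexed lists.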
  1210 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  1211   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  1212   return allocate(size);
  1215 HeapWord*
  1216 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
  1217   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
  1220 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  1221   assert_lock_strong(freelistLock());
  1222   HeapWord* res = NULL;
  1223   assert(size == adjustObjectSize(size),
  1224          "use adjustObjectSize() before calling into allocate()");
  1226   if (_adaptive_freelists) {
  1227     res = allocate_adaptive_freelists(size);
  1228   } else {  // non-adaptive free lists
  1229     res = allocate_non_adaptive_freelists(size);
  1232   if (res != NULL) {
  1233     // check that res does lie in this space!
  1234     assert(is_in_reserved(res), "Not in this space!");
  1235     assert(is_aligned((void*)res), "alignment check");
  1237     FreeChunk* fc = (FreeChunk*)res;
  1238     fc->markNotFree();
  1239     assert(!fc->is_free(), "shouldn't be marked free");
  1240     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
  1241     // Verify that the block offset table shows this to
  1242     // be a single block, but not one which is unallocated.
  1243     _bt.verify_single_block(res, size);
  1244     _bt.verify_not_unallocated(res, size);
  1245     // mangle a just allocated object with a distinct pattern.
  1246     debug_only(fc->mangleAllocated(size));
  1249   return res;
  1252 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
  1253   HeapWord* res = NULL;
  1254   // try and use linear allocation for smaller blocks
  1255   if (size < _smallLinearAllocBlock._allocation_size_limit) {
  1256     // if successful, the following also adjusts block offset table
  1257     res = getChunkFromSmallLinearAllocBlock(size);
  1259   // Else triage between the indexed lists (for smaller sizes) and the dictionary
  1260   if (res == NULL) {
  1261     if (size < SmallForDictionary) {
  1262       res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1263     } else {
  1264       // else get it from the big dictionary; if even this doesn't
  1265       // work we are out of luck.
  1266       res = (HeapWord*)getChunkFromDictionaryExact(size);
  1270   return res;
  1273 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
  1274   assert_lock_strong(freelistLock());
  1275   HeapWord* res = NULL;
  1276   assert(size == adjustObjectSize(size),
  1277          "use adjustObjectSize() before calling into allocate()");
  1279   // Strategy
  1280   //   if small
  1281   //     exact size from small object indexed list if small
  1282   //     small or large linear allocation block (linAB) as appropriate
  1283   //     take from lists of greater sized chunks
  1284   //   else
  1285   //     dictionary
  1286   //     small or large linear allocation block if it has the space
  1287   // Try allocating exact size from indexTable first
  1288   if (size < IndexSetSize) {
  1289     res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1290     if (res != NULL) {
  1291       assert(res != (HeapWord*)_indexedFreeList[size].head(),
  1292         "Not removed from free list");
  1293       // no block offset table adjustment is necessary on blocks in
  1294       // the indexed lists.
  1296     // Try allocating from the small LinAB
  1297     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
  1298         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
  1299         // if successful, the above also adjusts block offset table
  1300         // Note that this call will refill the LinAB to
  1301         // satisfy the request.  This is different than
  1302         // evm.
  1303         // Don't record chunk off a LinAB?  smallSplitBirth(size);
  1304     } else {
  1305       // Raid the exact free lists larger than size, even if they are not
  1306       // overpopulated.
  1307       res = (HeapWord*) getChunkFromGreater(size);
  1309   } else {
  1310     // Big objects get allocated directly from the dictionary.
  1311     res = (HeapWord*) getChunkFromDictionaryExact(size);
  1312     if (res == NULL) {
  1313       // Try hard not to fail since an allocation failure will likely
  1314       // trigger a synchronous GC.  Try to get the space from the
  1315       // allocation blocks.
  1316       res = getChunkFromSmallLinearAllocBlockRemainder(size);
  1320   return res;
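       // The triage order implemented above can be summarized with a small
       // hedged sketch; the helper names below are hypothetical stand-ins for
       // the indexed lists, the linAB and the dictionary, and only the order
       // in which the sources are consulted is meant to be illustrative:
       //
       //   HeapWord* allocate_sketch(size_t size) {
       //     if (size < IndexSetSize) {
       //       if (HeapWord* p = from_exact_indexed_list(size)) return p; // exact fit
       //       if (HeapWord* p = from_small_linAB(size))        return p; // linear block
       //       return from_greater_indexed_lists(size);                   // raid larger lists
       //     }
       //     HeapWord* p = from_dictionary_exact(size);                   // big objects
       //     return (p != NULL) ? p : from_small_linAB_remainder(size);   // last resort
       //   }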
  1323 // A worst-case estimate of the space required (in HeapWords) to expand the heap
  1324 // when promoting obj.
  1325 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  1326   // Depending on the object size, expansion may require refilling either a
  1327   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  2 * MinChunkSize
  1328   // is added because the dictionary may over-allocate to avoid fragmentation.
  1329   size_t space = obj_size;
  1330   if (!_adaptive_freelists) {
  1331     space = MAX2(space, _smallLinearAllocBlock._refillSize);
  1333   space += _promoInfo.refillSize() + 2 * MinChunkSize;
  1334   return space;
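       // A worked example of the estimate above, using hypothetical values not
       // taken from any particular configuration: with obj_size = 128, a
       // non-adaptive _smallLinearAllocBlock._refillSize of 256, a
       // _promoInfo.refillSize() of 64 and a MinChunkSize of 4 HeapWords,
       //
       //   space = MAX2(128, 256) + 64 + 2 * 4 = 328 HeapWords,
       //
       // i.e. the worst case pays for a linAB refill, a PromotionInfo refill
       // and two minimum-sized chunks of dictionary over-allocation.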
  1337 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  1338   FreeChunk* ret;
  1340   assert(numWords >= MinChunkSize, "Size is less than minimum");
  1341   assert(linearAllocationWouldFail() || bestFitFirst(),
  1342     "Should not be here");
  1344   size_t i;
  1345   size_t currSize = numWords + MinChunkSize;
  1346   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  1347   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
  1348     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
  1349     if (fl->head()) {
  1350       ret = getFromListGreater(fl, numWords);
  1351       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
  1352       return ret;
  1356   currSize = MAX2((size_t)SmallForDictionary,
  1357                   (size_t)(numWords + MinChunkSize));
  1359   /* Try to get a chunk that satisfies request, while avoiding
  1360      fragmentation that can't be handled. */
  1362     ret =  dictionary()->get_chunk(currSize);
  1363     if (ret != NULL) {
  1364       assert(ret->size() - numWords >= MinChunkSize,
  1365              "Chunk is too small");
  1366       _bt.allocated((HeapWord*)ret, ret->size());
  1367       /* Carve returned chunk. */
  1368       (void) splitChunkAndReturnRemainder(ret, numWords);
  1369       /* Label this as no longer a free chunk. */
  1370       assert(ret->is_free(), "This chunk should be free");
  1371       ret->link_prev(NULL);
  1373     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
  1374     return ret;
  1376   ShouldNotReachHere();
  1379 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
  1380   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  1381   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
  1384 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
  1385   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
  1386          (_smallLinearAllocBlock._word_size == fc->size()),
  1387          "Linear allocation block shows incorrect size");
  1388   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
  1389           (_smallLinearAllocBlock._word_size == fc->size()));
  1392 // Check if the purported free chunk is present either as a linear
  1393 // allocation block, the size-indexed table of (smaller) free blocks,
  1394 // or the larger free blocks kept in the binary tree dictionary.
  1395 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
  1396   if (verify_chunk_is_linear_alloc_block(fc)) {
  1397     return true;
  1398   } else if (fc->size() < IndexSetSize) {
  1399     return verifyChunkInIndexedFreeLists(fc);
  1400   } else {
  1401     return dictionary()->verify_chunk_in_free_list(fc);
  1405 #ifndef PRODUCT
  1406 void CompactibleFreeListSpace::assert_locked() const {
  1407   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
  1410 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
  1411   CMSLockVerifier::assert_locked(lock);
  1413 #endif
  1415 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
  1416   // In the parallel case, the main thread holds the free list lock
  1417   // on behalf of the parallel threads.
  1418   FreeChunk* fc;
  1420     // If GC is parallel, this might be called by several threads.
  1421     // This should be rare enough that the locking overhead won't affect
  1422     // the sequential code.
  1423     MutexLockerEx x(parDictionaryAllocLock(),
  1424                     Mutex::_no_safepoint_check_flag);
  1425     fc = getChunkFromDictionary(size);
  1427   if (fc != NULL) {
  1428     fc->dontCoalesce();
  1429     assert(fc->is_free(), "Should be free, but not coalescable");
  1430     // Verify that the block offset table shows this to
  1431     // be a single block, but not one which is unallocated.
  1432     _bt.verify_single_block((HeapWord*)fc, fc->size());
  1433     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  1435   return fc;
  1438 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
  1439   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  1440   assert_locked();
  1442   // if we are tracking promotions, then first ensure space for
  1443   // promotion (including spooling space for saving header if necessary).
  1444   // then allocate and copy, then track promoted info if needed.
  1445   // When tracking (see PromotionInfo::track()), the mark word may
  1446   // be displaced and in this case restoration of the mark word
  1447   // occurs in the (oop_since_save_marks_)iterate phase.
  1448   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
  1449     return NULL;
  1451   // Call the allocate(size_t, bool) form directly to avoid the
  1452   // additional call through the allocate(size_t) form.  Having
  1453   // the compiler inline the call is problematic because allocate(size_t)
  1454   // is a virtual method.
  1455   HeapWord* res = allocate(adjustObjectSize(obj_size));
  1456   if (res != NULL) {
  1457     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
  1458     // if we should be tracking promotions, do so.
  1459     if (_promoInfo.tracking()) {
  1460         _promoInfo.track((PromotedObject*)res);
  1463   return oop(res);
  1466 HeapWord*
  1467 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
  1468   assert_locked();
  1469   assert(size >= MinChunkSize, "minimum chunk size");
  1470   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
  1471     "maximum from smallLinearAllocBlock");
  1472   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
  1475 HeapWord*
  1476 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
  1477                                                        size_t size) {
  1478   assert_locked();
  1479   assert(size >= MinChunkSize, "too small");
  1480   HeapWord* res = NULL;
  1481   // Try to do linear allocation from blk, making sure that it still has space.
  1482   if (blk->_word_size == 0) {
  1483     // We have probably been unable to fill this either in the prologue or
  1484     // when it was exhausted at the last linear allocation. Bail out until
  1485     // next time.
  1486     assert(blk->_ptr == NULL, "consistency check");
  1487     return NULL;
  1489   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
  1490   res = getChunkFromLinearAllocBlockRemainder(blk, size);
  1491   if (res != NULL) return res;
  1493   // about to exhaust this linear allocation block
  1494   if (blk->_word_size == size) { // exactly satisfied
  1495     res = blk->_ptr;
  1496     _bt.allocated(res, blk->_word_size);
  1497   } else if (size + MinChunkSize <= blk->_refillSize) {
  1498     size_t sz = blk->_word_size;
  1499     // Update _unallocated_block if the size is such that chunk would be
  1500     // returned to the indexed free list.  All other chunks in the indexed
  1501     // free lists are allocated from the dictionary so that _unallocated_block
  1502     // has already been adjusted for them.  Do it here so that _unallocated_block
  1503     // is adjusted for all chunks added back to the indexed free lists.
  1504     if (sz < SmallForDictionary) {
  1505       _bt.allocated(blk->_ptr, sz);
  1507     // Return the chunk that isn't big enough, and then refill below.
  1508     addChunkToFreeLists(blk->_ptr, sz);
  1509     split_birth(sz);
  1510     // Don't keep statistics on adding back chunk from a LinAB.
  1511   } else {
  1512     // A refilled block would not satisfy the request.
  1513     return NULL;
  1516   blk->_ptr = NULL; blk->_word_size = 0;
  1517   refillLinearAllocBlock(blk);
  1518   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
  1519          "block was replenished");
  1520   if (res != NULL) {
  1521     split_birth(size);
  1522     repairLinearAllocBlock(blk);
  1523   } else if (blk->_ptr != NULL) {
  1524     res = blk->_ptr;
  1525     size_t blk_size = blk->_word_size;
  1526     blk->_word_size -= size;
  1527     blk->_ptr  += size;
  1528     split_birth(size);
  1529     repairLinearAllocBlock(blk);
  1530     // Update BOT last so that other (parallel) GC threads see a consistent
  1531     // view of the BOT and free blocks.
  1532     // Above must occur before BOT is updated below.
  1533     OrderAccess::storestore();
  1534     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1536   return res;
  1539 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
  1540                                         LinearAllocBlock* blk,
  1541                                         size_t size) {
  1542   assert_locked();
  1543   assert(size >= MinChunkSize, "too small");
  1545   HeapWord* res = NULL;
  1546   // This is the common case.  Keep it simple.
  1547   if (blk->_word_size >= size + MinChunkSize) {
  1548     assert(blk->_ptr != NULL, "consistency check");
  1549     res = blk->_ptr;
  1550     // Note that the BOT is up-to-date for the linAB before allocation.  It
  1551     // indicates the start of the linAB.  The split_block() updates the
  1552     // BOT for the linAB after the allocation (indicates the start of the
  1553     // next chunk to be allocated).
  1554     size_t blk_size = blk->_word_size;
  1555     blk->_word_size -= size;
  1556     blk->_ptr  += size;
  1557     split_birth(size);
  1558     repairLinearAllocBlock(blk);
  1559     // Update BOT last so that other (parallel) GC threads see a consistent
  1560     // view of the BOT and free blocks.
  1561     // Above must occur before BOT is updated below.
  1562     OrderAccess::storestore();
  1563     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1564     _bt.allocated(res, size);
  1566   return res;
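       // The storestore() above follows the usual publication pattern: the
       // stores that mark the remainder of the linAB as a free chunk must be
       // visible to other GC threads before the BOT is updated to expose it.
       // A minimal generic sketch of that pattern, with hypothetical names:
       //
       //   chunk->set_size(words);        // initializing stores, done first
       //   chunk->link_prev(NULL);        // mark as free
       //   OrderAccess::storestore();     // earlier stores drain before later ones
       //   table.publish(chunk);          // the store other threads key off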
  1569 FreeChunk*
  1570 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
  1571   assert_locked();
  1572   assert(size < SmallForDictionary, "just checking");
  1573   FreeChunk* res;
  1574   res = _indexedFreeList[size].get_chunk_at_head();
  1575   if (res == NULL) {
  1576     res = getChunkFromIndexedFreeListHelper(size);
  1578   _bt.verify_not_unallocated((HeapWord*) res, size);
  1579   assert(res == NULL || res->size() == size, "Incorrect block size");
  1580   return res;
  1583 FreeChunk*
  1584 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
  1585   bool replenish) {
  1586   assert_locked();
  1587   FreeChunk* fc = NULL;
  1588   if (size < SmallForDictionary) {
  1589     assert(_indexedFreeList[size].head() == NULL ||
  1590       _indexedFreeList[size].surplus() <= 0,
  1591       "List for this size should be empty or under populated");
  1592     // Try best fit in exact lists before replenishing the list
  1593     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
  1594       // Replenish list.
  1595       //
  1596       // Things tried that failed.
  1597       //   Tried allocating out of the two LinAB's first before
  1598       // replenishing lists.
  1599       //   Tried small linAB of size 256 (size in indexed list)
  1600       // and replenishing indexed lists from the small linAB.
  1601       //
  1602       FreeChunk* newFc = NULL;
  1603       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
  1604       if (replenish_size < SmallForDictionary) {
  1605         // Do not replenish from an underpopulated size.
  1606         if (_indexedFreeList[replenish_size].surplus() > 0 &&
  1607             _indexedFreeList[replenish_size].head() != NULL) {
  1608           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
  1609         } else if (bestFitFirst()) {
  1610           newFc = bestFitSmall(replenish_size);
  1613       if (newFc == NULL && replenish_size > size) {
  1614         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
  1615         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
  1617       // Note: The stats update regarding the split-death of the block
  1618       // obtained above will be recorded below, precisely when we know
  1619       // we are actually going to split it into more than one piece.
  1620       if (newFc != NULL) {
  1621         if  (replenish || CMSReplenishIntermediate) {
  1622           // Replenish this list and return one block to caller.
  1623           size_t i;
  1624           FreeChunk *curFc, *nextFc;
  1625           size_t num_blk = newFc->size() / size;
  1626           assert(num_blk >= 1, "Smaller than requested?");
  1627           assert(newFc->size() % size == 0, "Should be integral multiple of request");
  1628           if (num_blk > 1) {
  1629             // we are sure we will be splitting the block just obtained
  1630             // into multiple pieces; record the split-death of the original
  1631             splitDeath(replenish_size);
  1633           // carve up and link blocks 0, ..., num_blk - 2
  1634           // The last chunk is not added to the lists but is returned as the
  1635           // free chunk.
  1636           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
  1637                i = 0;
  1638                i < (num_blk - 1);
  1639                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
  1640                i++) {
  1641             curFc->set_size(size);
  1642             // Don't record this as a return in order to try and
  1643             // determine the "returns" from a GC.
  1644             _bt.verify_not_unallocated((HeapWord*) curFc, size);
  1645             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
  1646             _bt.mark_block((HeapWord*)curFc, size);
  1647             split_birth(size);
  1648             // Don't record the initial population of the indexed list
  1649             // as a split birth.
  1652           // check that the arithmetic was OK above
  1653           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
  1654             "inconsistency in carving newFc");
  1655           curFc->set_size(size);
  1656           _bt.mark_block((HeapWord*)curFc, size);
  1657           split_birth(size);
  1658           fc = curFc;
  1659         } else {
  1660           // Return entire block to caller
  1661           fc = newFc;
  1665   } else {
  1666     // Get a free chunk from the free chunk dictionary to be returned to
  1667     // replenish the indexed free list.
  1668     fc = getChunkFromDictionaryExact(size);
  1670   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
  1671   return fc;
  1674 FreeChunk*
  1675 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
  1676   assert_locked();
  1677   FreeChunk* fc = _dictionary->get_chunk(size,
  1678                                          FreeBlockDictionary<FreeChunk>::atLeast);
  1679   if (fc == NULL) {
  1680     return NULL;
  1682   _bt.allocated((HeapWord*)fc, fc->size());
  1683   if (fc->size() >= size + MinChunkSize) {
  1684     fc = splitChunkAndReturnRemainder(fc, size);
  1686   assert(fc->size() >= size, "chunk too small");
  1687   assert(fc->size() < size + MinChunkSize, "chunk too big");
  1688   _bt.verify_single_block((HeapWord*)fc, fc->size());
  1689   return fc;
  1692 FreeChunk*
  1693 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
  1694   assert_locked();
  1695   FreeChunk* fc = _dictionary->get_chunk(size,
  1696                                          FreeBlockDictionary<FreeChunk>::atLeast);
  1697   if (fc == NULL) {
  1698     return fc;
  1700   _bt.allocated((HeapWord*)fc, fc->size());
  1701   if (fc->size() == size) {
  1702     _bt.verify_single_block((HeapWord*)fc, size);
  1703     return fc;
  1705   assert(fc->size() > size, "get_chunk() guarantee");
  1706   if (fc->size() < size + MinChunkSize) {
  1707     // Return the chunk to the dictionary and go get a bigger one.
  1708     returnChunkToDictionary(fc);
  1709     fc = _dictionary->get_chunk(size + MinChunkSize,
  1710                                 FreeBlockDictionary<FreeChunk>::atLeast);
  1711     if (fc == NULL) {
  1712       return NULL;
  1714     _bt.allocated((HeapWord*)fc, fc->size());
  1716   assert(fc->size() >= size + MinChunkSize, "tautology");
  1717   fc = splitChunkAndReturnRemainder(fc, size);
  1718   assert(fc->size() == size, "chunk is wrong size");
  1719   _bt.verify_single_block((HeapWord*)fc, size);
  1720   return fc;
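       // A worked example of the "go get a bigger one" path above, assuming a
       // hypothetical MinChunkSize of 4 HeapWords: a request for size = 100
       // that is handed a 102-word chunk cannot split it, because the 2-word
       // remainder would be smaller than MinChunkSize and could not stand on
       // its own as a FreeChunk.  The chunk is returned to the dictionary and
       // a chunk of at least 100 + 4 = 104 words is requested instead, so the
       // remainder left over after the split is always a legal free chunk.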
  1723 void
  1724 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
  1725   assert_locked();
  1727   size_t size = chunk->size();
  1728   _bt.verify_single_block((HeapWord*)chunk, size);
  1729   // adjust _unallocated_block downward, as necessary
  1730   _bt.freed((HeapWord*)chunk, size);
  1731   _dictionary->return_chunk(chunk);
  1732 #ifndef PRODUCT
  1733   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1734     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
  1735     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
  1736     tl->verify_stats();
  1738 #endif // PRODUCT
  1741 void
  1742 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
  1743   assert_locked();
  1744   size_t size = fc->size();
  1745   _bt.verify_single_block((HeapWord*) fc, size);
  1746   _bt.verify_not_unallocated((HeapWord*) fc, size);
  1747   if (_adaptive_freelists) {
  1748     _indexedFreeList[size].return_chunk_at_tail(fc);
  1749   } else {
  1750     _indexedFreeList[size].return_chunk_at_head(fc);
  1752 #ifndef PRODUCT
  1753   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1754      _indexedFreeList[size].verify_stats();
  1756 #endif // PRODUCT
  1759 // Add chunk to end of last block -- if it's the largest
  1760 // block -- and update BOT and census data. We would
  1761 // of course have preferred to coalesce it with the
  1762 // last block, but it's currently less expensive to find the
  1763 // largest block than it is to find the last.
  1764 void
  1765 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
  1766   HeapWord* chunk, size_t     size) {
  1767   // check that the chunk does lie in this space!
  1768   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1769   // One of the parallel gc task threads may be here
  1770   // whilst others are allocating.
  1771   Mutex* lock = NULL;
  1772   if (ParallelGCThreads != 0) {
  1773     lock = &_parDictionaryAllocLock;
  1775   FreeChunk* ec;
  1777     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1778     ec = dictionary()->find_largest_dict();  // get largest block
  1779     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
  1780       // It's a coterminal block - we can coalesce.
  1781       size_t old_size = ec->size();
  1782       coalDeath(old_size);
  1783       removeChunkFromDictionary(ec);
  1784       size += old_size;
  1785     } else {
  1786       ec = (FreeChunk*)chunk;
  1789   ec->set_size(size);
  1790   debug_only(ec->mangleFreed(size));
  1791   if (size < SmallForDictionary && ParallelGCThreads != 0) {
  1792     lock = _indexedFreeListParLocks[size];
  1794   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1795   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  1796   // Record the birth under the lock, since the recording involves
  1797   // manipulation of the list on which the chunk lives and,
  1798   // if the chunk is allocated and is the last on the list,
  1799   // the list can go away.
  1800   coalBirth(size);
  1803 void
  1804 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
  1805                                               size_t     size) {
  1806   // check that the chunk does lie in this space!
  1807   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1808   assert_locked();
  1809   _bt.verify_single_block(chunk, size);
  1811   FreeChunk* fc = (FreeChunk*) chunk;
  1812   fc->set_size(size);
  1813   debug_only(fc->mangleFreed(size));
  1814   if (size < SmallForDictionary) {
  1815     returnChunkToFreeList(fc);
  1816   } else {
  1817     returnChunkToDictionary(fc);
  1821 void
  1822 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
  1823   size_t size, bool coalesced) {
  1824   assert_locked();
  1825   assert(chunk != NULL, "null chunk");
  1826   if (coalesced) {
  1827     // repair BOT
  1828     _bt.single_block(chunk, size);
  1830   addChunkToFreeLists(chunk, size);
  1833 // We _must_ find the purported chunk on our free lists;
  1834 // we assert if we don't.
  1835 void
  1836 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
  1837   size_t size = fc->size();
  1838   assert_locked();
  1839   debug_only(verifyFreeLists());
  1840   if (size < SmallForDictionary) {
  1841     removeChunkFromIndexedFreeList(fc);
  1842   } else {
  1843     removeChunkFromDictionary(fc);
  1845   _bt.verify_single_block((HeapWord*)fc, size);
  1846   debug_only(verifyFreeLists());
  1849 void
  1850 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
  1851   size_t size = fc->size();
  1852   assert_locked();
  1853   assert(fc != NULL, "null chunk");
  1854   _bt.verify_single_block((HeapWord*)fc, size);
  1855   _dictionary->remove_chunk(fc);
  1856   // adjust _unallocated_block upward, as necessary
  1857   _bt.allocated((HeapWord*)fc, size);
  1860 void
  1861 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
  1862   assert_locked();
  1863   size_t size = fc->size();
  1864   _bt.verify_single_block((HeapWord*)fc, size);
  1865   NOT_PRODUCT(
  1866     if (FLSVerifyIndexTable) {
  1867       verifyIndexedFreeList(size);
  1870   _indexedFreeList[size].remove_chunk(fc);
  1871   NOT_PRODUCT(
  1872     if (FLSVerifyIndexTable) {
  1873       verifyIndexedFreeList(size);
  1878 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
  1879   /* A hint is the next larger size that has a surplus.
  1880      Start search at a size large enough to guarantee that
  1881      the excess is >= MIN_CHUNK. */
  1882   size_t start = align_object_size(numWords + MinChunkSize);
  1883   if (start < IndexSetSize) {
  1884     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
  1885     size_t    hint = _indexedFreeList[start].hint();
  1886     while (hint < IndexSetSize) {
  1887       assert(hint % MinObjAlignment == 0, "hint should be aligned");
  1888       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
  1889       if (fl->surplus() > 0 && fl->head() != NULL) {
  1890         // Found a list with surplus, reset original hint
  1891         // and split out a free chunk which is returned.
  1892         _indexedFreeList[start].set_hint(hint);
  1893         FreeChunk* res = getFromListGreater(fl, numWords);
  1894         assert(res == NULL || res->is_free(),
  1895           "Should be returning a free chunk");
  1896         return res;
  1898       hint = fl->hint(); /* keep looking */
  1900     /* None found. */
  1901     it[start].set_hint(IndexSetSize);
  1903   return NULL;
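       // A worked example of the hint chain above, with hypothetical sizes
       // (MinChunkSize = 4, IndexSetSize = 257): a request for numWords = 20
       // starts at align_object_size(20 + 4) = 24.  If list 24 carries hint 32,
       // list 32 has no surplus and carries hint 48, and list 48 does have a
       // surplus, the search visits 24 -> 32 -> 48, records 48 as the new hint
       // of list 24, and splits a 48-word chunk so that the 20-word piece is
       // returned and the 28-word remainder goes back onto the free lists.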
  1906 /* Requires fl->size >= numWords + MinChunkSize */
  1907 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
  1908   size_t numWords) {
  1909   FreeChunk *curr = fl->head();
  1910   assert(curr != NULL, "List is empty");
  1911   assert(numWords >= MinChunkSize, "Word size is too small");
  1912   size_t oldNumWords = curr->size();
  1913   assert(oldNumWords >= numWords + MinChunkSize,
  1914         "Size of chunks in the list is too small");
  1916   fl->remove_chunk(curr);
  1917   // recorded indirectly by splitChunkAndReturnRemainder -
  1918   // smallSplit(oldNumWords, numWords);
  1919   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
  1920   // Does anything have to be done for the remainder in terms of
  1921   // fixing the card table?
  1922   assert(new_chunk == NULL || new_chunk->is_free(),
  1923     "Should be returning a free chunk");
  1924   return new_chunk;
  1927 FreeChunk*
  1928 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
  1929   size_t new_size) {
  1930   assert_locked();
  1931   size_t size = chunk->size();
  1932   assert(size > new_size, "Split from a smaller block?");
  1933   assert(is_aligned(chunk), "alignment problem");
  1934   assert(size == adjustObjectSize(size), "alignment problem");
  1935   size_t rem_size = size - new_size;
  1936   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
  1937   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
  1938   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
  1939   assert(is_aligned(ffc), "alignment problem");
  1940   ffc->set_size(rem_size);
  1941   ffc->link_next(NULL);
  1942   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
  1943   // Above must occur before BOT is updated below.
  1944   // adjust block offset table
  1945   OrderAccess::storestore();
  1946   assert(chunk->is_free() && ffc->is_free(), "Error");
  1947   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
  1948   if (rem_size < SmallForDictionary) {
  1949     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  1950     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
  1951     assert(!is_par ||
  1952            (SharedHeap::heap()->n_par_threads() ==
  1953             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  1954     returnChunkToFreeList(ffc);
  1955     split(size, rem_size);
  1956     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
  1957   } else {
  1958     returnChunkToDictionary(ffc);
  1958     split(size, rem_size);
  1961   chunk->set_size(new_size);
  1962   return chunk;
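       // A worked example of the split above, with a hypothetical MinChunkSize
       // of 4 HeapWords: splitting a 100-word free chunk to satisfy
       // new_size = 40 places the remainder header at chunk + 40, gives it
       // rem_size = 60, marks it free, and only then updates the BOT; the
       // intent of the ordering comments above is that a parallel thread never
       // sees the BOT describing a remainder that is not yet marked free.  The
       // 60-word remainder is returned to the free lists and the original
       // chunk, shrunk to 40 words, is handed back to the caller.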
  1965 void
  1966 CompactibleFreeListSpace::sweep_completed() {
  1967   // Now that space is probably plentiful, refill linear
  1968   // allocation blocks as needed.
  1969   refillLinearAllocBlocksIfNeeded();
  1972 void
  1973 CompactibleFreeListSpace::gc_prologue() {
  1974   assert_locked();
  1975   if (PrintFLSStatistics != 0) {
  1976     gclog_or_tty->print("Before GC:\n");
  1977     reportFreeListStatistics();
  1979   refillLinearAllocBlocksIfNeeded();
  1982 void
  1983 CompactibleFreeListSpace::gc_epilogue() {
  1984   assert_locked();
  1985   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
  1986     if (_smallLinearAllocBlock._word_size == 0)
  1987       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
  1989   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1990   _promoInfo.stopTrackingPromotions();
  1991   repairLinearAllocationBlocks();
  1992   // Print Space's stats
  1993   if (PrintFLSStatistics != 0) {
  1994     gclog_or_tty->print("After GC:\n");
  1995     reportFreeListStatistics();
  1999 // Iteration support, mostly delegated from a CMS generation
  2001 void CompactibleFreeListSpace::save_marks() {
  2002   assert(Thread::current()->is_VM_thread(),
  2003          "Global variable should only be set when single-threaded");
  2004   // Mark the "end" of the used space at the time of this call;
  2005   // note, however, that promoted objects from this point
  2006   // on are tracked in the _promoInfo below.
  2007   set_saved_mark_word(unallocated_block());
  2008 #ifdef ASSERT
  2009   // Check the sanity of save_marks() etc.
  2010   MemRegion ur    = used_region();
  2011   MemRegion urasm = used_region_at_save_marks();
  2012   assert(ur.contains(urasm),
  2013          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
  2014                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
  2015                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
  2016 #endif
  2017   // inform allocator that promotions should be tracked.
  2018   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  2019   _promoInfo.startTrackingPromotions();
  2022 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
  2023   assert(_promoInfo.tracking(), "No preceding save_marks?");
  2024   assert(SharedHeap::heap()->n_par_threads() == 0,
  2025          "Shouldn't be called if using parallel gc.");
  2026   return _promoInfo.noPromotions();
  2029 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
  2031 void CompactibleFreeListSpace::                                             \
  2032 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  2033   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
  2034          "Shouldn't be called (yet) during parallel part of gc.");          \
  2035   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
  2036   /*                                                                        \
  2037    * This also restores any displaced headers and removes the elements from \
  2038    * the iteration set as they are processed, so that we have a clean slate \
  2039    * at the end of the iteration. Note, thus, that if new objects are       \
  2040    * promoted as a result of the iteration they are iterated over as well.  \
  2041    */                                                                       \
  2042   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
  2045 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
  2047 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  2048   return _smallLinearAllocBlock._word_size == 0;
  2051 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
  2052   // Fix up linear allocation blocks to look like free blocks
  2053   repairLinearAllocBlock(&_smallLinearAllocBlock);
  2056 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
  2057   assert_locked();
  2058   if (blk->_ptr != NULL) {
  2059     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
  2060            "Minimum block size requirement");
  2061     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
  2062     fc->set_size(blk->_word_size);
  2063     fc->link_prev(NULL);   // mark as free
  2064     fc->dontCoalesce();
  2065     assert(fc->is_free(), "just marked it free");
  2066     assert(fc->cantCoalesce(), "just marked it uncoalescable");
  2070 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
  2071   assert_locked();
  2072   if (_smallLinearAllocBlock._ptr == NULL) {
  2073     assert(_smallLinearAllocBlock._word_size == 0,
  2074       "Size of linAB should be zero if the ptr is NULL");
  2075     // Reset the linAB refill and allocation size limit.
  2076     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
  2078   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
  2081 void
  2082 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
  2083   assert_locked();
  2084   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
  2085          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
  2086          "blk invariant");
  2087   if (blk->_ptr == NULL) {
  2088     refillLinearAllocBlock(blk);
  2090   if (PrintMiscellaneous && Verbose) {
  2091     if (blk->_word_size == 0) {
  2092       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
  2097 void
  2098 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  2099   assert_locked();
  2100   assert(blk->_word_size == 0 && blk->_ptr == NULL,
  2101          "linear allocation block should be empty");
  2102   FreeChunk* fc;
  2103   if (blk->_refillSize < SmallForDictionary &&
  2104       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
  2105     // A linAB's strategy might be to use small sizes to reduce
  2106     // fragmentation but still get the benefits of allocation from a
  2107     // linAB.
  2108   } else {
  2109     fc = getChunkFromDictionary(blk->_refillSize);
  2111   if (fc != NULL) {
  2112     blk->_ptr  = (HeapWord*)fc;
  2113     blk->_word_size = fc->size();
  2114     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
  2118 // Support for concurrent collection policy decisions.
  2119 bool CompactibleFreeListSpace::should_concurrent_collect() const {
  2120   // In the future we might want to add in fragmentation stats --
  2121   // including erosion of the "mountain" into this decision as well.
  2122   return !adaptive_freelists() && linearAllocationWouldFail();
  2125 // Support for compaction
  2127 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  2128   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  2129   // prepare_for_compaction() uses the space between live objects
  2130   // so that a later phase can skip dead space quickly.  So verification
  2131   // of the free lists doesn't work afterwards.
  2134 #define obj_size(q) adjustObjectSize(oop(q)->size())
  2135 #define adjust_obj_size(s) adjustObjectSize(s)
  2137 void CompactibleFreeListSpace::adjust_pointers() {
  2138   // In other versions of adjust_pointers(), a bail out
  2139   // based on the amount of live data in the generation
  2140   // (i.e., if 0, bail out) may be used.
  2141   // Cannot test used() == 0 here because the free lists have already
  2142   // been mangled by the compaction.
  2144   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
  2145   // See note about verification in prepare_for_compaction().
  2148 void CompactibleFreeListSpace::compact() {
  2149   SCAN_AND_COMPACT(obj_size);
  2152 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
  2153 // where fbs is free block sizes
  2154 double CompactibleFreeListSpace::flsFrag() const {
  2155   size_t itabFree = totalSizeInIndexedFreeLists();
  2156   double frag = 0.0;
  2157   size_t i;
  2159   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2160     double sz  = i;
  2161     frag      += _indexedFreeList[i].count() * (sz * sz);
  2164   double totFree = itabFree +
  2165                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
  2166   if (totFree > 0) {
  2167     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
  2168             (totFree * totFree));
  2169     frag = (double)1.0  - frag;
  2170   } else {
  2171     assert(frag == 0.0, "Follows from totFree == 0");
  2173   return frag;
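       // A worked example of the metric above: if the same 100 words of free
       // space sit in a single 100-word block, frag = 1 - (100*100)/(100*100)
       // = 0 (no fragmentation), whereas 25 blocks of 4 words give
       // frag = 1 - (25*16)/(100*100) = 0.96; the metric approaches 1 as the
       // free space is shattered into many small pieces.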
  2176 void CompactibleFreeListSpace::beginSweepFLCensus(
  2177   float inter_sweep_current,
  2178   float inter_sweep_estimate,
  2179   float intra_sweep_estimate) {
  2180   assert_locked();
  2181   size_t i;
  2182   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2183     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
  2184     if (PrintFLSStatistics > 1) {
  2185       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
  2187     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
  2188     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
  2189     fl->set_before_sweep(fl->count());
  2190     fl->set_bfr_surp(fl->surplus());
  2192   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
  2193                                     inter_sweep_current,
  2194                                     inter_sweep_estimate,
  2195                                     intra_sweep_estimate);
  2198 void CompactibleFreeListSpace::setFLSurplus() {
  2199   assert_locked();
  2200   size_t i;
  2201   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2202     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2203     fl->set_surplus(fl->count() -
  2204                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
  2208 void CompactibleFreeListSpace::setFLHints() {
  2209   assert_locked();
  2210   size_t i;
  2211   size_t h = IndexSetSize;
  2212   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
  2213     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2214     fl->set_hint(h);
  2215     if (fl->surplus() > 0) {
  2216       h = i;
  2221 void CompactibleFreeListSpace::clearFLCensus() {
  2222   assert_locked();
  2223   size_t i;
  2224   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2225     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2226     fl->set_prev_sweep(fl->count());
  2227     fl->set_coal_births(0);
  2228     fl->set_coal_deaths(0);
  2229     fl->set_split_births(0);
  2230     fl->set_split_deaths(0);
  2234 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  2235   if (PrintFLSStatistics > 0) {
  2236     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
  2237     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
  2238                            p2i(largestAddr));
  2240   setFLSurplus();
  2241   setFLHints();
  2242   if (PrintGC && PrintFLSCensus > 0) {
  2243     printFLCensus(sweep_count);
  2245   clearFLCensus();
  2246   assert_locked();
  2247   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
  2250 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
  2251   if (size < SmallForDictionary) {
  2252     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2253     return (fl->coal_desired() < 0) ||
  2254            ((int)fl->count() > fl->coal_desired());
  2255   } else {
  2256     return dictionary()->coal_dict_over_populated(size);
  2260 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
  2261   assert(size < SmallForDictionary, "Size too large for indexed list");
  2262   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2263   fl->increment_coal_births();
  2264   fl->increment_surplus();
  2267 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
  2268   assert(size < SmallForDictionary, "Size too large for indexed list");
  2269   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2270   fl->increment_coal_deaths();
  2271   fl->decrement_surplus();
  2274 void CompactibleFreeListSpace::coalBirth(size_t size) {
  2275   if (size  < SmallForDictionary) {
  2276     smallCoalBirth(size);
  2277   } else {
  2278     dictionary()->dict_census_update(size,
  2279                                    false /* split */,
  2280                                    true /* birth */);
  2284 void CompactibleFreeListSpace::coalDeath(size_t size) {
  2285   if (size < SmallForDictionary) {
  2286     smallCoalDeath(size);
  2287   } else {
  2288     dictionary()->dict_census_update(size,
  2289                                    false /* split */,
  2290                                    false /* birth */);
  2294 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
  2295   assert(size < SmallForDictionary, "Size too large for indexed list");
  2296   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2297   fl->increment_split_births();
  2298   fl->increment_surplus();
  2301 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
  2302   assert(size < SmallForDictionary, "Size too large for indexed list");
  2303   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2304   fl->increment_split_deaths();
  2305   fl->decrement_surplus();
  2308 void CompactibleFreeListSpace::split_birth(size_t size) {
  2309   if (size  < SmallForDictionary) {
  2310     smallSplitBirth(size);
  2311   } else {
  2312     dictionary()->dict_census_update(size,
  2313                                    true /* split */,
  2314                                    true /* birth */);
  2318 void CompactibleFreeListSpace::splitDeath(size_t size) {
  2319   if (size  < SmallForDictionary) {
  2320     smallSplitDeath(size);
  2321   } else {
  2322     dictionary()->dict_census_update(size,
  2323                                    true /* split */,
  2324                                    false /* birth */);
  2328 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
  2329   size_t to2 = from - to1;
  2330   splitDeath(from);
  2331   split_birth(to1);
  2332   split_birth(to2);
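       // For example, split(100, 40) records a split-death for size 100 and
       // split-births for sizes 40 and 60, updating either the indexed-list
       // census or the dictionary census depending on which side of
       // SmallForDictionary each size falls.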
  2335 void CompactibleFreeListSpace::print() const {
  2336   print_on(tty);
  2339 void CompactibleFreeListSpace::prepare_for_verify() {
  2340   assert_locked();
  2341   repairLinearAllocationBlocks();
  2342   // Verify that the SpoolBlocks look like free blocks of
  2343   // appropriate sizes... To be done ...
  2346 class VerifyAllBlksClosure: public BlkClosure {
  2347  private:
  2348   const CompactibleFreeListSpace* _sp;
  2349   const MemRegion                 _span;
  2350   HeapWord*                       _last_addr;
  2351   size_t                          _last_size;
  2352   bool                            _last_was_obj;
  2353   bool                            _last_was_live;
  2355  public:
  2356   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
  2357     MemRegion span) :  _sp(sp), _span(span),
  2358                        _last_addr(NULL), _last_size(0),
  2359                        _last_was_obj(false), _last_was_live(false) { }
  2361   virtual size_t do_blk(HeapWord* addr) {
  2362     size_t res;
  2363     bool   was_obj  = false;
  2364     bool   was_live = false;
  2365     if (_sp->block_is_obj(addr)) {
  2366       was_obj = true;
  2367       oop p = oop(addr);
  2368       guarantee(p->is_oop(), "Should be an oop");
  2369       res = _sp->adjustObjectSize(p->size());
  2370       if (_sp->obj_is_alive(addr)) {
  2371         was_live = true;
  2372         p->verify();
  2374     } else {
  2375       FreeChunk* fc = (FreeChunk*)addr;
  2376       res = fc->size();
  2377       if (FLSVerifyLists && !fc->cantCoalesce()) {
  2378         guarantee(_sp->verify_chunk_in_free_list(fc),
  2379                   "Chunk should be on a free list");
  2382     if (res == 0) {
  2383       gclog_or_tty->print_cr("Livelock: no rank reduction!");
  2384       gclog_or_tty->print_cr(
  2385         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
  2386         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
  2387         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
  2388         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
  2389       _sp->print_on(gclog_or_tty);
  2390       guarantee(false, "Seppuku!");
  2392     _last_addr = addr;
  2393     _last_size = res;
  2394     _last_was_obj  = was_obj;
  2395     _last_was_live = was_live;
  2396     return res;
  2398 };
  2400 class VerifyAllOopsClosure: public OopClosure {
  2401  private:
  2402   const CMSCollector*             _collector;
  2403   const CompactibleFreeListSpace* _sp;
  2404   const MemRegion                 _span;
  2405   const bool                      _past_remark;
  2406   const CMSBitMap*                _bit_map;
  2408  protected:
  2409   void do_oop(void* p, oop obj) {
  2410     if (_span.contains(obj)) { // the interior oop points into CMS heap
  2411       if (!_span.contains(p)) { // reference from outside CMS heap
  2412         // Should be a valid object; the first disjunct below allows
  2413         // us to sidestep an assertion in block_is_obj() that insists
  2414         // that p be in _sp. Note that several generations (and spaces)
  2415         // are spanned by _span (CMS heap) above.
  2416         guarantee(!_sp->is_in_reserved(obj) ||
  2417                   _sp->block_is_obj((HeapWord*)obj),
  2418                   "Should be an object");
  2419         guarantee(obj->is_oop(), "Should be an oop");
  2420         obj->verify();
  2421         if (_past_remark) {
  2422           // Remark has been completed, the object should be marked
  2423           _bit_map->isMarked((HeapWord*)obj);
  2425       } else { // reference within CMS heap
  2426         if (_past_remark) {
  2427           // Remark has been completed -- so the referent should have
  2428           // been marked, if referring object is.
  2429           if (_bit_map->isMarked(_collector->block_start(p))) {
  2430             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
  2434     } else if (_sp->is_in_reserved(p)) {
  2435       // the reference is from FLS, and points out of FLS
  2436       guarantee(obj->is_oop(), "Should be an oop");
  2437       obj->verify();
  2441   template <class T> void do_oop_work(T* p) {
  2442     T heap_oop = oopDesc::load_heap_oop(p);
  2443     if (!oopDesc::is_null(heap_oop)) {
  2444       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2445       do_oop(p, obj);
  2449  public:
  2450   VerifyAllOopsClosure(const CMSCollector* collector,
  2451     const CompactibleFreeListSpace* sp, MemRegion span,
  2452     bool past_remark, CMSBitMap* bit_map) :
  2453     _collector(collector), _sp(sp), _span(span),
  2454     _past_remark(past_remark), _bit_map(bit_map) { }
  2456   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
  2457   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
  2458 };
  2460 void CompactibleFreeListSpace::verify() const {
  2461   assert_lock_strong(&_freelistLock);
  2462   verify_objects_initialized();
  2463   MemRegion span = _collector->_span;
  2464   bool past_remark = (_collector->abstract_state() ==
  2465                       CMSCollector::Sweeping);
  2467   ResourceMark rm;
  2468   HandleMark  hm;
  2470   // Check integrity of CFL data structures
  2471   _promoInfo.verify();
  2472   _dictionary->verify();
  2473   if (FLSVerifyIndexTable) {
  2474     verifyIndexedFreeLists();
  2476   // Check integrity of all objects and free blocks in space
  2478     VerifyAllBlksClosure cl(this, span);
  2479     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
  2481   // Check that all references in the heap to FLS
  2482   // are to valid objects in FLS or that references in
  2483   // FLS are to valid objects elsewhere in the heap
  2484   if (FLSVerifyAllHeapReferences)
  2486     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
  2487       _collector->markBitMap());
  2488     CollectedHeap* ch = Universe::heap();
  2490     // Iterate over all oops in the heap. Uses the _no_header version
  2491     // since we are not interested in following the klass pointers.
  2492     ch->oop_iterate_no_header(&cl);
  2495   if (VerifyObjectStartArray) {
  2496     // Verify the block offset table
  2497     _bt.verify();
  2501 #ifndef PRODUCT
  2502 void CompactibleFreeListSpace::verifyFreeLists() const {
  2503   if (FLSVerifyLists) {
  2504     _dictionary->verify();
  2505     verifyIndexedFreeLists();
  2506   } else {
  2507     if (FLSVerifyDictionary) {
  2508       _dictionary->verify();
  2510     if (FLSVerifyIndexTable) {
  2511       verifyIndexedFreeLists();
  2515 #endif
  2517 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
  2518   size_t i = 0;
  2519   for (; i < IndexSetStart; i++) {
  2520     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
  2522   for (; i < IndexSetSize; i++) {
  2523     verifyIndexedFreeList(i);
  2527 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
  2528   FreeChunk* fc   =  _indexedFreeList[size].head();
  2529   FreeChunk* tail =  _indexedFreeList[size].tail();
  2530   size_t    num = _indexedFreeList[size].count();
  2531   size_t      n = 0;
  2532   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
  2533             "Slot should have been empty");
  2534   for (; fc != NULL; fc = fc->next(), n++) {
  2535     guarantee(fc->size() == size, "Size inconsistency");
  2536     guarantee(fc->is_free(), "!free?");
  2537     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
  2538     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
  2540   guarantee(n == num, "Incorrect count");
  2543 #ifndef PRODUCT
  2544 void CompactibleFreeListSpace::check_free_list_consistency() const {
  2545   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
  2546     "Some sizes can't be allocated without recourse to"
  2547     " linear allocation buffers");
  2548   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
  2549     "else MIN_TREE_CHUNK_SIZE is wrong");
  2550   assert(IndexSetStart != 0, "IndexSetStart not initialized");
  2551   assert(IndexSetStride != 0, "IndexSetStride not initialized");
  2552 }
  2553 #endif
  2555 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  2556   assert_lock_strong(&_freelistLock);
  2557   AdaptiveFreeList<FreeChunk> total;
  2558   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  2559   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
  2560   size_t total_free = 0;
  2561   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2562     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2563     total_free += fl->count() * fl->size();
  2564     if (i % (40*IndexSetStride) == 0) {
  2565       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
  2566     }
  2567     fl->print_on(gclog_or_tty);
  2568     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
  2569     total.set_surplus(    total.surplus()     + fl->surplus()    );
  2570     total.set_desired(    total.desired()     + fl->desired()    );
  2571     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
  2572     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
  2573     total.set_count(      total.count()       + fl->count()      );
  2574     total.set_coal_births( total.coal_births()  + fl->coal_births() );
  2575     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
  2576     total.set_split_births(total.split_births() + fl->split_births());
  2577     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
  2578   }
  2579   total.print_on(gclog_or_tty, "TOTAL");
  2580   gclog_or_tty->print_cr("Total free in indexed lists "
  2581                          SIZE_FORMAT " words", total_free);
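         // The "growth" figure printed below is the net number of blocks added to
         // the indexed lists since the previous sweep (split and coal births minus
         // split and coal deaths), divided by the count recorded at that sweep;
         // "deficit" is the fraction of the desired block count still outstanding.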
  2582   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
  2583     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
  2584             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
  2585     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
  2586   _dictionary->print_dict_census();
  2587 }
  2589 ///////////////////////////////////////////////////////////////////////////
  2590 // CFLS_LAB
  2591 ///////////////////////////////////////////////////////////////////////////
  2593 #define VECTOR_257(x)                                                                                  \
  2594   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
  2595   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2596      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2597      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2598      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2599      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2600      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2601      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2602      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2603      x }
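       // The initializer above supplies eight rows of 32 entries plus one final
       // entry, i.e. 257 in all, which must match CompactibleFreeListSpace::IndexSetSize
       // (see the assert in the CFLS_LAB constructor below).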
  2605 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
  2606 // OldPLABSize, whose static default is different; if overridden at the
  2607 // command-line, this will get reinitialized via a call to
  2608 // modify_initialization() below.
  2609 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
  2610   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
  2611 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
  2612 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
  2614 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
  2615   _cfls(cfls)
  2616 {
  2617   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
  2618   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2619        i < CompactibleFreeListSpace::IndexSetSize;
  2620        i += CompactibleFreeListSpace::IndexSetStride) {
  2621     _indexedFreeList[i].set_size(i);
  2622     _num_blocks[i] = 0;
  2623   }
  2624 }
  2626 static bool _CFLS_LAB_modified = false;
  2628 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
  2629   assert(!_CFLS_LAB_modified, "Call only once");
  2630   _CFLS_LAB_modified = true;
  2631   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2632        i < CompactibleFreeListSpace::IndexSetSize;
  2633        i += CompactibleFreeListSpace::IndexSetStride) {
  2634     _blocks_to_claim[i].modify(n, wt, true /* force */);
  2635   }
  2636 }
  2638 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  2639   FreeChunk* res;
  2640   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
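         // Requests of at least IndexSetSize words go to the shared dictionary
         // under parDictionaryAllocLock(); smaller requests are served from this
         // thread's local indexed free lists, refilled from the global pool on demand.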
  2641   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
  2642     // This locking manages sync with other large object allocations.
  2643     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
  2644                     Mutex::_no_safepoint_check_flag);
  2645     res = _cfls->getChunkFromDictionaryExact(word_sz);
  2646     if (res == NULL) return NULL;
  2647   } else {
  2648     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
  2649     if (fl->count() == 0) {
  2650       // Attempt to refill this local free list.
  2651       get_from_global_pool(word_sz, fl);
  2652       // If it didn't work, give up.
  2653       if (fl->count() == 0) return NULL;
  2654     }
  2655     res = fl->get_chunk_at_head();
  2656     assert(res != NULL, "Why was count non-zero?");
  2657   }
  2658   res->markNotFree();
  2659   assert(!res->is_free(), "shouldn't be marked free");
  2660   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  2661   // mangle a just allocated object with a distinct pattern.
  2662   debug_only(res->mangleAllocated(word_sz));
  2663   return (HeapWord*)res;
  2664 }
  2666 // Get a chunk of blocks of the right size and update related
  2667 // book-keeping stats
  2668 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
  2669   // Get the #blocks we want to claim
  2670   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
  2671   assert(n_blks > 0, "Error");
  2672   assert(ResizePLAB || n_blks == OldPLABSize, "Error");
  2673   // In some cases, when the application has a phase change,
  2674   // there may be a sudden and sharp shift in the object survival
  2675   // profile, and updating the counts at the end of a scavenge
  2676   // may not be quick enough, giving rise to large scavenge pauses
  2677   // during these phase changes. It is beneficial to detect such
  2678   // changes on-the-fly during a scavenge and avoid such a phase-change
  2679   // pothole. The following code is a heuristic attempt to do that.
  2680   // It is protected by a product flag until we have gained
  2681   // enough experience with this heuristic and fine-tuned its behaviour.
  2682   // WARNING: This might increase fragmentation if we overreact to
  2683   // small spikes, so some kind of historical smoothing based on
  2684   // previous experience with the greater reactivity might be useful.
  2685   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
  2686   // default.
  2687   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
  2688     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
  2689     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
  2690     n_blks = MIN2(n_blks, CMSOldPLABMax);
  2691   }
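         // Worked example with hypothetical numbers: if this thread has already
         // used _num_blocks[word_sz] equal to 4 * CMSOldPLABToleranceFactor *
         // CMSOldPLABNumRefills * n_blks blocks of this size, then multiple == 4
         // and the claim above grows by CMSOldPLABReactivityFactor * 4 * n_blks,
         // subject to the CMSOldPLABMax cap.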
  2692   assert(n_blks > 0, "Error");
  2693   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
  2694   // Update stats table entry for this block size
  2695   _num_blocks[word_sz] += fl->count();
  2696 }
  2698 void CFLS_LAB::compute_desired_plab_size() {
  2699   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2700        i < CompactibleFreeListSpace::IndexSetSize;
  2701        i += CompactibleFreeListSpace::IndexSetStride) {
  2702     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
  2703            "Counter inconsistency");
  2704     if (_global_num_workers[i] > 0) {
  2705       // Need to smooth wrt historical average
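             // The sample fed to the weighted average is (roughly) blocks used per
             // worker per refill during the scavenge just completed, clamped to the
             // [CMSOldPLABMin, CMSOldPLABMax] range.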
  2706       if (ResizeOldPLAB) {
  2707         _blocks_to_claim[i].sample(
  2708           MAX2((size_t)CMSOldPLABMin,
  2709           MIN2((size_t)CMSOldPLABMax,
  2710                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
  2711       }
  2712       // Reset counters for next round
  2713       _global_num_workers[i] = 0;
  2714       _global_num_blocks[i] = 0;
  2715       if (PrintOldPLAB) {
  2716         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
  2717       }
  2718     }
  2719   }
  2720 }
  2722 // If this is changed in the future to allow parallel
  2723 // access, one would need to take the FL locks and,
  2724 // depending on how it is used, stagger access from
  2725 // parallel threads to reduce contention.
  2726 void CFLS_LAB::retire(int tid) {
  2727   // We run this single threaded with the world stopped;
  2728   // so no need for locks and such.
  2729   NOT_PRODUCT(Thread* t = Thread::current();)
  2730   assert(Thread::current()->is_VM_thread(), "Error");
  2731   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2732        i < CompactibleFreeListSpace::IndexSetSize;
  2733        i += CompactibleFreeListSpace::IndexSetStride) {
  2734     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
  2735            "Can't retire more than what we obtained");
  2736     if (_num_blocks[i] > 0) {
  2737       size_t num_retire =  _indexedFreeList[i].count();
  2738       assert(_num_blocks[i] > num_retire, "Should have used at least one");
  2739       {
  2740         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
  2741         //                Mutex::_no_safepoint_check_flag);
  2743         // Update globals stats for num_blocks used
  2744         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
  2745         _global_num_workers[i]++;
  2746         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
  2747         if (num_retire > 0) {
  2748           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
  2749           // Reset this list.
  2750           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
  2751           _indexedFreeList[i].set_size(i);
  2752         }
  2753       }
  2754       if (PrintOldPLAB) {
  2755         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
  2756                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
  2757       }
  2758       // Reset stats for next round
  2759       _num_blocks[i]         = 0;
  2760     }
  2761   }
  2762 }
  2764 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
  2765   assert(fl->count() == 0, "Precondition.");
  2766   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
  2767          "Precondition");
  2769   // We'll try all multiples of word_sz in the indexed set, starting with
  2770   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
  2771   // then try getting a big chunk and splitting it.
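         // Purely illustrative example: with word_sz == 8 and n == 64, k == 1 looks
         // for 64 free blocks on the 8-word list; failing that (and if
         // CMSSplitIndexedFreeListBlocks is enabled), k == 2 takes up to 64/2 == 32
         // blocks from the 16-word list and splits each in two, and so on while
         // k * word_sz stays below IndexSetSize.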
  2772   {
  2773     bool found;
  2774     int  k;
  2775     size_t cur_sz;
  2776     for (k = 1, cur_sz = k * word_sz, found = false;
  2777          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
  2778          (CMSSplitIndexedFreeListBlocks || k <= 1);
  2779          k++, cur_sz = k * word_sz) {
  2780       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
  2781       fl_for_cur_sz.set_size(cur_sz);
  2782       {
  2783         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
  2784                         Mutex::_no_safepoint_check_flag);
  2785         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
  2786         if (gfl->count() != 0) {
  2787           // nn is the number of chunks of size cur_sz that
  2788           // we'd need to split k-ways each, in order to create
  2789           // "n" chunks of size word_sz each.
  2790           const size_t nn = MAX2(n/k, (size_t)1);
  2791           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
  2792           found = true;
  2793           if (k > 1) {
  2794             // Update split death stats for the cur_sz-size blocks list:
  2795             // we increment the split death count by the number of blocks
  2796             // we just took from the cur_sz-size blocks list and which
  2797             // we will be splitting below.
  2798             ssize_t deaths = gfl->split_deaths() +
  2799                              fl_for_cur_sz.count();
  2800             gfl->set_split_deaths(deaths);
  2801           }
  2802         }
  2803       }
  2804       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
  2805       if (found) {
  2806         if (k == 1) {
  2807           fl->prepend(&fl_for_cur_sz);
  2808         } else {
  2809           // Divide each block on fl_for_cur_sz up k ways.
  2810           FreeChunk* fc;
  2811           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
  2812             // Must do this in reverse order, so that anybody attempting to
  2813             // access the main chunk sees it as a single free block until we
  2814             // change it.
  2815             size_t fc_size = fc->size();
  2816             assert(fc->is_free(), "Error");
  2817             for (int i = k-1; i >= 0; i--) {
  2818               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2819               assert((i != 0) ||
  2820                         ((fc == ffc) && ffc->is_free() &&
  2821                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
  2822                         "Counting error");
  2823               ffc->set_size(word_sz);
  2824               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
  2825               ffc->link_next(NULL);
  2826               // Above must occur before BOT is updated below.
  2827               OrderAccess::storestore();
  2828               // splitting from the right, fc_size == i * word_sz
  2829               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
  2830               fc_size -= word_sz;
  2831               assert(fc_size == i*word_sz, "Error");
  2832               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
  2833               _bt.verify_single_block((HeapWord*)fc, fc_size);
  2834               _bt.verify_single_block((HeapWord*)ffc, word_sz);
  2835               // Push this on "fl".
  2836               fl->return_chunk_at_head(ffc);
  2837             }
  2838             // TRAP
  2839             assert(fl->tail()->next() == NULL, "List invariant.");
  2840           }
  2841         }
  2842         // Update birth stats for this block size.
  2843         size_t num = fl->count();
  2844         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  2845                         Mutex::_no_safepoint_check_flag);
  2846         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
  2847         _indexedFreeList[word_sz].set_split_births(births);
  2848         return;
  2849       }
  2850     }
  2851   }
  2852   // Otherwise, we'll split a block from the dictionary.
  2853   FreeChunk* fc = NULL;
  2854   FreeChunk* rem_fc = NULL;
  2855   size_t rem;
  2856   {
  2857     MutexLockerEx x(parDictionaryAllocLock(),
  2858                     Mutex::_no_safepoint_check_flag);
  2859     while (n > 0) {
  2860       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
  2861                                   FreeBlockDictionary<FreeChunk>::atLeast);
  2862       if (fc != NULL) {
  2863         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
  2864         dictionary()->dict_census_update(fc->size(),
  2865                                        true /*split*/,
  2866                                        false /*birth*/);
  2867         break;
  2868       } else {
  2869         n--;
  2870       }
  2871     }
  2872     if (fc == NULL) return;
  2873     // Otherwise, split up that block.
  2874     assert((ssize_t)n >= 1, "Control point invariant");
  2875     assert(fc->is_free(), "Error: should be a free block");
  2876     _bt.verify_single_block((HeapWord*)fc, fc->size());
  2877     const size_t nn = fc->size() / word_sz;
  2878     n = MIN2(nn, n);
  2879     assert((ssize_t)n >= 1, "Control point invariant");
  2880     rem = fc->size() - n * word_sz;
  2881     // If there is a remainder, and it's too small, allocate one fewer.
  2882     if (rem > 0 && rem < MinChunkSize) {
  2883       n--; rem += word_sz;
  2884     }
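           // Purely illustrative: if the chunk found is 103 words, word_sz is 10 and
           // MinChunkSize is (say) 7, then n == 10 (assuming at least that many were
           // requested) leaves rem == 3 < MinChunkSize, so one fewer block is carved
           // out (n == 9) and the remainder grows to 13 words.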
  2885     // Note that at this point we may have n == 0.
  2886     assert((ssize_t)n >= 0, "Control point invariant");
  2888     // If n is 0, the chunk fc that was found is not large
  2889     // enough to leave a viable remainder.  We are unable to
  2890     // allocate even one block.  Return fc to the
  2891     // dictionary and return, leaving "fl" empty.
  2892     if (n == 0) {
  2893       returnChunkToDictionary(fc);
  2894       assert(fl->count() == 0, "We never allocated any blocks");
  2895       return;
  2896     }
  2898     // First return the remainder, if any.
  2899     // Note that we hold the lock until we decide if we're going to give
  2900     // back the remainder to the dictionary, since a concurrent allocation
  2901     // may otherwise see the heap as empty.  (We're willing to take that
  2902     // hit if the block is a small block.)
  2903     if (rem > 0) {
  2904       size_t prefix_size = n * word_sz;
  2905       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
  2906       rem_fc->set_size(rem);
  2907       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
  2908       rem_fc->link_next(NULL);
  2909       // Above must occur before BOT is updated below.
  2910       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
  2911       OrderAccess::storestore();
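             // Rationale: a parallel GC thread that locates the remainder via the
             // BOT update below must already see it sized and linked as a free
             // block; the store-store barrier above enforces that ordering.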
  2912       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
  2913       assert(fc->is_free(), "Error");
  2914       fc->set_size(prefix_size);
  2915       if (rem >= IndexSetSize) {
  2916         returnChunkToDictionary(rem_fc);
  2917         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
  2918         rem_fc = NULL;
  2919       }
  2920       // Otherwise, return it to the small list below.
  2921     }
  2922   }
  2923   if (rem_fc != NULL) {
  2924     MutexLockerEx x(_indexedFreeListParLocks[rem],
  2925                     Mutex::_no_safepoint_check_flag);
  2926     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
  2927     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
  2928     smallSplitBirth(rem);
  2929   }
  2930   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  2931   // Now do the splitting up.
  2932   // Must do this in reverse order, so that anybody attempting to
  2933   // access the main chunk sees it as a single free block until we
  2934   // change it.
  2935   size_t fc_size = n * word_sz;
  2936   // All but first chunk in this loop
  2937   for (ssize_t i = n-1; i > 0; i--) {
  2938     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2939     ffc->set_size(word_sz);
  2940     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
  2941     ffc->link_next(NULL);
  2942     // Above must occur before BOT is updated below.
  2943     OrderAccess::storestore();
  2944     // splitting from the right; fc_size == (i + 1) * word_sz at this point
  2945     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
  2946     fc_size -= word_sz;
  2947     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
  2948     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
  2949     _bt.verify_single_block((HeapWord*)fc, fc_size);
  2950     // Push this on "fl".
  2951     fl->return_chunk_at_head(ffc);
  2952   }
  2953   // First chunk
  2954   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
  2955   // The blocks above should show their new sizes before the first block below
  2956   fc->set_size(word_sz);
  2957   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
  2958   fc->link_next(NULL);
  2959   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  2960   _bt.verify_single_block((HeapWord*)fc, fc->size());
  2961   fl->return_chunk_at_head(fc);
  2963   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  2964   {
  2965     // Update the stats for this block size.
  2966     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  2967                     Mutex::_no_safepoint_check_flag);
  2968     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
  2969     _indexedFreeList[word_sz].set_split_births(births);
  2970     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
  2971     // _indexedFreeList[word_sz].set_surplus(new_surplus);
  2972   }
  2974   // TRAP
  2975   assert(fl->tail()->next() == NULL, "List invariant.");
  2976 }
  2978 // Set up the space's par_seq_tasks structure for work claiming
  2979 // for parallel rescan. See CMSParRemarkTask where this is currently used.
  2980 // XXX Need to suitably abstract and generalize this and the next
  2981 // method into one.
  2982 void
  2983 CompactibleFreeListSpace::
  2984 initialize_sequential_subtasks_for_rescan(int n_threads) {
  2985   // The "size" of each task is fixed according to rescan_task_size.
  2986   assert(n_threads > 0, "Unexpected n_threads argument");
  2987   const size_t task_size = rescan_task_size();
  2988   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
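         // i.e. n_tasks is the ceiling of used_region().word_size() / task_size,
         // so the last task may span a partial stride.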
  2989   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  2990   assert(n_tasks == 0 ||
  2991          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
  2992           (used_region().start() + n_tasks*task_size >= used_region().end())),
  2993          "n_tasks calculation incorrect");
  2994   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  2995   assert(!pst->valid(), "Clobbering existing data?");
  2996   // Sets the condition for completion of the subtask (how many threads
  2997   // need to finish in order to be done).
  2998   pst->set_n_threads(n_threads);
  2999   pst->set_n_tasks((int)n_tasks);
  3000 }
  3002 // Set up the space's par_seq_tasks structure for work claiming
  3003 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
  3004 void
  3005 CompactibleFreeListSpace::
  3006 initialize_sequential_subtasks_for_marking(int n_threads,
  3007                                            HeapWord* low) {
  3008   // The "size" of each task is fixed according to rescan_task_size.
  3009   assert(n_threads > 0, "Unexpected n_threads argument");
  3010   const size_t task_size = marking_task_size();
  3011   assert(task_size > CardTableModRefBS::card_size_in_words &&
  3012          (task_size %  CardTableModRefBS::card_size_in_words == 0),
  3013          "Otherwise arithmetic below would be incorrect");
  3014   MemRegion span = _gen->reserved();
  3015   if (low != NULL) {
  3016     if (span.contains(low)) {
  3017       // Align low down to  a card boundary so that
  3018       // we can use block_offset_careful() on span boundaries.
  3019       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
  3020                                  CardTableModRefBS::card_size);
  3021       // Clip span prefix at aligned_low
  3022       span = span.intersection(MemRegion(aligned_low, span.end()));
  3023     } else if (low > span.end()) {
  3024       span = MemRegion(low, low);  // Null region
  3025     } // else use entire span
  3026   }
  3027   assert(span.is_empty() ||
  3028          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
  3029         "span should start at a card boundary");
  3030   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  3031   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  3032   assert(n_tasks == 0 ||
  3033          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
  3034           (span.start() + n_tasks*task_size >= span.end())),
  3035          "n_tasks calculation incorrect");
  3036   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  3037   assert(!pst->valid(), "Clobbering existing data?");
  3038   // Sets the condition for completion of the subtask (how many threads
  3039   // need to finish in order to be done).
  3040   pst->set_n_threads(n_threads);
  3041   pst->set_n_tasks((int)n_tasks);
  3042 }
