src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author:      brutisso
date:        Mon, 16 Apr 2012 08:57:18 +0200
changeset:   3711:b632e80fc9dc
parent:      3357:441e946dc1af
child:       3730:9f059abe8cf2
permissions: -rw-r--r--

4988100: oop_verify_old_oop appears to be dead
Summary: removed oop_verify_old_oop and allow_dirty. Also reviewed by: alexlamsl@gmail.com
Reviewed-by: jmasa, jwilhelm

     1 /*
     2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
    27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
    28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
    29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
    30 #include "gc_implementation/shared/liveRange.hpp"
    31 #include "gc_implementation/shared/spaceDecorator.hpp"
    32 #include "gc_interface/collectedHeap.hpp"
    33 #include "memory/allocation.inline.hpp"
    34 #include "memory/blockOffsetTable.inline.hpp"
    35 #include "memory/resourceArea.hpp"
    36 #include "memory/universe.inline.hpp"
    37 #include "oops/oop.inline.hpp"
    38 #include "runtime/globals.hpp"
    39 #include "runtime/handles.inline.hpp"
    40 #include "runtime/init.hpp"
    41 #include "runtime/java.hpp"
    42 #include "runtime/vmThread.hpp"
    43 #include "utilities/copy.hpp"
    45 /////////////////////////////////////////////////////////////////////////
    46 //// CompactibleFreeListSpace
    47 /////////////////////////////////////////////////////////////////////////
    49 // highest ranked  free list lock rank
    50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
    52 // Defaults are 0 so things will break badly if incorrectly initialized.
    53 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
    54 size_t CompactibleFreeListSpace::IndexSetStride = 0;
    56 size_t MinChunkSize = 0;
    58 void CompactibleFreeListSpace::set_cms_values() {
    59   // Set CMS global values
    60   assert(MinChunkSize == 0, "already set");
    61   #define numQuanta(x,y) ((x+y-1)/y)
    62   MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
    64   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
    65   IndexSetStart  = MinChunkSize;
    66   IndexSetStride = MinObjAlignment;
    67 }
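       // Worked example of the computation above (the concrete values are
       // illustrative assumptions, not taken from this file): on a 32-bit VM
       // with 8-byte object alignment, MinObjAlignmentInBytes == 8 and
       // MinObjAlignment == 2 heap words. If sizeof(FreeChunk) were 12 bytes,
       // then numQuanta(12, 8) == (12 + 8 - 1) / 8 == 2, so
       // MinChunkSize == 2 * 2 == 4 words, IndexSetStart == 4, and
       // IndexSetStride == 2 (only even-sized indexed free lists are used).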
    69 // Constructor
    70 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
    71   MemRegion mr, bool use_adaptive_freelists,
    72   FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
    73   _dictionaryChoice(dictionaryChoice),
    74   _adaptive_freelists(use_adaptive_freelists),
    75   _bt(bs, mr),
    76   // free list locks are in the range of values taken by _lockRank
    77   // This range currently is [_leaf+2, _leaf+3]
    78   // Note: this requires that CFLspace c'tors
    79   // are called serially in the order in which the locks are
    80   // acquired in the program text. This is true today.
    81   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
    82   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
    83                           "CompactibleFreeListSpace._dict_par_lock", true),
    84   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    85                     CMSRescanMultiple),
    86   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    87                     CMSConcMarkMultiple),
    88   _collector(NULL)
    89 {
    90   _bt.set_space(this);
    91   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    92   // We have all of "mr", all of which we place in the dictionary
    93   // as one big chunk. We'll need to decide here which of several
    94   // possible alternative dictionary implementations to use. For
    95   // now the choice is easy, since we have only one working
    96   // implementation, namely, the simple binary tree (splaying
    97   // temporarily disabled).
    98   switch (dictionaryChoice) {
    99     case FreeBlockDictionary::dictionarySplayTree:
   100     case FreeBlockDictionary::dictionarySkipList:
   101     default:
   102       warning("dictionaryChoice: selected option not understood; using"
   103               " default BinaryTreeDictionary implementation instead.");
   104     case FreeBlockDictionary::dictionaryBinaryTree:
   105       _dictionary = new BinaryTreeDictionary(mr);
   106       break;
   107   }
   108   assert(_dictionary != NULL, "CMS dictionary initialization");
   109   // The indexed free lists are initially all empty and are lazily
   110   // filled in on demand. Initialize the array elements to NULL.
   111   initializeIndexedFreeListArray();
   113   // Not using adaptive free lists assumes that allocation is first
   114   // from the linAB's.  Also a cms perm gen which can be compacted
   115   // has to have the klass's klassKlass allocated at a lower
   116   // address in the heap than the klass so that the klassKlass is
   117   // moved to its new location before the klass is moved.
   118   // Set the _refillSize for the linear allocation blocks
   119   if (!use_adaptive_freelists) {
   120     FreeChunk* fc = _dictionary->getChunk(mr.word_size());
   121     // The small linAB initially has all the space and will allocate
   122     // a chunk of any size.
   123     HeapWord* addr = (HeapWord*) fc;
   124     _smallLinearAllocBlock.set(addr, fc->size() ,
   125       1024*SmallForLinearAlloc, fc->size());
   126     // Note that _unallocated_block is not updated here.
   127     // Allocations from the linear allocation block should
   128     // update it.
   129   } else {
   130     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
   131                                SmallForLinearAlloc);
   132   }
   133   // CMSIndexedFreeListReplenish should be at least 1
   134   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
   135   _promoInfo.setSpace(this);
   136   if (UseCMSBestFit) {
   137     _fitStrategy = FreeBlockBestFitFirst;
   138   } else {
   139     _fitStrategy = FreeBlockStrategyNone;
   140   }
   141   check_free_list_consistency();
   143   // Initialize locks for parallel case.
   145   if (CollectedHeap::use_parallel_gc_threads()) {
   146     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   147       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
   148                                               "a freelist par lock",
   149                                               true);
   150       if (_indexedFreeListParLocks[i] == NULL)
   151         vm_exit_during_initialization("Could not allocate a par lock");
   152       DEBUG_ONLY(
   153         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
   154       )
   155     }
   156     _dictionary->set_par_lock(&_parDictionaryAllocLock);
   157   }
   158 }
   160 // Like CompactibleSpace forward() but always calls cross_threshold() to
   161 // update the block offset table.  Removed initialize_threshold call because
   162 // CFLS does not use a block offset array for contiguous spaces.
   163 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
   164                                     CompactPoint* cp, HeapWord* compact_top) {
   165   // q is alive
   166   // First check if we should switch compaction space
   167   assert(this == cp->space, "'this' should be current compaction space.");
   168   size_t compaction_max_size = pointer_delta(end(), compact_top);
   169   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
   170     "virtual adjustObjectSize_v() method is not correct");
   171   size_t adjusted_size = adjustObjectSize(size);
   172   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
   173          "no small fragments allowed");
   174   assert(minimum_free_block_size() == MinChunkSize,
   175          "for de-virtualized reference below");
   176   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
   177   if (adjusted_size + MinChunkSize > compaction_max_size &&
   178       adjusted_size != compaction_max_size) {
   179     do {
   180       // switch to next compaction space
   181       cp->space->set_compaction_top(compact_top);
   182       cp->space = cp->space->next_compaction_space();
   183       if (cp->space == NULL) {
   184         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
   185         assert(cp->gen != NULL, "compaction must succeed");
   186         cp->space = cp->gen->first_compaction_space();
   187         assert(cp->space != NULL, "generation must have a first compaction space");
   188       }
   189       compact_top = cp->space->bottom();
   190       cp->space->set_compaction_top(compact_top);
   191       // The correct adjusted_size may not be the same as that for this method
   192       // (i.e., cp->space may no longer be "this"), so adjust the size again.
   193       // Use the virtual method which is not used above to save the virtual
   194       // dispatch.
   195       adjusted_size = cp->space->adjust_object_size_v(size);
   196       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
   197       assert(cp->space->minimum_free_block_size() == 0, "just checking");
   198     } while (adjusted_size > compaction_max_size);
   199   }
   201   // store the forwarding pointer into the mark word
   202   if ((HeapWord*)q != compact_top) {
   203     q->forward_to(oop(compact_top));
   204     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
   205   } else {
   206     // if the object isn't moving we can just set the mark to the default
   207     // mark and handle it specially later on.
   208     q->init_mark();
   209     assert(q->forwardee() == NULL, "should be forwarded to NULL");
   210   }
   212   VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
   213   compact_top += adjusted_size;
   215   // we need to update the offset table so that the beginnings of objects can be
   216   // found during scavenge.  Note that we are updating the offset table based on
   217   // where the object will be once the compaction phase finishes.
   219   // Always call cross_threshold().  A contiguous space can only call it when
   220   // the compaction_top exceeds the current threshold but not for a
   221   // non-contiguous space.
   222   cp->threshold =
   223     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
   224   return compact_top;
   225 }
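       // Illustrative arithmetic for the fragment check above (numbers are made
       // up): with MinChunkSize == 4, adjusted_size == 10 and
       // compaction_max_size == 12, we have 10 + 4 > 12 and 10 != 12, so placing
       // the object here would strand a 2-word fragment smaller than
       // MinChunkSize, and the loop advances to the next compaction space. If
       // compaction_max_size were exactly 10, the object would fill the space
       // completely and no switch would occur.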
   227 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
   228 // and use of single_block instead of alloc_block.  The name here is not really
   229 // appropriate - maybe a more general name could be invented for both the
   230 // contiguous and noncontiguous spaces.
   232 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
   233   _bt.single_block(start, the_end);
   234   return end();
   235 }
   237 // Initialize them to NULL.
   238 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
   239   for (size_t i = 0; i < IndexSetSize; i++) {
   240     // Note that on platforms where objects are double word aligned,
   241     // the odd array elements are not used.  It is convenient, however,
   242     // to map directly from the object size to the array element.
   243     _indexedFreeList[i].reset(IndexSetSize);
   244     _indexedFreeList[i].set_size(i);
   245     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   246     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   247     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   248     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   249   }
   250 }
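       // Example of the size-to-index mapping noted above (sizes are in heap
       // words and purely illustrative): a free chunk of 17 words is kept on
       // _indexedFreeList[17]. On a platform where objects are double-word
       // aligned (IndexSetStride == 2) such odd sizes cannot arise, so the odd
       // elements stay empty. Chunks of IndexSetSize words or more go to the
       // dictionary instead (see reset() below).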
   252 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
   253   for (size_t i = 1; i < IndexSetSize; i++) {
   254     assert(_indexedFreeList[i].size() == (size_t) i,
   255       "Indexed free list sizes are incorrect");
   256     _indexedFreeList[i].reset(IndexSetSize);
   257     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   258     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   259     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   260     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   261   }
   262 }
   264 void CompactibleFreeListSpace::reset(MemRegion mr) {
   265   resetIndexedFreeListArray();
   266   dictionary()->reset();
   267   if (BlockOffsetArrayUseUnallocatedBlock) {
   268     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
   269     // Everything's allocated until proven otherwise.
   270     _bt.set_unallocated_block(end());
   271   }
   272   if (!mr.is_empty()) {
   273     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
   274     _bt.single_block(mr.start(), mr.word_size());
   275     FreeChunk* fc = (FreeChunk*) mr.start();
   276     fc->setSize(mr.word_size());
   277     if (mr.word_size() >= IndexSetSize ) {
   278       returnChunkToDictionary(fc);
   279     } else {
   280       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
   281       _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
   282     }
   283   }
   284   _promoInfo.reset();
   285   _smallLinearAllocBlock._ptr = NULL;
   286   _smallLinearAllocBlock._word_size = 0;
   287 }
   289 void CompactibleFreeListSpace::reset_after_compaction() {
   290   // Reset the space to the new reality - one free chunk.
   291   MemRegion mr(compaction_top(), end());
   292   reset(mr);
   293   // Now refill the linear allocation block(s) if possible.
   294   if (_adaptive_freelists) {
   295     refillLinearAllocBlocksIfNeeded();
   296   } else {
   297     // Place as much of mr in the linAB as we can get,
   298     // provided it was big enough to go into the dictionary.
   299     FreeChunk* fc = dictionary()->findLargestDict();
   300     if (fc != NULL) {
   301       assert(fc->size() == mr.word_size(),
   302              "Why was the chunk broken up?");
   303       removeChunkFromDictionary(fc);
   304       HeapWord* addr = (HeapWord*) fc;
   305       _smallLinearAllocBlock.set(addr, fc->size() ,
   306         1024*SmallForLinearAlloc, fc->size());
   307       // Note that _unallocated_block is not updated here.
   308     }
   309   }
   310 }
   312 // Walks the entire dictionary, returning a coterminal
   313 // chunk, if it exists. Use with caution since it involves
   314 // a potentially complete walk of a potentially large tree.
   315 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
   317   assert_lock_strong(&_freelistLock);
   319   return dictionary()->find_chunk_ends_at(end());
   320 }
   323 #ifndef PRODUCT
   324 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
   325   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   326     _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
   327   }
   328 }
   330 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
   331   size_t sum = 0;
   332   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   333     sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
   334   }
   335   return sum;
   336 }
   338 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
   339   size_t count = 0;
   340   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
   341     debug_only(
   342       ssize_t total_list_count = 0;
   343       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   344          fc = fc->next()) {
   345         total_list_count++;
   346       }
   347       assert(total_list_count ==  _indexedFreeList[i].count(),
   348         "Count in list is incorrect");
   349     )
   350     count += _indexedFreeList[i].count();
   351   }
   352   return count;
   353 }
   355 size_t CompactibleFreeListSpace::totalCount() {
   356   size_t num = totalCountInIndexedFreeLists();
   357   num +=  dictionary()->totalCount();
   358   if (_smallLinearAllocBlock._word_size != 0) {
   359     num++;
   360   }
   361   return num;
   362 }
   363 #endif
   365 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
   366   FreeChunk* fc = (FreeChunk*) p;
   367   return fc->isFree();
   368 }
   370 size_t CompactibleFreeListSpace::used() const {
   371   return capacity() - free();
   372 }
   374 size_t CompactibleFreeListSpace::free() const {
   375   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   376   // if you do this while the structures are in flux you
   377   // may get an approximate answer only; for instance
   378   // because there is concurrent allocation either
   379   // directly by mutators or for promotion during a GC.
   380   // It's "MT-safe", however, in the sense that you are guaranteed
   381   // not to crash and burn, for instance, because of walking
   382   // pointers that could disappear as you were walking them.
   383   // The approximation is because the various components
   384   // that are read below are not read atomically (and
   385   // further the computation of totalSizeInIndexedFreeLists()
   386   // is itself a non-atomic computation). The normal use of
   387   // this is during a resize operation at the end of GC
   388   // and at that time you are guaranteed to get the
   389   // correct actual value. However, for instance, this is
   390   // also read completely asynchronously by the "perf-sampler"
   391   // that supports jvmstat, and you are apt to see the values
   392   // flicker in such cases.
   393   assert(_dictionary != NULL, "No _dictionary?");
   394   return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
   395           totalSizeInIndexedFreeLists() +
   396           _smallLinearAllocBlock._word_size) * HeapWordSize;
   397 }
   399 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
   400   assert(_dictionary != NULL, "No _dictionary?");
   401   assert_locked();
   402   size_t res = _dictionary->maxChunkSize();
   403   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
   404                        (size_t) SmallForLinearAlloc - 1));
   405   // XXX the following could potentially be pretty slow;
   406   // should one, pessimally for the rare cases when res
   407   // calculated above is less than IndexSetSize,
   408   // just return res calculated above? My reasoning was that
   409   // those cases will be so rare that the extra time spent doesn't
   410   // really matter....
   411   // Note: do not change the loop test i >= res + IndexSetStride
   412   // to i > res below, because i is unsigned and res may be zero.
   413   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
   414        i -= IndexSetStride) {
   415     if (_indexedFreeList[i].head() != NULL) {
   416       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   417       return i;
   418     }
   419   }
   420   return res;
   421 }
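       // Why the loop test above must not be weakened to "i > res": i is
       // unsigned, so if res == 0 and i ever reached a positive value smaller
       // than IndexSetStride, the decrement i -= IndexSetStride would wrap
       // around to a huge value instead of going negative, and the loop would
       // walk off the array. Requiring i >= res + IndexSetStride guarantees
       // that i is at least IndexSetStride before each decrement.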
   423 void LinearAllocBlock::print_on(outputStream* st) const {
   424   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
   425             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
   426             _ptr, _word_size, _refillSize, _allocation_size_limit);
   427 }
   429 void CompactibleFreeListSpace::print_on(outputStream* st) const {
   430   st->print_cr("COMPACTIBLE FREELIST SPACE");
   431   st->print_cr(" Space:");
   432   Space::print_on(st);
   434   st->print_cr("promoInfo:");
   435   _promoInfo.print_on(st);
   437   st->print_cr("_smallLinearAllocBlock");
   438   _smallLinearAllocBlock.print_on(st);
   440   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
   442   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
   443                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
   444 }
   446 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
   447 const {
   448   reportIndexedFreeListStatistics();
   449   gclog_or_tty->print_cr("Layout of Indexed Freelists");
   450   gclog_or_tty->print_cr("---------------------------");
   451   FreeList::print_labels_on(st, "size");
   452   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   453     _indexedFreeList[i].print_on(gclog_or_tty);
   454     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   455          fc = fc->next()) {
   456       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
   457                           fc, (HeapWord*)fc + i,
   458                           fc->cantCoalesce() ? "\t CC" : "");
   459     }
   460   }
   461 }
   463 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
   464 const {
   465   _promoInfo.print_on(st);
   466 }
   468 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
   469 const {
   470   _dictionary->reportStatistics();
   471   st->print_cr("Layout of Freelists in Tree");
   472   st->print_cr("---------------------------");
   473   _dictionary->print_free_lists(st);
   474 }
   476 class BlkPrintingClosure: public BlkClosure {
   477   const CMSCollector*             _collector;
   478   const CompactibleFreeListSpace* _sp;
   479   const CMSBitMap*                _live_bit_map;
   480   const bool                      _post_remark;
   481   outputStream*                   _st;
   482 public:
   483   BlkPrintingClosure(const CMSCollector* collector,
   484                      const CompactibleFreeListSpace* sp,
   485                      const CMSBitMap* live_bit_map,
   486                      outputStream* st):
   487     _collector(collector),
   488     _sp(sp),
   489     _live_bit_map(live_bit_map),
   490     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
   491     _st(st) { }
   492   size_t do_blk(HeapWord* addr);
   493 };
   495 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
   496   size_t sz = _sp->block_size_no_stall(addr, _collector);
   497   assert(sz != 0, "Should always be able to compute a size");
   498   if (_sp->block_is_obj(addr)) {
   499     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
   500     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
   501       addr,
   502       dead ? "dead" : "live",
   503       sz,
   504       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
   505     if (CMSPrintObjectsInDump && !dead) {
   506       oop(addr)->print_on(_st);
   507       _st->print_cr("--------------------------------------");
   508     }
   509   } else { // free block
   510     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
   511       addr, sz, CMSPrintChunksInDump ? ":" : ".");
   512     if (CMSPrintChunksInDump) {
   513       ((FreeChunk*)addr)->print_on(_st);
   514       _st->print_cr("--------------------------------------");
   515     }
   516   }
   517   return sz;
   518 }
   520 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
   521   outputStream* st) {
   522   st->print_cr("\n=========================");
   523   st->print_cr("Block layout in CMS Heap:");
   524   st->print_cr("=========================");
   525   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
   526   blk_iterate(&bpcl);
   528   st->print_cr("\n=======================================");
   529   st->print_cr("Order & Layout of Promotion Info Blocks");
   530   st->print_cr("=======================================");
   531   print_promo_info_blocks(st);
   533   st->print_cr("\n===========================");
   534   st->print_cr("Order of Indexed Free Lists");
   535   st->print_cr("===========================");
   536   print_indexed_free_lists(st);
   538   st->print_cr("\n=================================");
   539   st->print_cr("Order of Free Lists in Dictionary");
   540   st->print_cr("=================================");
   541   print_dictionary_free_lists(st);
   542 }
   545 void CompactibleFreeListSpace::reportFreeListStatistics() const {
   546   assert_lock_strong(&_freelistLock);
   547   assert(PrintFLSStatistics != 0, "Reporting error");
   548   _dictionary->reportStatistics();
   549   if (PrintFLSStatistics > 1) {
   550     reportIndexedFreeListStatistics();
   551     size_t totalSize = totalSizeInIndexedFreeLists() +
   552                        _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
   553     gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
   554   }
   555 }
   557 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
   558   assert_lock_strong(&_freelistLock);
   559   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
   560                       "--------------------------------\n");
   561   size_t totalSize = totalSizeInIndexedFreeLists();
   562   size_t   freeBlocks = numFreeBlocksInIndexedFreeLists();
   563   gclog_or_tty->print("Total Free Space: %d\n", totalSize);
   564   gclog_or_tty->print("Max   Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
   565   gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
   566   if (freeBlocks != 0) {
   567     gclog_or_tty->print("Av.  Block  Size: %d\n", totalSize/freeBlocks);
   568   }
   569 }
   571 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
   572   size_t res = 0;
   573   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   574     debug_only(
   575       ssize_t recount = 0;
   576       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   577          fc = fc->next()) {
   578         recount += 1;
   579       }
   580       assert(recount == _indexedFreeList[i].count(),
   581         "Incorrect count in list");
   582     )
   583     res += _indexedFreeList[i].count();
   584   }
   585   return res;
   586 }
   588 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
   589   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
   590     if (_indexedFreeList[i].head() != NULL) {
   591       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   592       return (size_t)i;
   593     }
   594   }
   595   return 0;
   596 }
   598 void CompactibleFreeListSpace::set_end(HeapWord* value) {
   599   HeapWord* prevEnd = end();
   600   assert(prevEnd != value, "unnecessary set_end call");
   601   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   602         "New end is below unallocated block");
   603   _end = value;
   604   if (prevEnd != NULL) {
   605     // Resize the underlying block offset table.
   606     _bt.resize(pointer_delta(value, bottom()));
   607     if (value <= prevEnd) {
   608       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   609              "New end is below unallocated block");
   610     } else {
   611       // Now, take this new chunk and add it to the free blocks.
   612       // Note that the BOT has not yet been updated for this block.
   613       size_t newFcSize = pointer_delta(value, prevEnd);
   614       // XXX This is REALLY UGLY and should be fixed up. XXX
   615       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
   616         // Mark the boundary of the new block in BOT
   617         _bt.mark_block(prevEnd, value);
   618         // put it all in the linAB
   619         if (ParallelGCThreads == 0) {
   620           _smallLinearAllocBlock._ptr = prevEnd;
   621           _smallLinearAllocBlock._word_size = newFcSize;
   622           repairLinearAllocBlock(&_smallLinearAllocBlock);
   623         } else { // ParallelGCThreads > 0
   624           MutexLockerEx x(parDictionaryAllocLock(),
   625                           Mutex::_no_safepoint_check_flag);
   626           _smallLinearAllocBlock._ptr = prevEnd;
   627           _smallLinearAllocBlock._word_size = newFcSize;
   628           repairLinearAllocBlock(&_smallLinearAllocBlock);
   629         }
   630         // Births of chunks put into a LinAB are not recorded.  Births
   631         // of chunks as they are allocated out of a LinAB are.
   632       } else {
   633         // Add the block to the free lists, if possible coalescing it
   634         // with the last free block, and update the BOT and census data.
   635         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
   636       }
   637     }
   638   }
   639 }
   641 class FreeListSpace_DCTOC : public Filtering_DCTOC {
   642   CompactibleFreeListSpace* _cfls;
   643   CMSCollector* _collector;
   644 protected:
   645   // Override.
   646 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
   647   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
   648                                        HeapWord* bottom, HeapWord* top, \
   649                                        ClosureType* cl);                \
   650       void walk_mem_region_with_cl_par(MemRegion mr,                    \
   651                                        HeapWord* bottom, HeapWord* top, \
   652                                        ClosureType* cl);                \
   653     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
   654                                        HeapWord* bottom, HeapWord* top, \
   655                                        ClosureType* cl)
   656   walk_mem_region_with_cl_DECL(OopClosure);
   657   walk_mem_region_with_cl_DECL(FilteringClosure);
   659 public:
   660   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
   661                       CMSCollector* collector,
   662                       OopClosure* cl,
   663                       CardTableModRefBS::PrecisionStyle precision,
   664                       HeapWord* boundary) :
   665     Filtering_DCTOC(sp, cl, precision, boundary),
   666     _cfls(sp), _collector(collector) {}
   667 };
   669 // We de-virtualize the block-related calls below, since we know that our
   670 // space is a CompactibleFreeListSpace.
   672 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
   673 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
   674                                                  HeapWord* bottom,              \
   675                                                  HeapWord* top,                 \
   676                                                  ClosureType* cl) {             \
   677    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
   678    if (is_par) {                                                                \
   679      assert(SharedHeap::heap()->n_par_threads() ==                              \
   680             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
   681      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   682    } else {                                                                     \
   683      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   684    }                                                                            \
   685 }                                                                               \
   686 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
   687                                                       HeapWord* bottom,         \
   688                                                       HeapWord* top,            \
   689                                                       ClosureType* cl) {        \
   690   /* Skip parts that are before "mr", in case "block_start" sent us             \
   691      back too far. */                                                           \
   692   HeapWord* mr_start = mr.start();                                              \
   693   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
   694   HeapWord* next = bottom + bot_size;                                           \
   695   while (next < mr_start) {                                                     \
   696     bottom = next;                                                              \
   697     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
   698     next = bottom + bot_size;                                                   \
   699   }                                                                             \
   700                                                                                 \
   701   while (bottom < top) {                                                        \
   702     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
   703         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   704                     oop(bottom)) &&                                             \
   705         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   706       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   707       bottom += _cfls->adjustObjectSize(word_sz);                               \
   708     } else {                                                                    \
   709       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
   710     }                                                                           \
   711   }                                                                             \
   712 }                                                                               \
   713 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
   714                                                         HeapWord* bottom,       \
   715                                                         HeapWord* top,          \
   716                                                         ClosureType* cl) {      \
   717   /* Skip parts that are before "mr", in case "block_start" sent us             \
   718      back too far. */                                                           \
   719   HeapWord* mr_start = mr.start();                                              \
   720   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
   721   HeapWord* next = bottom + bot_size;                                           \
   722   while (next < mr_start) {                                                     \
   723     bottom = next;                                                              \
   724     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
   725     next = bottom + bot_size;                                                   \
   726   }                                                                             \
   727                                                                                 \
   728   while (bottom < top) {                                                        \
   729     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
   730         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   731                     oop(bottom)) &&                                             \
   732         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   733       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   734       bottom += _cfls->adjustObjectSize(word_sz);                               \
   735     } else {                                                                    \
   736       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
   737     }                                                                           \
   738   }                                                                             \
   739 }
   741 // (There are only two of these, rather than N, because the split is due
   742 // only to the introduction of the FilteringClosure, a local part of the
   743 // impl of this abstraction.)
   744 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
   745 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
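       // The de-virtualization mentioned above relies on explicitly qualified
       // calls: _cfls->CompactibleFreeListSpace::block_size(bottom) binds at
       // compile time instead of dispatching through the vtable, which is safe
       // because _cfls is known to be exactly a CompactibleFreeListSpace. A
       // minimal stand-alone sketch of the idiom (hypothetical types, not part
       // of HotSpot):
       //
       //   struct Base           { virtual size_t block_size() { return 1; } };
       //   struct Derived : Base { virtual size_t block_size() { return 2; } };
       //   size_t f(Derived* d) {
       //     return d->Derived::block_size();  // static call, no virtual dispatch
       //   }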
   747 DirtyCardToOopClosure*
   748 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
   749                                       CardTableModRefBS::PrecisionStyle precision,
   750                                       HeapWord* boundary) {
   751   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
   752 }
   755 // Note on locking for the space iteration functions:
   756 // since the collector's iteration activities are concurrent with
   757 // allocation activities by mutators, absent a suitable mutual exclusion
   758 // mechanism the iterators may go awry. For instance a block being iterated
   759 // may suddenly be allocated or divided up and part of it allocated and
   760 // so on.
   762 // Apply the given closure to each block in the space.
   763 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
   764   assert_lock_strong(freelistLock());
   765   HeapWord *cur, *limit;
   766   for (cur = bottom(), limit = end(); cur < limit;
   767        cur += cl->do_blk_careful(cur));
   768 }
   770 // Apply the given closure to each block in the space.
   771 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
   772   assert_lock_strong(freelistLock());
   773   HeapWord *cur, *limit;
   774   for (cur = bottom(), limit = end(); cur < limit;
   775        cur += cl->do_blk(cur));
   776 }
   778 // Apply the given closure to each oop in the space.
   779 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
   780   assert_lock_strong(freelistLock());
   781   HeapWord *cur, *limit;
   782   size_t curSize;
   783   for (cur = bottom(), limit = end(); cur < limit;
   784        cur += curSize) {
   785     curSize = block_size(cur);
   786     if (block_is_obj(cur)) {
   787       oop(cur)->oop_iterate(cl);
   788     }
   789   }
   790 }
   792 // Apply the given closure to each oop in the intersection of the space and the memory region.
   793 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
   794   assert_lock_strong(freelistLock());
   795   if (is_empty()) {
   796     return;
   797   }
   798   MemRegion cur = MemRegion(bottom(), end());
   799   mr = mr.intersection(cur);
   800   if (mr.is_empty()) {
   801     return;
   802   }
   803   if (mr.equals(cur)) {
   804     oop_iterate(cl);
   805     return;
   806   }
   807   assert(mr.end() <= end(), "just took an intersection above");
   808   HeapWord* obj_addr = block_start(mr.start());
   809   HeapWord* t = mr.end();
   811   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
   812   if (block_is_obj(obj_addr)) {
   813     // Handle first object specially.
   814     oop obj = oop(obj_addr);
   815     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
   816   } else {
   817     FreeChunk* fc = (FreeChunk*)obj_addr;
   818     obj_addr += fc->size();
   819   }
   820   while (obj_addr < t) {
   821     HeapWord* obj = obj_addr;
   822     obj_addr += block_size(obj_addr);
   823     // If "obj_addr" is not greater than top, then the
   824     // entire object "obj" is within the region.
   825     if (obj_addr <= t) {
   826       if (block_is_obj(obj)) {
   827         oop(obj)->oop_iterate(cl);
   828       }
   829     } else {
   830       // "obj" extends beyond end of region
   831       if (block_is_obj(obj)) {
   832         oop(obj)->oop_iterate(&smr_blk);
   833       }
   834       break;
   835     }
   836   }
   837 }
   839 // NOTE: In the following methods, in order to safely be able to
   840 // apply the closure to an object, we need to be sure that the
   841 // object has been initialized. We are guaranteed that an object
   842 // is initialized if we are holding the Heap_lock with the
   843 // world stopped.
   844 void CompactibleFreeListSpace::verify_objects_initialized() const {
   845   if (is_init_completed()) {
   846     assert_locked_or_safepoint(Heap_lock);
   847     if (Universe::is_fully_initialized()) {
   848       guarantee(SafepointSynchronize::is_at_safepoint(),
   849                 "Required for objects to be initialized");
   850     }
   851   } // else make a concession at vm start-up
   852 }
   854 // Apply the given closure to each object in the space
   855 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
   856   assert_lock_strong(freelistLock());
   857   NOT_PRODUCT(verify_objects_initialized());
   858   HeapWord *cur, *limit;
   859   size_t curSize;
   860   for (cur = bottom(), limit = end(); cur < limit;
   861        cur += curSize) {
   862     curSize = block_size(cur);
   863     if (block_is_obj(cur)) {
   864       blk->do_object(oop(cur));
   865     }
   866   }
   867 }
   869 // Apply the given closure to each live object in the space
   870 //   The usage of CompactibleFreeListSpace
   871 // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
   872 // objects in the space to contain references to objects that are no
   873 // longer valid.  For example, an object may reference another object
   874 // that has already been swept up (collected).  This method uses
   875 // obj_is_alive() to determine whether it is safe to apply the closure to
   876 // an object.  See obj_is_alive() for details on how liveness of an
   877 // object is decided.
   879 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
   880   assert_lock_strong(freelistLock());
   881   NOT_PRODUCT(verify_objects_initialized());
   882   HeapWord *cur, *limit;
   883   size_t curSize;
   884   for (cur = bottom(), limit = end(); cur < limit;
   885        cur += curSize) {
   886     curSize = block_size(cur);
   887     if (block_is_obj(cur) && obj_is_alive(cur)) {
   888       blk->do_object(oop(cur));
   889     }
   890   }
   891 }
   893 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
   894                                                   UpwardsObjectClosure* cl) {
   895   assert_locked(freelistLock());
   896   NOT_PRODUCT(verify_objects_initialized());
   897   Space::object_iterate_mem(mr, cl);
   898 }
   900 // Callers of this iterator beware: The closure application should
   901 // be robust in the face of uninitialized objects and should (always)
   902 // return a correct size so that the next addr + size below gives us a
   903 // valid block boundary. [See for instance,
   904 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   905 // in ConcurrentMarkSweepGeneration.cpp.]
   906 HeapWord*
   907 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
   908   assert_lock_strong(freelistLock());
   909   HeapWord *addr, *last;
   910   size_t size;
   911   for (addr = bottom(), last  = end();
   912        addr < last; addr += size) {
   913     FreeChunk* fc = (FreeChunk*)addr;
   914     if (fc->isFree()) {
   915       // Since we hold the free list lock, which protects direct
   916       // allocation in this generation by mutators, a free object
   917       // will remain free throughout this iteration code.
   918       size = fc->size();
   919     } else {
   920       // Note that the object need not necessarily be initialized,
   921       // because (for instance) the free list lock does NOT protect
   922       // object initialization. The closure application below must
   923       // therefore be correct in the face of uninitialized objects.
   924       size = cl->do_object_careful(oop(addr));
   925       if (size == 0) {
   926         // An unparsable object found. Signal early termination.
   927         return addr;
   928       }
   929     }
   930   }
   931   return NULL;
   932 }
   934 // Callers of this iterator beware: The closure application should
   935 // be robust in the face of uninitialized objects and should (always)
   936 // return a correct size so that the next addr + size below gives us a
   937 // valid block boundary. [See for instance,
   938 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   939 // in ConcurrentMarkSweepGeneration.cpp.]
   940 HeapWord*
   941 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
   942   ObjectClosureCareful* cl) {
   943   assert_lock_strong(freelistLock());
   944   // Can't use used_region() below because it may not necessarily
   945   // be the same as [bottom(),end()); although we could
   946   // use [used_region().start(),round_to(used_region().end(),CardSize)),
   947   // that appears too cumbersome, so we just do the simpler check
   948   // in the assertion below.
   949   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
   950          "mr should be non-empty and within used space");
   951   HeapWord *addr, *end;
   952   size_t size;
   953   for (addr = block_start_careful(mr.start()), end  = mr.end();
   954        addr < end; addr += size) {
   955     FreeChunk* fc = (FreeChunk*)addr;
   956     if (fc->isFree()) {
   957       // Since we hold the free list lock, which protects direct
   958       // allocation in this generation by mutators, a free object
   959       // will remain free throughout this iteration code.
   960       size = fc->size();
   961     } else {
   962       // Note that the object need not necessarily be initialized,
   963       // because (for instance) the free list lock does NOT protect
   964       // object initialization. The closure application below must
   965       // therefore be correct in the face of uninitialized objects.
   966       size = cl->do_object_careful_m(oop(addr), mr);
   967       if (size == 0) {
   968         // An unparsable object found. Signal early termination.
   969         return addr;
   970       }
   971     }
   972   }
   973   return NULL;
   974 }
   977 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
   978   NOT_PRODUCT(verify_objects_initialized());
   979   return _bt.block_start(p);
   980 }
   982 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
   983   return _bt.block_start_careful(p);
   984 }
   986 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   987   NOT_PRODUCT(verify_objects_initialized());
   988   // This must be volatile, or else there is a danger that the compiler
   989   // will compile the code below into a sometimes-infinite loop, by keeping
   990   // the value read the first time in a register.
   991   while (true) {
   992     // We must do this until we get a consistent view of the object.
   993     if (FreeChunk::indicatesFreeChunk(p)) {
   994       volatile FreeChunk* fc = (volatile FreeChunk*)p;
   995       size_t res = fc->size();
   996       // If the object is still a free chunk, return the size, else it
   997       // has been allocated so try again.
   998       if (FreeChunk::indicatesFreeChunk(p)) {
   999         assert(res != 0, "Block size should not be 0");
  1000         return res;
  1001       }
  1002     } else {
  1003       // must read from what 'p' points to in each loop.
  1004       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
  1005       if (k != NULL) {
  1006         assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
  1007         oop o = (oop)p;
  1008         assert(o->is_parsable(), "Should be parsable");
  1009         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
  1010         size_t res = o->size_given_klass(k->klass_part());
  1011         res = adjustObjectSize(res);
  1012         assert(res != 0, "Block size should not be 0");
  1013         return res;
  1014       }
  1015     }
  1016   }
  1017 }
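       // The loop above is a lock-free "read, validate, retry" pattern: the
       // free-chunk indicator is checked, the size is read, and the indicator
       // is checked again; if the chunk was allocated between the two checks,
       // the stale size is discarded and the next iteration reads the
       // now-installed klass instead. The volatile qualifiers keep the
       // compiler from caching either read in a register, as the comment at
       // the top of the method notes.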
  1019 // A variant of the above that uses the Printezis bits for
  1020 // unparsable but allocated objects. This avoids any possible
  1021 // stalls waiting for mutators to initialize objects, and is
  1022 // thus potentially faster than the variant above. However,
  1023 // this variant may return a zero size for a block that is
  1024 // under mutation and for which a consistent size cannot be
  1025 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
  1026 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
  1027                                                      const CMSCollector* c)
  1028 const {
  1029   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1030   // This must be volatile, or else there is a danger that the compiler
  1031   // will compile the code below into a sometimes-infinite loop, by keeping
  1032   // the value read the first time in a register.
  1033   DEBUG_ONLY(uint loops = 0;)
  1034   while (true) {
  1035     // We must do this until we get a consistent view of the object.
  1036     if (FreeChunk::indicatesFreeChunk(p)) {
  1037       volatile FreeChunk* fc = (volatile FreeChunk*)p;
  1038       size_t res = fc->size();
  1039       if (FreeChunk::indicatesFreeChunk(p)) {
  1040         assert(res != 0, "Block size should not be 0");
  1041         assert(loops == 0, "Should be 0");
  1042         return res;
  1043       }
  1044     } else {
  1045       // must read from what 'p' points to in each loop.
  1046       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
  1047       // We trust the size of any object that has a non-NULL
  1048       // klass and (for those in the perm gen) is parsable
  1049       // -- irrespective of its conc_safe-ty.
  1050       if (k != NULL && ((oopDesc*)p)->is_parsable()) {
  1051         assert(k->is_oop(), "Should really be klass oop.");
  1052         oop o = (oop)p;
  1053         assert(o->is_oop(), "Should be an oop");
  1054         size_t res = o->size_given_klass(k->klass_part());
  1055         res = adjustObjectSize(res);
  1056         assert(res != 0, "Block size should not be 0");
  1057         return res;
  1058       } else {
  1059         // May return 0 if P-bits not present.
  1060         return c->block_size_if_printezis_bits(p);
  1061       }
  1062     }
  1063     assert(loops == 0, "Can loop at most once");
  1064     DEBUG_ONLY(loops++;)
  1065   }
  1066 }
  1068 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
  1069   NOT_PRODUCT(verify_objects_initialized());
  1070   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1071   FreeChunk* fc = (FreeChunk*)p;
  1072   if (fc->isFree()) {
  1073     return fc->size();
  1074   } else {
  1075     // Ignore mark word because this may be a recently promoted
  1076     // object whose mark word is used to chain together grey
  1077     // objects (the last one would have a null value).
  1078     assert(oop(p)->is_oop(true), "Should be an oop");
  1079     return adjustObjectSize(oop(p)->size());
  1080   }
  1081 }
  1083 // This implementation assumes that the property of "being an object" is
  1084 // stable.  But being a free chunk may not be (because of parallel
  1085 // promotion.)
  1086 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
  1087   FreeChunk* fc = (FreeChunk*)p;
  1088   assert(is_in_reserved(p), "Should be in space");
  1089   // When doing a mark-sweep-compact of the CMS generation, this
  1090   // assertion may fail because prepare_for_compaction() uses
  1091   // space that is garbage to maintain information on ranges of
  1092   // live objects so that these live ranges can be moved as a whole.
  1093   // Comment out this assertion until that problem can be solved
  1094   // (i.e., that the block start calculation may look at objects
  1095   // at address below "p" in finding the object that contains "p"
  1096   // and those objects (if garbage) may have been modified to hold
  1097   // live range information.)
  1098   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
  1099   //        "Should be a block boundary");
  1100   if (FreeChunk::indicatesFreeChunk(p)) return false;
  1101   klassOop k = oop(p)->klass_or_null();
  1102   if (k != NULL) {
  1103     // Ignore mark word because it may have been used to
  1104     // chain together promoted objects (the last one
  1105     // would have a null value).
  1106     assert(oop(p)->is_oop(true), "Should be an oop");
  1107     return true;
  1108   } else {
  1109     return false;  // Was not an object at the start of collection.
  1110   }
  1111 }
  1113 // Check if the object is alive. This fact is checked either by consulting
  1114 // the main marking bitmap in the sweeping phase or, if it's a permanent
  1115 // generation and we're not in the sweeping phase, by checking the
  1116 // perm_gen_verify_bit_map where we store the "deadness" information if
  1117 // we did not sweep the perm gen in the most recent previous GC cycle.
  1118 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
  1119   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
  1120          "Else races are possible");
  1121   assert(block_is_obj(p), "The address should point to an object");
  1123   // If we're sweeping, we use object liveness information from the main bit map
  1124   // for both perm gen and old gen.
  1125   // We don't need to lock the bitmap (live_map or dead_map below), because
  1126   // EITHER we are in the middle of the sweeping phase, and the
  1127   // main marking bit map (live_map below) is locked,
  1128   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
  1129   // is stable, because it's mutated only in the sweeping phase.
  1130   // NOTE: This method is also used by jmap where, if class unloading is
  1131   // off, the results can return "false" for legitimate perm objects,
  1132   // when we are not in the midst of a sweeping phase, which can result
  1133   // in jmap not reporting certain perm gen objects. This will be moot
  1134   // if/when the perm gen goes away in the future.
  1135   if (_collector->abstract_state() == CMSCollector::Sweeping) {
  1136     CMSBitMap* live_map = _collector->markBitMap();
  1137     return live_map->par_isMarked((HeapWord*) p);
  1138   } else {
  1139     // If we're not currently sweeping and we haven't swept the perm gen in
  1140     // the previous concurrent cycle then we may have dead but unswept objects
  1141     // in the perm gen. In this case, we use the "deadness" information
  1142     // that we had saved in perm_gen_verify_bit_map at the last sweep.
  1143     if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
  1144       if (_collector->verifying()) {
  1145         CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
  1146         // The object was marked in the dead_map bitmap at the previous sweep
  1147         // if it was known to be dead; if the bitmap has not been allocated
  1148         // then the object is treated as alive.
  1149         return (dead_map->sizeInBits() == 0) // bit_map has not been allocated
  1150                || !dead_map->par_isMarked((HeapWord*) p);
  1151       } else {
  1152         return false; // We can't say for sure if it's live, so we say that it's dead.
  1156   return true;
  1159 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
  1160   FreeChunk* fc = (FreeChunk*)p;
  1161   assert(is_in_reserved(p), "Should be in space");
  1162   assert(_bt.block_start(p) == p, "Should be a block boundary");
  1163   if (!fc->isFree()) {
  1164     // Ignore mark word because it may have been used to
  1165     // chain together promoted objects (the last one
  1166     // would have a null value).
  1167     assert(oop(p)->is_oop(true), "Should be an oop");
  1168     return true;
  1170   return false;
  1173 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
  1174 // approximate answer if you don't hold the freelist lock when you call this.
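       // When the lock is held, the value returned is exact: the sum over the
       // indexed lists of (list size i) * (number of chunks on list i), as
       // computed below.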
  1175 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
  1176   size_t size = 0;
  1177   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  1178     debug_only(
  1179       // We may be calling here without the lock in which case we
  1180       // won't do this modest sanity check.
  1181       if (freelistLock()->owned_by_self()) {
  1182         size_t total_list_size = 0;
  1183         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
  1184           fc = fc->next()) {
  1185           total_list_size += i;
  1187         assert(total_list_size == i * _indexedFreeList[i].count(),
  1188                "Count in list is incorrect");
  1191     size += i * _indexedFreeList[i].count();
  1193   return size;
  1196 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  1197   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  1198   return allocate(size);
  1201 HeapWord*
  1202 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
  1203   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
  1206 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  1207   assert_lock_strong(freelistLock());
  1208   HeapWord* res = NULL;
  1209   assert(size == adjustObjectSize(size),
  1210          "use adjustObjectSize() before calling into allocate()");
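         // Illustrative call pattern (a sketch only; "cfls" and "word_size" are
         // hypothetical caller-side names):
         //   size_t sz = cfls->adjustObjectSize(word_size);
         //   HeapWord* mem = cfls->par_allocate(sz);  // par_allocate() takes the lock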
  1212   if (_adaptive_freelists) {
  1213     res = allocate_adaptive_freelists(size);
  1214   } else {  // non-adaptive free lists
  1215     res = allocate_non_adaptive_freelists(size);
  1218   if (res != NULL) {
  1219     // check that res does lie in this space!
  1220     assert(is_in_reserved(res), "Not in this space!");
  1221     assert(is_aligned((void*)res), "alignment check");
  1223     FreeChunk* fc = (FreeChunk*)res;
  1224     fc->markNotFree();
  1225     assert(!fc->isFree(), "shouldn't be marked free");
  1226     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
  1227     // Verify that the block offset table shows this to
  1228     // be a single block, but not one which is unallocated.
  1229     _bt.verify_single_block(res, size);
  1230     _bt.verify_not_unallocated(res, size);
  1231     // mangle a just allocated object with a distinct pattern.
  1232     debug_only(fc->mangleAllocated(size));
  1235   return res;
  1238 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
  1239   HeapWord* res = NULL;
  1240   // try and use linear allocation for smaller blocks
  1241   if (size < _smallLinearAllocBlock._allocation_size_limit) {
  1242     // if successful, the following also adjusts block offset table
  1243     res = getChunkFromSmallLinearAllocBlock(size);
  1245   // Else triage to indexed lists for smaller sizes
  1246   if (res == NULL) {
  1247     if (size < SmallForDictionary) {
  1248       res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1249     } else {
  1250       // else get it from the big dictionary; if even this doesn't
  1251       // work we are out of luck.
  1252       res = (HeapWord*)getChunkFromDictionaryExact(size);
  1256   return res;
  1259 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
  1260   assert_lock_strong(freelistLock());
  1261   HeapWord* res = NULL;
  1262   assert(size == adjustObjectSize(size),
  1263          "use adjustObjectSize() before calling into allocate()");
  1265   // Strategy
  1266   //   if small
  1267   //     exact size from small object indexed list
  1268   //     small or large linear allocation block (linAB) as appropriate
  1269   //     take from lists of greater sized chunks
  1270   //   else
  1271   //     dictionary
  1272   //     small or large linear allocation block if it has the space
  1273   // Try allocating exact size from indexTable first
  1274   if (size < IndexSetSize) {
  1275     res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1276     if (res != NULL) {
  1277       assert(res != (HeapWord*)_indexedFreeList[size].head(),
  1278         "Not removed from free list");
  1279       // no block offset table adjustment is necessary on blocks in
  1280       // the indexed lists.
  1282     // Try allocating from the small LinAB
  1283     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
  1284         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
  1285         // if successful, the above also adjusts block offset table
  1286         // Note that this call will refill the LinAB to
  1287         // satisfy the request.  This is different than
  1288         // evm.
  1289         // Don't record chunk off a LinAB?  smallSplitBirth(size);
  1290     } else {
  1291       // Raid the exact free lists larger than size, even if they are not
  1292       // overpopulated.
  1293       res = (HeapWord*) getChunkFromGreater(size);
  1295   } else {
  1296     // Big objects get allocated directly from the dictionary.
  1297     res = (HeapWord*) getChunkFromDictionaryExact(size);
  1298     if (res == NULL) {
  1299       // Try hard not to fail since an allocation failure will likely
  1300       // trigger a synchronous GC.  Try to get the space from the
  1301       // allocation blocks.
  1302       res = getChunkFromSmallLinearAllocBlockRemainder(size);
  1306   return res;
  1309 // A worst-case estimate of the space required (in HeapWords) to expand the heap
  1310 // when promoting obj.
  1311 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  1312   // Depending on the object size, expansion may require refilling either a
  1313   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
  1314   // is added because the dictionary may over-allocate to avoid fragmentation.
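         // Restating the code below: space = obj_size (bumped to the small linAB
         // refill size when not using adaptive free lists) plus
         // _promoInfo.refillSize() + 2 * MinChunkSize of slack.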
  1315   size_t space = obj_size;
  1316   if (!_adaptive_freelists) {
  1317     space = MAX2(space, _smallLinearAllocBlock._refillSize);
  1319   space += _promoInfo.refillSize() + 2 * MinChunkSize;
  1320   return space;
  1323 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  1324   FreeChunk* ret;
  1326   assert(numWords >= MinChunkSize, "Size is less than minimum");
  1327   assert(linearAllocationWouldFail() || bestFitFirst(),
  1328     "Should not be here");
  1330   size_t i;
  1331   size_t currSize = numWords + MinChunkSize;
  1332   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  1333   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
  1334     FreeList* fl = &_indexedFreeList[i];
  1335     if (fl->head()) {
  1336       ret = getFromListGreater(fl, numWords);
  1337       assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
  1338       return ret;
  1342   currSize = MAX2((size_t)SmallForDictionary,
  1343                   (size_t)(numWords + MinChunkSize));
  1345   /* Try to get a chunk that satisfies request, while avoiding
  1346      fragmentation that can't be handled. */
  1348     ret =  dictionary()->getChunk(currSize);
  1349     if (ret != NULL) {
  1350       assert(ret->size() - numWords >= MinChunkSize,
  1351              "Chunk is too small");
  1352       _bt.allocated((HeapWord*)ret, ret->size());
  1353       /* Carve returned chunk. */
  1354       (void) splitChunkAndReturnRemainder(ret, numWords);
  1355       /* Label this as no longer a free chunk. */
  1356       assert(ret->isFree(), "This chunk should be free");
  1357       ret->linkPrev(NULL);
  1359     assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
  1360     return ret;
  1362   ShouldNotReachHere();
  1365 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
  1366   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  1367   return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
  1370 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
  1371   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
  1372          (_smallLinearAllocBlock._word_size == fc->size()),
  1373          "Linear allocation block shows incorrect size");
  1374   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
  1375           (_smallLinearAllocBlock._word_size == fc->size()));
  1378 // Check if the purported free chunk is present either as a linear
  1379 // allocation block, the size-indexed table of (smaller) free blocks,
  1380 // or the larger free blocks kept in the binary tree dictionary.
  1381 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
  1382   if (verify_chunk_is_linear_alloc_block(fc)) {
  1383     return true;
  1384   } else if (fc->size() < IndexSetSize) {
  1385     return verifyChunkInIndexedFreeLists(fc);
  1386   } else {
  1387     return dictionary()->verifyChunkInFreeLists(fc);
  1391 #ifndef PRODUCT
  1392 void CompactibleFreeListSpace::assert_locked() const {
  1393   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
  1396 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
  1397   CMSLockVerifier::assert_locked(lock);
  1399 #endif
  1401 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
  1402   // In the parallel case, the main thread holds the free list lock
  1403   // on behalf of the parallel threads.
  1404   FreeChunk* fc;
  1406     // If GC is parallel, this might be called by several threads.
  1407     // This should be rare enough that the locking overhead won't affect
  1408     // the sequential code.
  1409     MutexLockerEx x(parDictionaryAllocLock(),
  1410                     Mutex::_no_safepoint_check_flag);
  1411     fc = getChunkFromDictionary(size);
  1413   if (fc != NULL) {
  1414     fc->dontCoalesce();
  1415     assert(fc->isFree(), "Should be free, but not coalescable");
  1416     // Verify that the block offset table shows this to
  1417     // be a single block, but not one which is unallocated.
  1418     _bt.verify_single_block((HeapWord*)fc, fc->size());
  1419     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  1421   return fc;
  1424 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
  1425   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  1426   assert_locked();
  1428   // if we are tracking promotions, then first ensure space for
  1429   // promotion (including spooling space for saving header if necessary).
  1430   // then allocate and copy, then track promoted info if needed.
  1431   // When tracking (see PromotionInfo::track()), the mark word may
  1432   // be displaced and in this case restoration of the mark word
  1433   // occurs in the (oop_since_save_marks_)iterate phase.
  1434   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
  1435     return NULL;
  1437   // Call the allocate(size_t, bool) form directly to avoid the
  1438   // additional call through the allocate(size_t) form.  Having
  1439   // the compiler inline the call is problematic because allocate(size_t)
  1440   // is a virtual method.
  1441   HeapWord* res = allocate(adjustObjectSize(obj_size));
  1442   if (res != NULL) {
  1443     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
  1444     // if we should be tracking promotions, do so.
  1445     if (_promoInfo.tracking()) {
  1446         _promoInfo.track((PromotedObject*)res);
  1449   return oop(res);
  1452 HeapWord*
  1453 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
  1454   assert_locked();
  1455   assert(size >= MinChunkSize, "minimum chunk size");
  1456   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
  1457     "maximum from smallLinearAllocBlock");
  1458   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
  1461 HeapWord*
  1462 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
  1463                                                        size_t size) {
  1464   assert_locked();
  1465   assert(size >= MinChunkSize, "too small");
  1466   HeapWord* res = NULL;
  1467   // Try to do linear allocation from blk, making sure that the block is not empty.
  1468   if (blk->_word_size == 0) {
  1469     // We have probably been unable to fill this either in the prologue or
  1470     // when it was exhausted at the last linear allocation. Bail out until
  1471     // next time.
  1472     assert(blk->_ptr == NULL, "consistency check");
  1473     return NULL;
  1475   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
  1476   res = getChunkFromLinearAllocBlockRemainder(blk, size);
  1477   if (res != NULL) return res;
  1479   // about to exhaust this linear allocation block
  1480   if (blk->_word_size == size) { // exactly satisfied
  1481     res = blk->_ptr;
  1482     _bt.allocated(res, blk->_word_size);
  1483   } else if (size + MinChunkSize <= blk->_refillSize) {
  1484     size_t sz = blk->_word_size;
  1485     // Update _unallocated_block if the size is such that chunk would be
  1486     // returned to the indexed free list.  All other chunks in the indexed
  1487     // free lists are allocated from the dictionary so that _unallocated_block
  1488     // has already been adjusted for them.  Do it here so that the cost
  1489   // is incurred for all chunks added back to the indexed free lists.
  1490     if (sz < SmallForDictionary) {
  1491       _bt.allocated(blk->_ptr, sz);
  1493     // Return the chunk that isn't big enough, and then refill below.
  1494     addChunkToFreeLists(blk->_ptr, sz);
  1495     splitBirth(sz);
  1496     // Don't keep statistics on adding back chunk from a LinAB.
  1497   } else {
  1498     // A refilled block would not satisfy the request.
  1499     return NULL;
  1502   blk->_ptr = NULL; blk->_word_size = 0;
  1503   refillLinearAllocBlock(blk);
  1504   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
  1505          "block was replenished");
  1506   if (res != NULL) {
  1507     splitBirth(size);
  1508     repairLinearAllocBlock(blk);
  1509   } else if (blk->_ptr != NULL) {
  1510     res = blk->_ptr;
  1511     size_t blk_size = blk->_word_size;
  1512     blk->_word_size -= size;
  1513     blk->_ptr  += size;
  1514     splitBirth(size);
  1515     repairLinearAllocBlock(blk);
  1516     // Update BOT last so that other (parallel) GC threads see a consistent
  1517     // view of the BOT and free blocks.
  1518     // Above must occur before BOT is updated below.
  1519     OrderAccess::storestore();
  1520     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1522   return res;
  1525 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
  1526                                         LinearAllocBlock* blk,
  1527                                         size_t size) {
  1528   assert_locked();
  1529   assert(size >= MinChunkSize, "too small");
  1531   HeapWord* res = NULL;
  1532   // This is the common case.  Keep it simple.
  1533   if (blk->_word_size >= size + MinChunkSize) {
  1534     assert(blk->_ptr != NULL, "consistency check");
  1535     res = blk->_ptr;
  1536     // Note that the BOT is up-to-date for the linAB before allocation.  It
  1537     // indicates the start of the linAB.  The split_block() updates the
  1538     // BOT for the linAB after the allocation (indicates the start of the
  1539     // next chunk to be allocated).
  1540     size_t blk_size = blk->_word_size;
  1541     blk->_word_size -= size;
  1542     blk->_ptr  += size;
  1543     splitBirth(size);
  1544     repairLinearAllocBlock(blk);
  1545     // Update BOT last so that other (parallel) GC threads see a consistent
  1546     // view of the BOT and free blocks.
  1547     // Above must occur before BOT is updated below.
  1548     OrderAccess::storestore();
  1549     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1550     _bt.allocated(res, size);
  1552   return res;
  1555 FreeChunk*
  1556 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
  1557   assert_locked();
  1558   assert(size < SmallForDictionary, "just checking");
  1559   FreeChunk* res;
  1560   res = _indexedFreeList[size].getChunkAtHead();
  1561   if (res == NULL) {
  1562     res = getChunkFromIndexedFreeListHelper(size);
  1564   _bt.verify_not_unallocated((HeapWord*) res, size);
  1565   assert(res == NULL || res->size() == size, "Incorrect block size");
  1566   return res;
  1569 FreeChunk*
  1570 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
  1571   bool replenish) {
  1572   assert_locked();
  1573   FreeChunk* fc = NULL;
  1574   if (size < SmallForDictionary) {
  1575     assert(_indexedFreeList[size].head() == NULL ||
  1576       _indexedFreeList[size].surplus() <= 0,
  1577       "List for this size should be empty or under populated");
  1578     // Try best fit in exact lists before replenishing the list
  1579     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
  1580       // Replenish list.
  1581       //
  1582       // Things tried that failed.
  1583       //   Tried allocating out of the two LinAB's first before
  1584       // replenishing lists.
  1585       //   Tried small linAB of size 256 (size in indexed list)
  1586       // and replenishing indexed lists from the small linAB.
  1587       //
  1588       FreeChunk* newFc = NULL;
  1589       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
  1590       if (replenish_size < SmallForDictionary) {
  1591         // Do not replenish from an underpopulated size.
  1592         if (_indexedFreeList[replenish_size].surplus() > 0 &&
  1593             _indexedFreeList[replenish_size].head() != NULL) {
  1594           newFc = _indexedFreeList[replenish_size].getChunkAtHead();
  1595         } else if (bestFitFirst()) {
  1596           newFc = bestFitSmall(replenish_size);
  1599       if (newFc == NULL && replenish_size > size) {
  1600         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
  1601         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
  1603       // Note: The stats update re split-death of block obtained above
  1604       // will be recorded below precisely when we know we are going to
  1605       // be actually splitting it into more than one piece.
  1606       if (newFc != NULL) {
  1607         if  (replenish || CMSReplenishIntermediate) {
  1608           // Replenish this list and return one block to caller.
  1609           size_t i;
  1610           FreeChunk *curFc, *nextFc;
  1611           size_t num_blk = newFc->size() / size;
  1612           assert(num_blk >= 1, "Smaller than requested?");
  1613           assert(newFc->size() % size == 0, "Should be integral multiple of request");
  1614           if (num_blk > 1) {
  1615             // we are sure we will be splitting the block just obtained
  1616             // into multiple pieces; record the split-death of the original
  1617             splitDeath(replenish_size);
  1619           // carve up and link blocks 0, ..., num_blk - 2
  1620           // The last chunk is not added to the lists but is returned as the
  1621           // free chunk.
  1622           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
  1623                i = 0;
  1624                i < (num_blk - 1);
  1625                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
  1626                i++) {
  1627             curFc->setSize(size);
  1628             // Don't record this as a return in order to try and
  1629             // determine the "returns" from a GC.
  1630             _bt.verify_not_unallocated((HeapWord*) fc, size);
  1631             _indexedFreeList[size].returnChunkAtTail(curFc, false);
  1632             _bt.mark_block((HeapWord*)curFc, size);
  1633             splitBirth(size);
  1634             // Don't record the initial population of the indexed list
  1635             // as a split birth.
  1638           // check that the arithmetic was OK above
  1639           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
  1640             "inconsistency in carving newFc");
  1641           curFc->setSize(size);
  1642           _bt.mark_block((HeapWord*)curFc, size);
  1643           splitBirth(size);
  1644           fc = curFc;
  1645         } else {
  1646           // Return entire block to caller
  1647           fc = newFc;
  1651   } else {
  1652     // Get a free chunk from the free chunk dictionary to be returned to
  1653     // replenish the indexed free list.
  1654     fc = getChunkFromDictionaryExact(size);
  1656   // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
  1657   return fc;
  1660 FreeChunk*
  1661 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
  1662   assert_locked();
  1663   FreeChunk* fc = _dictionary->getChunk(size);
  1664   if (fc == NULL) {
  1665     return NULL;
  1667   _bt.allocated((HeapWord*)fc, fc->size());
  1668   if (fc->size() >= size + MinChunkSize) {
  1669     fc = splitChunkAndReturnRemainder(fc, size);
  1671   assert(fc->size() >= size, "chunk too small");
  1672   assert(fc->size() < size + MinChunkSize, "chunk too big");
  1673   _bt.verify_single_block((HeapWord*)fc, fc->size());
  1674   return fc;
  1677 FreeChunk*
  1678 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
  1679   assert_locked();
  1680   FreeChunk* fc = _dictionary->getChunk(size);
  1681   if (fc == NULL) {
  1682     return fc;
  1684   _bt.allocated((HeapWord*)fc, fc->size());
  1685   if (fc->size() == size) {
  1686     _bt.verify_single_block((HeapWord*)fc, size);
  1687     return fc;
  1689   assert(fc->size() > size, "getChunk() guarantee");
  1690   if (fc->size() < size + MinChunkSize) {
  1691     // Return the chunk to the dictionary and go get a bigger one.
  1692     returnChunkToDictionary(fc);
  1693     fc = _dictionary->getChunk(size + MinChunkSize);
  1694     if (fc == NULL) {
  1695       return NULL;
  1697     _bt.allocated((HeapWord*)fc, fc->size());
  1699   assert(fc->size() >= size + MinChunkSize, "tautology");
  1700   fc = splitChunkAndReturnRemainder(fc, size);
  1701   assert(fc->size() == size, "chunk is wrong size");
  1702   _bt.verify_single_block((HeapWord*)fc, size);
  1703   return fc;
  1706 void
  1707 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
  1708   assert_locked();
  1710   size_t size = chunk->size();
  1711   _bt.verify_single_block((HeapWord*)chunk, size);
  1712   // adjust _unallocated_block downward, as necessary
  1713   _bt.freed((HeapWord*)chunk, size);
  1714   _dictionary->returnChunk(chunk);
  1715 #ifndef PRODUCT
  1716   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1717     TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
  1719 #endif // PRODUCT
  1722 void
  1723 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
  1724   assert_locked();
  1725   size_t size = fc->size();
  1726   _bt.verify_single_block((HeapWord*) fc, size);
  1727   _bt.verify_not_unallocated((HeapWord*) fc, size);
  1728   if (_adaptive_freelists) {
  1729     _indexedFreeList[size].returnChunkAtTail(fc);
  1730   } else {
  1731     _indexedFreeList[size].returnChunkAtHead(fc);
  1733 #ifndef PRODUCT
  1734   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1735      _indexedFreeList[size].verify_stats();
  1737 #endif // PRODUCT
  1740 // Add chunk to end of last block -- if it's the largest
  1741 // block -- and update BOT and census data. We would
  1742 // of course have preferred to coalesce it with the
  1743 // last block, but it's currently less expensive to find the
  1744 // largest block than it is to find the last.
  1745 void
  1746 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
  1747   HeapWord* chunk, size_t     size) {
  1748   // check that the chunk does lie in this space!
  1749   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1750   // One of the parallel gc task threads may be here
  1751   // whilst others are allocating.
  1752   Mutex* lock = NULL;
  1753   if (ParallelGCThreads != 0) {
  1754     lock = &_parDictionaryAllocLock;
  1756   FreeChunk* ec;
  1758     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1759     ec = dictionary()->findLargestDict();  // get largest block
  1760     if (ec != NULL && ec->end() == chunk) {
  1761       // It's a coterminal block - we can coalesce.
  1762       size_t old_size = ec->size();
  1763       coalDeath(old_size);
  1764       removeChunkFromDictionary(ec);
  1765       size += old_size;
  1766     } else {
  1767       ec = (FreeChunk*)chunk;
  1770   ec->setSize(size);
  1771   debug_only(ec->mangleFreed(size));
  1772   if (size < SmallForDictionary) {
  1773     lock = _indexedFreeListParLocks[size];
  1775   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1776   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  1777   // record the birth under the lock since the recording involves
  1778   // manipulation of the list on which the chunk lives and
  1779   // if the chunk is allocated and is the last on the list,
  1780   // the list can go away.
  1781   coalBirth(size);
  1784 void
  1785 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
  1786                                               size_t     size) {
  1787   // check that the chunk does lie in this space!
  1788   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1789   assert_locked();
  1790   _bt.verify_single_block(chunk, size);
  1792   FreeChunk* fc = (FreeChunk*) chunk;
  1793   fc->setSize(size);
  1794   debug_only(fc->mangleFreed(size));
  1795   if (size < SmallForDictionary) {
  1796     returnChunkToFreeList(fc);
  1797   } else {
  1798     returnChunkToDictionary(fc);
  1802 void
  1803 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
  1804   size_t size, bool coalesced) {
  1805   assert_locked();
  1806   assert(chunk != NULL, "null chunk");
  1807   if (coalesced) {
  1808     // repair BOT
  1809     _bt.single_block(chunk, size);
  1811   addChunkToFreeLists(chunk, size);
  1814 // We _must_ find the purported chunk on our free lists;
  1815 // we assert if we don't.
  1816 void
  1817 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
  1818   size_t size = fc->size();
  1819   assert_locked();
  1820   debug_only(verifyFreeLists());
  1821   if (size < SmallForDictionary) {
  1822     removeChunkFromIndexedFreeList(fc);
  1823   } else {
  1824     removeChunkFromDictionary(fc);
  1826   _bt.verify_single_block((HeapWord*)fc, size);
  1827   debug_only(verifyFreeLists());
  1830 void
  1831 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
  1832   size_t size = fc->size();
  1833   assert_locked();
  1834   assert(fc != NULL, "null chunk");
  1835   _bt.verify_single_block((HeapWord*)fc, size);
  1836   _dictionary->removeChunk(fc);
  1837   // adjust _unallocated_block upward, as necessary
  1838   _bt.allocated((HeapWord*)fc, size);
  1841 void
  1842 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
  1843   assert_locked();
  1844   size_t size = fc->size();
  1845   _bt.verify_single_block((HeapWord*)fc, size);
  1846   NOT_PRODUCT(
  1847     if (FLSVerifyIndexTable) {
  1848       verifyIndexedFreeList(size);
  1851   _indexedFreeList[size].removeChunk(fc);
  1852   NOT_PRODUCT(
  1853     if (FLSVerifyIndexTable) {
  1854       verifyIndexedFreeList(size);
  1859 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
  1860   /* A hint is the next larger size that has a surplus.
  1861      Start search at a size large enough to guarantee that
  1862      the excess is >= MinChunkSize. */
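         /* For example (illustrative only): a request of n words starts the hint
            walk at the aligned size n + MinChunkSize, so that any chunk found on a
            hinted list can give up n words and still leave a free remainder of at
            least MinChunkSize. */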
  1863   size_t start = align_object_size(numWords + MinChunkSize);
  1864   if (start < IndexSetSize) {
  1865     FreeList* it   = _indexedFreeList;
  1866     size_t    hint = _indexedFreeList[start].hint();
  1867     while (hint < IndexSetSize) {
  1868       assert(hint % MinObjAlignment == 0, "hint should be aligned");
  1869       FreeList *fl = &_indexedFreeList[hint];
  1870       if (fl->surplus() > 0 && fl->head() != NULL) {
  1871         // Found a list with surplus, reset original hint
  1872         // and split out a free chunk which is returned.
  1873         _indexedFreeList[start].set_hint(hint);
  1874         FreeChunk* res = getFromListGreater(fl, numWords);
  1875         assert(res == NULL || res->isFree(),
  1876           "Should be returning a free chunk");
  1877         return res;
  1879       hint = fl->hint(); /* keep looking */
  1881     /* None found. */
  1882     it[start].set_hint(IndexSetSize);
  1884   return NULL;
  1887 /* Requires fl->size >= numWords + MinChunkSize */
  1888 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
  1889   size_t numWords) {
  1890   FreeChunk *curr = fl->head();
  1891   assert(curr != NULL, "List is empty");
  1892   assert(numWords >= MinChunkSize, "Word size is too small");
  1893   size_t oldNumWords = curr->size();
  1894   assert(oldNumWords >= numWords + MinChunkSize,
  1895         "Size of chunks in the list is too small");
  1897   fl->removeChunk(curr);
  1898   // recorded indirectly by splitChunkAndReturnRemainder -
  1899   // smallSplit(oldNumWords, numWords);
  1900   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
  1901   // Does anything have to be done for the remainder in terms of
  1902   // fixing the card table?
  1903   assert(new_chunk == NULL || new_chunk->isFree(),
  1904     "Should be returning a free chunk");
  1905   return new_chunk;
  1908 FreeChunk*
  1909 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
  1910   size_t new_size) {
  1911   assert_locked();
  1912   size_t size = chunk->size();
  1913   assert(size > new_size, "Split from a smaller block?");
  1914   assert(is_aligned(chunk), "alignment problem");
  1915   assert(size == adjustObjectSize(size), "alignment problem");
  1916   size_t rem_size = size - new_size;
  1917   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
  1918   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
  1919   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
  1920   assert(is_aligned(ffc), "alignment problem");
  1921   ffc->setSize(rem_size);
  1922   ffc->linkNext(NULL);
  1923   ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  1924   // Above must occur before BOT is updated below.
  1925   // adjust block offset table
  1926   OrderAccess::storestore();
  1927   assert(chunk->isFree() && ffc->isFree(), "Error");
  1928   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
  1929   if (rem_size < SmallForDictionary) {
  1930     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  1931     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
  1932     assert(!is_par ||
  1933            (SharedHeap::heap()->n_par_threads() ==
  1934             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  1935     returnChunkToFreeList(ffc);
  1936     split(size, rem_size);
  1937     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
  1938   } else {
  1939     returnChunkToDictionary(ffc);
  1940     split(size, rem_size);
  1942   chunk->setSize(new_size);
  1943   return chunk;
  1946 void
  1947 CompactibleFreeListSpace::sweep_completed() {
  1948   // Now that space is probably plentiful, refill linear
  1949   // allocation blocks as needed.
  1950   refillLinearAllocBlocksIfNeeded();
  1953 void
  1954 CompactibleFreeListSpace::gc_prologue() {
  1955   assert_locked();
  1956   if (PrintFLSStatistics != 0) {
  1957     gclog_or_tty->print("Before GC:\n");
  1958     reportFreeListStatistics();
  1960   refillLinearAllocBlocksIfNeeded();
  1963 void
  1964 CompactibleFreeListSpace::gc_epilogue() {
  1965   assert_locked();
  1966   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
  1967     if (_smallLinearAllocBlock._word_size == 0)
  1968       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
  1970   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1971   _promoInfo.stopTrackingPromotions();
  1972   repairLinearAllocationBlocks();
  1973   // Print Space's stats
  1974   if (PrintFLSStatistics != 0) {
  1975     gclog_or_tty->print("After GC:\n");
  1976     reportFreeListStatistics();
  1980 // Iteration support, mostly delegated from a CMS generation
  1982 void CompactibleFreeListSpace::save_marks() {
  1983   assert(Thread::current()->is_VM_thread(),
  1984          "Global variable should only be set when single-threaded");
  1985   // Mark the "end" of the used space at the time of this call;
  1986   // note, however, that promoted objects from this point
  1987   // on are tracked in the _promoInfo below.
  1988   set_saved_mark_word(unallocated_block());
  1989 #ifdef ASSERT
  1990   // Check the sanity of save_marks() etc.
  1991   MemRegion ur    = used_region();
  1992   MemRegion urasm = used_region_at_save_marks();
  1993   assert(ur.contains(urasm),
  1994          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
  1995                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
  1996                  ur.start(), ur.end(), urasm.start(), urasm.end()));
  1997 #endif
  1998   // inform allocator that promotions should be tracked.
  1999   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  2000   _promoInfo.startTrackingPromotions();
  2003 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
  2004   assert(_promoInfo.tracking(), "No preceding save_marks?");
  2005   assert(SharedHeap::heap()->n_par_threads() == 0,
  2006          "Shouldn't be called if using parallel gc.");
  2007   return _promoInfo.noPromotions();
  2010 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
  2012 void CompactibleFreeListSpace::                                             \
  2013 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  2014   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
  2015          "Shouldn't be called (yet) during parallel part of gc.");          \
  2016   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
  2017   /*                                                                        \
  2018    * This also restores any displaced headers and removes the elements from \
  2019    * the iteration set as they are processed, so that we have a clean slate \
  2020    * at the end of the iteration. Note, thus, that if new objects are       \
  2021    * promoted as a result of the iteration they are iterated over as well.  \
  2022    */                                                                       \
  2023   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
  2026 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
  2029 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
  2030   // ugghh... how would one do this efficiently for a non-contiguous space?
  2031   guarantee(false, "NYI");
  2034 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  2035   return _smallLinearAllocBlock._word_size == 0;
  2038 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
  2039   // Fix up linear allocation blocks to look like free blocks
  2040   repairLinearAllocBlock(&_smallLinearAllocBlock);
  2043 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
  2044   assert_locked();
  2045   if (blk->_ptr != NULL) {
  2046     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
  2047            "Minimum block size requirement");
  2048     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
  2049     fc->setSize(blk->_word_size);
  2050     fc->linkPrev(NULL);   // mark as free
  2051     fc->dontCoalesce();
  2052     assert(fc->isFree(), "just marked it free");
  2053     assert(fc->cantCoalesce(), "just marked it uncoalescable");
  2057 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
  2058   assert_locked();
  2059   if (_smallLinearAllocBlock._ptr == NULL) {
  2060     assert(_smallLinearAllocBlock._word_size == 0,
  2061       "Size of linAB should be zero if the ptr is NULL");
  2062     // Reset the linAB refill and allocation size limit.
  2063     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
  2065   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
  2068 void
  2069 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
  2070   assert_locked();
  2071   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
  2072          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
  2073          "blk invariant");
  2074   if (blk->_ptr == NULL) {
  2075     refillLinearAllocBlock(blk);
  2077   if (PrintMiscellaneous && Verbose) {
  2078     if (blk->_word_size == 0) {
  2079       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
  2084 void
  2085 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  2086   assert_locked();
  2087   assert(blk->_word_size == 0 && blk->_ptr == NULL,
  2088          "linear allocation block should be empty");
  2089   FreeChunk* fc;
  2090   if (blk->_refillSize < SmallForDictionary &&
  2091       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
  2092     // A linAB's strategy might be to use small sizes to reduce
  2093     // fragmentation but still get the benefits of allocation from a
  2094     // linAB.
  2095   } else {
  2096     fc = getChunkFromDictionary(blk->_refillSize);
  2098   if (fc != NULL) {
  2099     blk->_ptr  = (HeapWord*)fc;
  2100     blk->_word_size = fc->size();
  2101     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
  2105 // Support for concurrent collection policy decisions.
  2106 bool CompactibleFreeListSpace::should_concurrent_collect() const {
  2107   // In the future we might want to add in fragmentation stats --
  2108   // including erosion of the "mountain" into this decision as well.
  2109   return !adaptive_freelists() && linearAllocationWouldFail();
  2112 // Support for compaction
  2114 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  2115   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  2116   // prepare_for_compaction() uses the space between live objects
  2117   // so that a later phase can skip dead space quickly.  So verification
  2118   // of the free lists doesn't work after this point.
  2121 #define obj_size(q) adjustObjectSize(oop(q)->size())
  2122 #define adjust_obj_size(s) adjustObjectSize(s)
  2124 void CompactibleFreeListSpace::adjust_pointers() {
  2125   // In other versions of adjust_pointers(), a bail out
  2126   // based on the amount of live data in the generation
  2127   // (i.e., if 0, bail out) may be used.
  2128   // Cannot test used() == 0 here because the free lists have already
  2129   // been mangled by the compaction.
  2131   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
  2132   // See note about verification in prepare_for_compaction().
  2135 void CompactibleFreeListSpace::compact() {
  2136   SCAN_AND_COMPACT(obj_size);
  2139 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
  2140 // where fbs is free block sizes
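       // For example (illustrative only): a single free block of any size s gives
       // 1 - s*s/(s*s) = 0 (no fragmentation), while four free blocks of size 2
       // give 1 - 16/64 = 0.75; the metric approaches 1 as the free space
       // splinters into many small blocks.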
  2141 double CompactibleFreeListSpace::flsFrag() const {
  2142   size_t itabFree = totalSizeInIndexedFreeLists();
  2143   double frag = 0.0;
  2144   size_t i;
  2146   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2147     double sz  = i;
  2148     frag      += _indexedFreeList[i].count() * (sz * sz);
  2151   double totFree = itabFree +
  2152                    _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
  2153   if (totFree > 0) {
  2154     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
  2155             (totFree * totFree));
  2156     frag = (double)1.0  - frag;
  2157   } else {
  2158     assert(frag == 0.0, "Follows from totFree == 0");
  2160   return frag;
  2163 void CompactibleFreeListSpace::beginSweepFLCensus(
  2164   float inter_sweep_current,
  2165   float inter_sweep_estimate,
  2166   float intra_sweep_estimate) {
  2167   assert_locked();
  2168   size_t i;
  2169   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2170     FreeList* fl    = &_indexedFreeList[i];
  2171     if (PrintFLSStatistics > 1) {
  2172       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
  2174     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
  2175     fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
  2176     fl->set_beforeSweep(fl->count());
  2177     fl->set_bfrSurp(fl->surplus());
  2179   _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
  2180                                     inter_sweep_current,
  2181                                     inter_sweep_estimate,
  2182                                     intra_sweep_estimate);
  2185 void CompactibleFreeListSpace::setFLSurplus() {
  2186   assert_locked();
  2187   size_t i;
  2188   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2189     FreeList *fl = &_indexedFreeList[i];
  2190     fl->set_surplus(fl->count() -
  2191                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
  2195 void CompactibleFreeListSpace::setFLHints() {
  2196   assert_locked();
  2197   size_t i;
  2198   size_t h = IndexSetSize;
  2199   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
  2200     FreeList *fl = &_indexedFreeList[i];
  2201     fl->set_hint(h);
  2202     if (fl->surplus() > 0) {
  2203       h = i;
  2208 void CompactibleFreeListSpace::clearFLCensus() {
  2209   assert_locked();
  2210   size_t i;
  2211   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2212     FreeList *fl = &_indexedFreeList[i];
  2213     fl->set_prevSweep(fl->count());
  2214     fl->set_coalBirths(0);
  2215     fl->set_coalDeaths(0);
  2216     fl->set_splitBirths(0);
  2217     fl->set_splitDeaths(0);
  2221 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  2222   if (PrintFLSStatistics > 0) {
  2223     HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
  2224     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
  2225                            largestAddr);
  2227   setFLSurplus();
  2228   setFLHints();
  2229   if (PrintGC && PrintFLSCensus > 0) {
  2230     printFLCensus(sweep_count);
  2232   clearFLCensus();
  2233   assert_locked();
  2234   _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
  2237 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
  2238   if (size < SmallForDictionary) {
  2239     FreeList *fl = &_indexedFreeList[size];
  2240     return (fl->coalDesired() < 0) ||
  2241            ((int)fl->count() > fl->coalDesired());
  2242   } else {
  2243     return dictionary()->coalDictOverPopulated(size);
  2247 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
  2248   assert(size < SmallForDictionary, "Size too large for indexed list");
  2249   FreeList *fl = &_indexedFreeList[size];
  2250   fl->increment_coalBirths();
  2251   fl->increment_surplus();
  2254 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
  2255   assert(size < SmallForDictionary, "Size too large for indexed list");
  2256   FreeList *fl = &_indexedFreeList[size];
  2257   fl->increment_coalDeaths();
  2258   fl->decrement_surplus();
  2261 void CompactibleFreeListSpace::coalBirth(size_t size) {
  2262   if (size  < SmallForDictionary) {
  2263     smallCoalBirth(size);
  2264   } else {
  2265     dictionary()->dictCensusUpdate(size,
  2266                                    false /* split */,
  2267                                    true /* birth */);
  2271 void CompactibleFreeListSpace::coalDeath(size_t size) {
  2272   if (size < SmallForDictionary) {
  2273     smallCoalDeath(size);
  2274   } else {
  2275     dictionary()->dictCensusUpdate(size,
  2276                                    false /* split */,
  2277                                    false /* birth */);
  2281 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
  2282   assert(size < SmallForDictionary, "Size too large for indexed list");
  2283   FreeList *fl = &_indexedFreeList[size];
  2284   fl->increment_splitBirths();
  2285   fl->increment_surplus();
  2288 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
  2289   assert(size < SmallForDictionary, "Size too large for indexed list");
  2290   FreeList *fl = &_indexedFreeList[size];
  2291   fl->increment_splitDeaths();
  2292   fl->decrement_surplus();
  2295 void CompactibleFreeListSpace::splitBirth(size_t size) {
  2296   if (size  < SmallForDictionary) {
  2297     smallSplitBirth(size);
  2298   } else {
  2299     dictionary()->dictCensusUpdate(size,
  2300                                    true /* split */,
  2301                                    true /* birth */);
  2305 void CompactibleFreeListSpace::splitDeath(size_t size) {
  2306   if (size  < SmallForDictionary) {
  2307     smallSplitDeath(size);
  2308   } else {
  2309     dictionary()->dictCensusUpdate(size,
  2310                                    true /* split */,
  2311                                    false /* birth */);
  2315 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
  2316   size_t to2 = from - to1;
  2317   splitDeath(from);
  2318   splitBirth(to1);
  2319   splitBirth(to2);
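         // For example (illustrative only): splitting a 100-word chunk into a
         // 40-word piece and a 60-word remainder records one split death at
         // size 100 and split births at sizes 40 and 60.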
  2322 void CompactibleFreeListSpace::print() const {
  2323   print_on(tty);
  2326 void CompactibleFreeListSpace::prepare_for_verify() {
  2327   assert_locked();
  2328   repairLinearAllocationBlocks();
  2329   // Verify that the SpoolBlocks look like free blocks of
  2330   // appropriate sizes... To be done ...
  2333 class VerifyAllBlksClosure: public BlkClosure {
  2334  private:
  2335   const CompactibleFreeListSpace* _sp;
  2336   const MemRegion                 _span;
  2337   HeapWord*                       _last_addr;
  2338   size_t                          _last_size;
  2339   bool                            _last_was_obj;
  2340   bool                            _last_was_live;
  2342  public:
  2343   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
  2344     MemRegion span) :  _sp(sp), _span(span),
  2345                        _last_addr(NULL), _last_size(0),
  2346                        _last_was_obj(false), _last_was_live(false) { }
  2348   virtual size_t do_blk(HeapWord* addr) {
  2349     size_t res;
  2350     bool   was_obj  = false;
  2351     bool   was_live = false;
  2352     if (_sp->block_is_obj(addr)) {
  2353       was_obj = true;
  2354       oop p = oop(addr);
  2355       guarantee(p->is_oop(), "Should be an oop");
  2356       res = _sp->adjustObjectSize(p->size());
  2357       if (_sp->obj_is_alive(addr)) {
  2358         was_live = true;
  2359         p->verify();
  2361     } else {
  2362       FreeChunk* fc = (FreeChunk*)addr;
  2363       res = fc->size();
  2364       if (FLSVerifyLists && !fc->cantCoalesce()) {
  2365         guarantee(_sp->verifyChunkInFreeLists(fc),
  2366                   "Chunk should be on a free list");
  2369     if (res == 0) {
  2370       gclog_or_tty->print_cr("Livelock: no rank reduction!");
  2371       gclog_or_tty->print_cr(
  2372         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
  2373         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
  2374         addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
  2375         _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
  2376       _sp->print_on(gclog_or_tty);
  2377       guarantee(false, "Seppuku!");
  2379     _last_addr = addr;
  2380     _last_size = res;
  2381     _last_was_obj  = was_obj;
  2382     _last_was_live = was_live;
  2383     return res;
  2385 };
  2387 class VerifyAllOopsClosure: public OopClosure {
  2388  private:
  2389   const CMSCollector*             _collector;
  2390   const CompactibleFreeListSpace* _sp;
  2391   const MemRegion                 _span;
  2392   const bool                      _past_remark;
  2393   const CMSBitMap*                _bit_map;
  2395  protected:
  2396   void do_oop(void* p, oop obj) {
  2397     if (_span.contains(obj)) { // the interior oop points into CMS heap
  2398       if (!_span.contains(p)) { // reference from outside CMS heap
  2399         // Should be a valid object; the first disjunct below allows
  2400         // us to sidestep an assertion in block_is_obj() that insists
  2401         // that p be in _sp. Note that several generations (and spaces)
  2402         // are spanned by _span (CMS heap) above.
  2403         guarantee(!_sp->is_in_reserved(obj) ||
  2404                   _sp->block_is_obj((HeapWord*)obj),
  2405                   "Should be an object");
  2406         guarantee(obj->is_oop(), "Should be an oop");
  2407         obj->verify();
  2408         if (_past_remark) {
  2409           // Remark has been completed, the object should be marked
  2410           _bit_map->isMarked((HeapWord*)obj);
  2412       } else { // reference within CMS heap
  2413         if (_past_remark) {
  2414           // Remark has been completed -- so the referent should have
  2415           // been marked, if referring object is.
  2416           if (_bit_map->isMarked(_collector->block_start(p))) {
  2417             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
  2421     } else if (_sp->is_in_reserved(p)) {
  2422       // the reference is from FLS, and points out of FLS
  2423       guarantee(obj->is_oop(), "Should be an oop");
  2424       obj->verify();
  2428   template <class T> void do_oop_work(T* p) {
  2429     T heap_oop = oopDesc::load_heap_oop(p);
  2430     if (!oopDesc::is_null(heap_oop)) {
  2431       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2432       do_oop(p, obj);
  2436  public:
  2437   VerifyAllOopsClosure(const CMSCollector* collector,
  2438     const CompactibleFreeListSpace* sp, MemRegion span,
  2439     bool past_remark, CMSBitMap* bit_map) :
  2440     OopClosure(), _collector(collector), _sp(sp), _span(span),
  2441     _past_remark(past_remark), _bit_map(bit_map) { }
  2443   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
  2444   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
  2445 };
  2447 void CompactibleFreeListSpace::verify() const {
  2448   assert_lock_strong(&_freelistLock);
  2449   verify_objects_initialized();
  2450   MemRegion span = _collector->_span;
  2451   bool past_remark = (_collector->abstract_state() ==
  2452                       CMSCollector::Sweeping);
  2454   ResourceMark rm;
  2455   HandleMark  hm;
  2457   // Check integrity of CFL data structures
  2458   _promoInfo.verify();
  2459   _dictionary->verify();
  2460   if (FLSVerifyIndexTable) {
  2461     verifyIndexedFreeLists();
  2463   // Check integrity of all objects and free blocks in space
  2465     VerifyAllBlksClosure cl(this, span);
  2466     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
  2468   // Check that all references in the heap to FLS
  2469   // are to valid objects in FLS or that references in
  2470   // FLS are to valid objects elsewhere in the heap
  2471   if (FLSVerifyAllHeapReferences)
  2473     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
  2474       _collector->markBitMap());
  2475     CollectedHeap* ch = Universe::heap();
  2476     ch->oop_iterate(&cl);              // all oops in generations
  2477     ch->permanent_oop_iterate(&cl);    // all oops in perm gen
  2480   if (VerifyObjectStartArray) {
  2481     // Verify the block offset table
  2482     _bt.verify();
  2486 #ifndef PRODUCT
  2487 void CompactibleFreeListSpace::verifyFreeLists() const {
  2488   if (FLSVerifyLists) {
  2489     _dictionary->verify();
  2490     verifyIndexedFreeLists();
  2491   } else {
  2492     if (FLSVerifyDictionary) {
  2493       _dictionary->verify();
  2495     if (FLSVerifyIndexTable) {
  2496       verifyIndexedFreeLists();
  2500 #endif
  2502 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
  2503   size_t i = 0;
  2504   for (; i < IndexSetStart; i++) {
  2505     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
  2507   for (; i < IndexSetSize; i++) {
  2508     verifyIndexedFreeList(i);
  2512 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
  2513   FreeChunk* fc   =  _indexedFreeList[size].head();
  2514   FreeChunk* tail =  _indexedFreeList[size].tail();
  2515   size_t    num = _indexedFreeList[size].count();
  2516   size_t      n = 0;
  2517   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
  2518             "Slot should have been empty");
  2519   for (; fc != NULL; fc = fc->next(), n++) {
  2520     guarantee(fc->size() == size, "Size inconsistency");
  2521     guarantee(fc->isFree(), "!free?");
  2522     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
  2523     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
  2525   guarantee(n == num, "Incorrect count");
  2528 #ifndef PRODUCT
  2529 void CompactibleFreeListSpace::check_free_list_consistency() const {
  2530   assert(_dictionary->minSize() <= IndexSetSize,
  2531     "Some sizes can't be allocated without recourse to"
  2532     " linear allocation buffers");
  2533   assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
  2534     "else MIN_TREE_CHUNK_SIZE is wrong");
  2535   assert((IndexSetStride == 2 && IndexSetStart == 4) ||                   // 32-bit
  2536          (IndexSetStride == 1 && IndexSetStart == 3), "just checking");   // 64-bit
  2537   assert((IndexSetStride != 2) || (IndexSetStart % 2 == 0),
  2538       "Some for-loops may be incorrectly initialized");
  2539   assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
  2540       "For-loops that iterate over IndexSet with stride 2 may be wrong");
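         // Concretely (following the asserts above): on 32-bit VMs the indexed
         // lists cover the even sizes 4, 6, ..., IndexSetSize - 1; on 64-bit VMs
         // they cover every size from 3 up to IndexSetSize - 1.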
  2542 #endif
  2544 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  2545   assert_lock_strong(&_freelistLock);
  2546   FreeList total;
  2547   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  2548   FreeList::print_labels_on(gclog_or_tty, "size");
  2549   size_t totalFree = 0;
  2550   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2551     const FreeList *fl = &_indexedFreeList[i];
  2552     totalFree += fl->count() * fl->size();
  2553     if (i % (40*IndexSetStride) == 0) {
  2554       FreeList::print_labels_on(gclog_or_tty, "size");
  2556     fl->print_on(gclog_or_tty);
  2557     total.set_bfrSurp(    total.bfrSurp()     + fl->bfrSurp()    );
  2558     total.set_surplus(    total.surplus()     + fl->surplus()    );
  2559     total.set_desired(    total.desired()     + fl->desired()    );
  2560     total.set_prevSweep(  total.prevSweep()   + fl->prevSweep()  );
  2561     total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
  2562     total.set_count(      total.count()       + fl->count()      );
  2563     total.set_coalBirths( total.coalBirths()  + fl->coalBirths() );
  2564     total.set_coalDeaths( total.coalDeaths()  + fl->coalDeaths() );
  2565     total.set_splitBirths(total.splitBirths() + fl->splitBirths());
  2566     total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
  2567   }
  2568   total.print_on(gclog_or_tty, "TOTAL");
  2569   gclog_or_tty->print_cr("Total free in indexed lists "
  2570                          SIZE_FORMAT " words", totalFree);
  2571   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
  2572     (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
  2573             (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
  2574     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
  2575   _dictionary->printDictCensus();
  2576 }
  2577
  2578 ///////////////////////////////////////////////////////////////////////////
  2579 // CFLS_LAB
  2580 ///////////////////////////////////////////////////////////////////////////
  2582 #define VECTOR_257(x)                                                                                  \
  2583   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
  2584   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2585      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2586      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2587      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2588      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2589      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2590      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2591      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2592      x }
  2594 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
  2595 // OldPLABSize, whose static default is different; if overridden at the
  2596 // command-line, this will get reinitialized via a call to
  2597 // modify_initialization() below.
  2598 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
  2599   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
  2600 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
  2601 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
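// Note (illustrative, not part of the original file): these three tables are
// indexed directly by block size in words, so entry 17 tracks 17-word blocks.
// Each must therefore hold CompactibleFreeListSpace::IndexSetSize (257)
// entries, which is what the VECTOR_257() initializer above supplies and what
// the CFLS_LAB constructor below cross-checks against IndexSetSize.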
  2603 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
  2604   _cfls(cfls)
  2605 {
  2606   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
  2607   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2608        i < CompactibleFreeListSpace::IndexSetSize;
  2609        i += CompactibleFreeListSpace::IndexSetStride) {
  2610     _indexedFreeList[i].set_size(i);
  2611     _num_blocks[i] = 0;
  2612   }
  2613 }
  2614
  2615 static bool _CFLS_LAB_modified = false;
  2617 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
  2618   assert(!_CFLS_LAB_modified, "Call only once");
  2619   _CFLS_LAB_modified = true;
  2620   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2621        i < CompactibleFreeListSpace::IndexSetSize;
  2622        i += CompactibleFreeListSpace::IndexSetStride) {
  2623     _blocks_to_claim[i].modify(n, wt, true /* force */);
  2624   }
  2625 }
  2626
  2627 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  2628   FreeChunk* res;
  2629   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
  2630   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
  2631     // This locking manages sync with other large object allocations.
  2632     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
  2633                     Mutex::_no_safepoint_check_flag);
  2634     res = _cfls->getChunkFromDictionaryExact(word_sz);
  2635     if (res == NULL) return NULL;
  2636   } else {
  2637     FreeList* fl = &_indexedFreeList[word_sz];
  2638     if (fl->count() == 0) {
  2639       // Attempt to refill this local free list.
  2640       get_from_global_pool(word_sz, fl);
  2641       // If it didn't work, give up.
  2642       if (fl->count() == 0) return NULL;
  2643     }
  2644     res = fl->getChunkAtHead();
  2645     assert(res != NULL, "Why was count non-zero?");
  2646   }
  2647   res->markNotFree();
  2648   assert(!res->isFree(), "shouldn't be marked free");
  2649   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  2650   // mangle a just allocated object with a distinct pattern.
  2651   debug_only(res->mangleAllocated(word_sz));
  2652   return (HeapWord*)res;
  2653 }
  2654
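// Example (illustrative, not part of the original file): with IndexSetSize == 257,
// a 300-word promotion request in alloc() above takes the dictionary path under
// parDictionaryAllocLock(), while a 12-word request is served from this thread's
// private _indexedFreeList[12] without locking, refilling from the shared space
// (which does take the per-size lock) only when the local list is empty.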
  2655 // Get a chunk of blocks of the right size and update related
  2656 // book-keeping stats
  2657 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
  2658   // Get the #blocks we want to claim
  2659   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
  2660   assert(n_blks > 0, "Error");
  2661   assert(ResizePLAB || n_blks == OldPLABSize, "Error");
  2662   // In some cases, when the application has a phase change,
  2663   // there may be a sudden and sharp shift in the object survival
  2664   // profile, and updating the counts at the end of a scavenge
  2665   // may not be quick enough, giving rise to large scavenge pauses
  2666   // during these phase changes. It is beneficial to detect such
  2667   // changes on-the-fly during a scavenge and avoid such a phase-change
  2668   // pothole. The following code is a heuristic attempt to do that.
  2669   // It is protected by a product flag until we have gained
  2670   // enough experience with this heuristic and fine-tuned its behaviour.
  2671   // WARNING: This might increase fragmentation if we overreact to
  2672   // small spikes, so some kind of historical smoothing based on
  2673   // previous experience with the greater reactivity might be useful.
  2674   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
  2675   // default.
  2676   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
  2677     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
  2678     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
  2679     n_blks = MIN2(n_blks, CMSOldPLABMax);
  2680   }
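  // Worked example (illustrative, not part of the original file), using
  // hypothetical flag values: if n_blks == 50, CMSOldPLABToleranceFactor == 4,
  // CMSOldPLABNumRefills == 4, and this thread has already consumed
  // _num_blocks[word_sz] == 2000 blocks of this size, then
  //   multiple = 2000 / (4 * 4 * 50) = 2,
  // and with CMSOldPLABReactivityFactor == 2 the request grows by
  //   2 * 2 * 50 = 200 blocks, so n_blks becomes 250,
  // subject to the CMSOldPLABMax clamp applied above.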
  2681   assert(n_blks > 0, "Error");
  2682   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
  2683   // Update stats table entry for this block size
  2684   _num_blocks[word_sz] += fl->count();
  2685 }
  2686
  2687 void CFLS_LAB::compute_desired_plab_size() {
  2688   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2689        i < CompactibleFreeListSpace::IndexSetSize;
  2690        i += CompactibleFreeListSpace::IndexSetStride) {
  2691     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
  2692            "Counter inconsistency");
  2693     if (_global_num_workers[i] > 0) {
  2694       // Need to smooth wrt historical average
  2695       if (ResizeOldPLAB) {
  2696         _blocks_to_claim[i].sample(
  2697           MAX2((size_t)CMSOldPLABMin,
  2698           MIN2((size_t)CMSOldPLABMax,
  2699                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
  2700       }
  2701       // Reset counters for next round
  2702       _global_num_workers[i] = 0;
  2703       _global_num_blocks[i] = 0;
  2704       if (PrintOldPLAB) {
  2705         gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average());
  2706       }
  2707     }
  2708   }
  2709 }
  2710
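// Illustrative sketch (not part of the original file): the clamped per-worker
// average that compute_desired_plab_size() feeds into the exponentially
// weighted _blocks_to_claim[i] sample above, written out with plain size_t
// arithmetic. The parameter names are hypothetical stand-ins for the
// CMSOldPLABMin, CMSOldPLABMax and CMSOldPLABNumRefills flags.
static size_t example_desired_plab_sample(size_t global_blocks,  // blocks used for this size, all workers
                                          size_t global_workers, // workers that used this size
                                          size_t plab_min,
                                          size_t plab_max,
                                          size_t num_refills) {
  assert(global_workers > 0 && num_refills > 0, "guarded by the caller, as above");
  // Average number of blocks per worker per refill, clamped to [plab_min, plab_max].
  return MAX2(plab_min,
              MIN2(plab_max, global_blocks / (global_workers * num_refills)));
}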
  2711 // If this is changed in the future to allow parallel
  2712 // access, one would need to take the FL locks and,
  2713 // depending on how it is used, stagger access from
  2714 // parallel threads to reduce contention.
  2715 void CFLS_LAB::retire(int tid) {
  2716   // We run this single threaded with the world stopped;
  2717   // so no need for locks and such.
  2718   NOT_PRODUCT(Thread* t = Thread::current();)
  2719   assert(Thread::current()->is_VM_thread(), "Error");
  2720   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2721        i < CompactibleFreeListSpace::IndexSetSize;
  2722        i += CompactibleFreeListSpace::IndexSetStride) {
  2723     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
  2724            "Can't retire more than what we obtained");
  2725     if (_num_blocks[i] > 0) {
  2726       size_t num_retire =  _indexedFreeList[i].count();
  2727       assert(_num_blocks[i] > num_retire, "Should have used at least one");
  2728       {
  2729         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
  2730         //                Mutex::_no_safepoint_check_flag);
  2732         // Update globals stats for num_blocks used
  2733         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
  2734         _global_num_workers[i]++;
  2735         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
  2736         if (num_retire > 0) {
  2737           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
  2738           // Reset this list.
  2739           _indexedFreeList[i] = FreeList();
  2740           _indexedFreeList[i].set_size(i);
  2741         }
  2742       }
  2743       if (PrintOldPLAB) {
  2744         gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
  2745                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
  2746       }
  2747       // Reset stats for next round
  2748       _num_blocks[i]         = 0;
  2749     }
  2750   }
  2751 }
  2752
  2753 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
  2754   assert(fl->count() == 0, "Precondition.");
  2755   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
  2756          "Precondition");
  2758   // We'll try all multiples of word_sz in the indexed set, starting with
  2759   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
  2760   // then try getting a big chunk and splitting it.
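  // Worked example (illustrative, not part of the original file): for
  // word_sz == 8 and n == 100, the loop below first looks at the 8-word
  // list (k == 1), then, if CMSSplitIndexedFreeListBlocks is enabled, the
  // 16-word list (k == 2, each chunk split into two 8-word blocks), the
  // 24-word list (k == 3), and so on while k * word_sz stays below
  // IndexSetSize; only if none of those lists can supply chunks do we fall
  // through to carving up a single large dictionary block further down.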
  2761   {
  2762     bool found;
  2763     int  k;
  2764     size_t cur_sz;
  2765     for (k = 1, cur_sz = k * word_sz, found = false;
  2766          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
  2767          (CMSSplitIndexedFreeListBlocks || k <= 1);
  2768          k++, cur_sz = k * word_sz) {
  2769       FreeList fl_for_cur_sz;  // Empty.
  2770       fl_for_cur_sz.set_size(cur_sz);
  2771       {
  2772         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
  2773                         Mutex::_no_safepoint_check_flag);
  2774         FreeList* gfl = &_indexedFreeList[cur_sz];
  2775         if (gfl->count() != 0) {
  2776           // nn is the number of chunks of size cur_sz that
  2777           // we'd need to split k-ways each, in order to create
  2778           // "n" chunks of size word_sz each.
  2779           const size_t nn = MAX2(n/k, (size_t)1);
  2780           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
  2781           found = true;
  2782           if (k > 1) {
  2783             // Update split death stats for the cur_sz-size blocks list:
  2784             // we increment the split death count by the number of blocks
  2785             // we just took from the cur_sz-size blocks list and which
  2786             // we will be splitting below.
  2787             ssize_t deaths = gfl->splitDeaths() +
  2788                              fl_for_cur_sz.count();
  2789             gfl->set_splitDeaths(deaths);
  2790           }
  2791         }
  2792       }
  2793       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
  2794       if (found) {
  2795         if (k == 1) {
  2796           fl->prepend(&fl_for_cur_sz);
  2797         } else {
  2798           // Divide each block on fl_for_cur_sz up k ways.
  2799           FreeChunk* fc;
  2800           while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
  2801             // Must do this in reverse order, so that anybody attempting to
  2802             // access the main chunk sees it as a single free block until we
  2803             // change it.
  2804             size_t fc_size = fc->size();
  2805             assert(fc->isFree(), "Error");
  2806             for (int i = k-1; i >= 0; i--) {
  2807               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2808               assert((i != 0) ||
  2809                         ((fc == ffc) && ffc->isFree() &&
  2810                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
  2811                         "Counting error");
  2812               ffc->setSize(word_sz);
  2813               ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2814               ffc->linkNext(NULL);
  2815               // Above must occur before BOT is updated below.
  2816               OrderAccess::storestore();
  2817               // splitting from the right, fc_size == i * word_sz
  2818               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
  2819               fc_size -= word_sz;
  2820               assert(fc_size == i*word_sz, "Error");
  2821               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
  2822               _bt.verify_single_block((HeapWord*)fc, fc_size);
  2823               _bt.verify_single_block((HeapWord*)ffc, word_sz);
  2824               // Push this on "fl".
  2825               fl->returnChunkAtHead(ffc);
  2826             }
  2827             // TRAP
  2828             assert(fl->tail()->next() == NULL, "List invariant.");
  2829           }
  2830         }
  2831         // Update birth stats for this block size.
  2832         size_t num = fl->count();
  2833         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  2834                         Mutex::_no_safepoint_check_flag);
  2835         ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
  2836         _indexedFreeList[word_sz].set_splitBirths(births);
  2837         return;
  2838       }
  2839     }
  2840   }
  2841   // Otherwise, we'll split a block from the dictionary.
  2842   FreeChunk* fc = NULL;
  2843   FreeChunk* rem_fc = NULL;
  2844   size_t rem;
  2845   {
  2846     MutexLockerEx x(parDictionaryAllocLock(),
  2847                     Mutex::_no_safepoint_check_flag);
  2848     while (n > 0) {
  2849       fc = dictionary()->getChunk(MAX2(n * word_sz,
  2850                                   _dictionary->minSize()),
  2851                                   FreeBlockDictionary::atLeast);
  2852       if (fc != NULL) {
  2853         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
  2854         dictionary()->dictCensusUpdate(fc->size(),
  2855                                        true /*split*/,
  2856                                        false /*birth*/);
  2857         break;
  2858       } else {
  2859         n--;
  2860       }
  2861     }
  2862     if (fc == NULL) return;
  2863     // Otherwise, split up that block.
  2864     assert((ssize_t)n >= 1, "Control point invariant");
  2865     assert(fc->isFree(), "Error: should be a free block");
  2866     _bt.verify_single_block((HeapWord*)fc, fc->size());
  2867     const size_t nn = fc->size() / word_sz;
  2868     n = MIN2(nn, n);
  2869     assert((ssize_t)n >= 1, "Control point invariant");
  2870     rem = fc->size() - n * word_sz;
  2871     // If there is a remainder, and it's too small, allocate one fewer.
  2872     if (rem > 0 && rem < MinChunkSize) {
  2873       n--; rem += word_sz;
  2874     }
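    // Worked example (illustrative, not part of the original file): with
    // word_sz == 8, a dictionary chunk of 70 words gives n == 8 and
    // rem == 6; if 6 < MinChunkSize we give up one block, leaving n == 7
    // and a 14-word remainder that is big enough to stand on its own.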
  2875     // Note that at this point we may have n == 0.
  2876     assert((ssize_t)n >= 0, "Control point invariant");
  2878     // If n is 0, the chunk fc that was found is not large
  2879     // enough to leave a viable remainder.  We are unable to
  2880     // allocate even one block.  Return fc to the
  2881     // dictionary and return, leaving "fl" empty.
  2882     if (n == 0) {
  2883       returnChunkToDictionary(fc);
  2884       assert(fl->count() == 0, "We never allocated any blocks");
  2885       return;
  2886     }
  2887
  2888     // First return the remainder, if any.
  2889     // Note that we hold the lock until we decide if we're going to give
  2890     // back the remainder to the dictionary, since a concurrent allocation
  2891     // may otherwise see the heap as empty.  (We're willing to take that
  2892     // hit if the block is a small block.)
  2893     if (rem > 0) {
  2894       size_t prefix_size = n * word_sz;
  2895       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
  2896       rem_fc->setSize(rem);
  2897       rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2898       rem_fc->linkNext(NULL);
  2899       // Above must occur before BOT is updated below.
  2900       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
  2901       OrderAccess::storestore();
  2902       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
  2903       assert(fc->isFree(), "Error");
  2904       fc->setSize(prefix_size);
  2905       if (rem >= IndexSetSize) {
  2906         returnChunkToDictionary(rem_fc);
  2907         dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
  2908         rem_fc = NULL;
  2909       }
  2910       // Otherwise, return it to the small list below.
  2911     }
  2912   }
  2913   if (rem_fc != NULL) {
  2914     MutexLockerEx x(_indexedFreeListParLocks[rem],
  2915                     Mutex::_no_safepoint_check_flag);
  2916     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
  2917     _indexedFreeList[rem].returnChunkAtHead(rem_fc);
  2918     smallSplitBirth(rem);
  2919   }
  2920   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  2921   // Now do the splitting up.
  2922   // Must do this in reverse order, so that anybody attempting to
  2923   // access the main chunk sees it as a single free block until we
  2924   // change it.
  2925   size_t fc_size = n * word_sz;
  2926   // All but first chunk in this loop
  2927   for (ssize_t i = n-1; i > 0; i--) {
  2928     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2929     ffc->setSize(word_sz);
  2930     ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2931     ffc->linkNext(NULL);
  2932     // Above must occur before BOT is updated below.
  2933     OrderAccess::storestore();
  2934     // splitting from the right: after the decrement below, fc_size == i * word_sz
  2935     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
  2936     fc_size -= word_sz;
  2937     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
  2938     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
  2939     _bt.verify_single_block((HeapWord*)fc, fc_size);
  2940     // Push this on "fl".
  2941     fl->returnChunkAtHead(ffc);
  2942   }
  2943   // First chunk
  2944   assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
  2945   // The blocks above should show their new sizes before the first block below
  2946   fc->setSize(word_sz);
  2947   fc->linkPrev(NULL);    // idempotent wrt free-ness, see assert above
  2948   fc->linkNext(NULL);
  2949   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  2950   _bt.verify_single_block((HeapWord*)fc, fc->size());
  2951   fl->returnChunkAtHead(fc);
  2953   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  2954   {
  2955     // Update the stats for this block size.
  2956     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  2957                     Mutex::_no_safepoint_check_flag);
  2958     const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
  2959     _indexedFreeList[word_sz].set_splitBirths(births);
  2960     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
  2961     // _indexedFreeList[word_sz].set_surplus(new_surplus);
  2962   }
  2963
  2964   // TRAP
  2965   assert(fl->tail()->next() == NULL, "List invariant.");
  2966 }
  2967
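// Illustrative sketch (not part of the original file): the right-to-left
// carving order used by both splitting loops above, modelled on a plain
// array with hypothetical names; sizes_at[i] stands in for calling
// setSize(word_sz) on piece i. Carving the highest-addressed piece first
// means the original prefix remains one contiguous block until the final
// resize, so a concurrent reader never sees a half-split chunk.
static void example_carve_from_right(size_t* sizes_at, size_t n, size_t word_sz) {
  size_t prefix_size = n * word_sz;      // words still covered by the prefix block
  for (size_t i = n - 1; i > 0; i--) {
    sizes_at[i] = word_sz;               // carve piece i off the right end
    prefix_size -= word_sz;              // the prefix shrinks by one piece
    assert(prefix_size == i * word_sz, "bookkeeping");
  }
  sizes_at[0] = word_sz;                 // finally resize the first piece
}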
  2968 // Set up the space's par_seq_tasks structure for work claiming
  2969 // for parallel rescan. See CMSParRemarkTask where this is currently used.
  2970 // XXX Need to suitably abstract and generalize this and the next
  2971 // method into one.
  2972 void
  2973 CompactibleFreeListSpace::
  2974 initialize_sequential_subtasks_for_rescan(int n_threads) {
  2975   // The "size" of each task is fixed according to rescan_task_size.
  2976   assert(n_threads > 0, "Unexpected n_threads argument");
  2977   const size_t task_size = rescan_task_size();
  2978   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
  2979   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  2980   assert(n_tasks == 0 ||
  2981          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
  2982           (used_region().start() + n_tasks*task_size >= used_region().end())),
  2983          "n_tasks calculation incorrect");
  2984   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  2985   assert(!pst->valid(), "Clobbering existing data?");
  2986   // Sets the condition for completion of the subtask (how many threads
  2987   // need to finish in order to be done).
  2988   pst->set_n_threads(n_threads);
  2989   pst->set_n_tasks((int)n_tasks);
  2990 }
  2991
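// Example (illustrative, not part of the original file): the n_tasks value
// above is a ceiling division, e.g. a used region of 10,000 words with a
// rescan_task_size() of 4,096 words yields (10000 + 4095) / 4096 == 3 tasks,
// the last of which covers the short tail of the region.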
  2992 // Set up the space's par_seq_tasks structure for work claiming
  2993 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
  2994 void
  2995 CompactibleFreeListSpace::
  2996 initialize_sequential_subtasks_for_marking(int n_threads,
  2997                                            HeapWord* low) {
  2998   // The "size" of each task is fixed according to rescan_task_size.
  2999   assert(n_threads > 0, "Unexpected n_threads argument");
  3000   const size_t task_size = marking_task_size();
  3001   assert(task_size > CardTableModRefBS::card_size_in_words &&
  3002          (task_size %  CardTableModRefBS::card_size_in_words == 0),
  3003          "Otherwise arithmetic below would be incorrect");
  3004   MemRegion span = _gen->reserved();
  3005   if (low != NULL) {
  3006     if (span.contains(low)) {
  3007       // Align low down to a card boundary so that
  3008       // we can use block_offset_careful() on span boundaries.
  3009       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
  3010                                  CardTableModRefBS::card_size);
  3011       // Clip span prefix at aligned_low
  3012       span = span.intersection(MemRegion(aligned_low, span.end()));
  3013     } else if (low > span.end()) {
  3014       span = MemRegion(low, low);  // Null region
  3015     } // else use entire span
  3016   }
  3017   assert(span.is_empty() ||
  3018          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
  3019         "span should start at a card boundary");
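  // Example (illustrative, not part of the original file): with a 512-byte
  // card, i.e. 64 words on a 64-bit VM, a "low" that lies 100 words past a
  // card-aligned span start is rounded down to the 64-word boundary, so the
  // clipped span still begins on a card boundary as asserted above.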
  3020   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  3021   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  3022   assert(n_tasks == 0 ||
  3023          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
  3024           (span.start() + n_tasks*task_size >= span.end())),
  3025          "n_tasks calculation incorrect");
  3026   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  3027   assert(!pst->valid(), "Clobbering existing data?");
  3028   // Sets the condition for completion of the subtask (how many threads
  3029   // need to finish in order to be done).
  3030   pst->set_n_threads(n_threads);
  3031   pst->set_n_tasks((int)n_tasks);
  3032 }
