src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author:      ysr
date:        Mon, 07 Feb 2011 22:19:57 -0800
changeset:   2533:c5a923563727
parent:      2314:f95d63e2154a
child:       2708:1d1603768966
permissions: -rw-r--r--

6912621: iCMS: Error: assert(_markBitMap.isMarked(addr + 1),"Missing Printezis bit?")
Summary: Fix block_size_if_printezis_bits() so it does not expect the bits, but only uses them when available. Fix block_size_no_stall() so it does not stall when the bits are missing; in such cases it lets the caller deal with zero size returns. Constant pool cache oops do not need to be unparsable or conc_unsafe after their klass pointer is installed. Some cosmetic clean-ups and some assertion checking for conc-unsafety which, in the presence of class file redefinition, has no a-priori time boundedness, so all GCs must be able to safely deal with putatively conc-unsafe objects in a stop-world pause.
Reviewed-by: jmasa, johnc
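
The caller-side contract implied by this fix, sketched below, is that a zero return from block_size_no_stall() now means "size could not be determined without stalling (no Printezis bits yet)", and the caller deals with that rather than waiting on the mutator. This is an illustrative sketch only: scan_no_stall and its parameters are hypothetical; only the block_size_no_stall() signature defined later in this file is assumed.

// Illustrative sketch, not code from this changeset.
static HeapWord* scan_no_stall(CompactibleFreeListSpace* sp,
                               const CMSCollector* collector,
                               HeapWord* cur, HeapWord* limit) {
  while (cur < limit) {
    size_t sz = sp->block_size_no_stall(cur, collector);
    if (sz == 0) {
      // Missing P-bits: size unknown without stalling; hand the
      // address back to the caller to deal with later.
      return cur;
    }
    cur += sz;   // parsable block: advance to the next block boundary
  }
  return NULL;   // walked the entire range
}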

     1 /*
     2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
    27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
    28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
    29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
    30 #include "gc_implementation/shared/liveRange.hpp"
    31 #include "gc_implementation/shared/spaceDecorator.hpp"
    32 #include "gc_interface/collectedHeap.hpp"
    33 #include "memory/allocation.inline.hpp"
    34 #include "memory/blockOffsetTable.inline.hpp"
    35 #include "memory/resourceArea.hpp"
    36 #include "memory/universe.inline.hpp"
    37 #include "oops/oop.inline.hpp"
    38 #include "runtime/globals.hpp"
    39 #include "runtime/handles.inline.hpp"
    40 #include "runtime/init.hpp"
    41 #include "runtime/java.hpp"
    42 #include "runtime/vmThread.hpp"
    43 #include "utilities/copy.hpp"
    45 /////////////////////////////////////////////////////////////////////////
    46 //// CompactibleFreeListSpace
    47 /////////////////////////////////////////////////////////////////////////
     49 // highest ranked free list lock rank
    50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
    52 // Defaults are 0 so things will break badly if incorrectly initialized.
    53 int CompactibleFreeListSpace::IndexSetStart  = 0;
    54 int CompactibleFreeListSpace::IndexSetStride = 0;
    56 size_t MinChunkSize = 0;
    58 void CompactibleFreeListSpace::set_cms_values() {
    59   // Set CMS global values
    60   assert(MinChunkSize == 0, "already set");
    61   #define numQuanta(x,y) ((x+y-1)/y)
    62   MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
    64   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
    65   IndexSetStart  = MinObjAlignment;
    66   IndexSetStride = MinObjAlignment;
    67 }
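        // Worked example (illustrative only, with assumed values): on a 64-bit VM
        // with MinObjAlignmentInBytes == 8 (so MinObjAlignment == 1 heap word) and,
        // say, sizeof(FreeChunk) == 24 bytes, numQuanta(24, 8) == (24 + 8 - 1) / 8 == 3,
        // so MinChunkSize == 3 * 1 == 3 heap words -- the smallest block that can
        // still carry a FreeChunk header when it is returned to the free lists.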
    69 // Constructor
    70 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
    71   MemRegion mr, bool use_adaptive_freelists,
    72   FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
    73   _dictionaryChoice(dictionaryChoice),
    74   _adaptive_freelists(use_adaptive_freelists),
    75   _bt(bs, mr),
    76   // free list locks are in the range of values taken by _lockRank
    77   // This range currently is [_leaf+2, _leaf+3]
    78   // Note: this requires that CFLspace c'tors
     79   // are called serially in the order in which the locks
     80   // are acquired in the program text. This is true today.
    81   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
    82   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
    83                           "CompactibleFreeListSpace._dict_par_lock", true),
    84   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    85                     CMSRescanMultiple),
    86   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    87                     CMSConcMarkMultiple),
    88   _collector(NULL)
    89 {
    90   _bt.set_space(this);
    91   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    92   // We have all of "mr", all of which we place in the dictionary
    93   // as one big chunk. We'll need to decide here which of several
    94   // possible alternative dictionary implementations to use. For
    95   // now the choice is easy, since we have only one working
    96   // implementation, namely, the simple binary tree (splaying
    97   // temporarily disabled).
    98   switch (dictionaryChoice) {
    99     case FreeBlockDictionary::dictionarySplayTree:
   100     case FreeBlockDictionary::dictionarySkipList:
   101     default:
   102       warning("dictionaryChoice: selected option not understood; using"
   103               " default BinaryTreeDictionary implementation instead.");
   104     case FreeBlockDictionary::dictionaryBinaryTree:
   105       _dictionary = new BinaryTreeDictionary(mr);
   106       break;
   107   }
   108   assert(_dictionary != NULL, "CMS dictionary initialization");
   109   // The indexed free lists are initially all empty and are lazily
   110   // filled in on demand. Initialize the array elements to NULL.
   111   initializeIndexedFreeListArray();
   113   // Not using adaptive free lists assumes that allocation is first
   114   // from the linAB's.  Also a cms perm gen which can be compacted
   115   // has to have the klass's klassKlass allocated at a lower
   116   // address in the heap than the klass so that the klassKlass is
   117   // moved to its new location before the klass is moved.
   118   // Set the _refillSize for the linear allocation blocks
   119   if (!use_adaptive_freelists) {
   120     FreeChunk* fc = _dictionary->getChunk(mr.word_size());
   121     // The small linAB initially has all the space and will allocate
   122     // a chunk of any size.
   123     HeapWord* addr = (HeapWord*) fc;
   124     _smallLinearAllocBlock.set(addr, fc->size() ,
   125       1024*SmallForLinearAlloc, fc->size());
   126     // Note that _unallocated_block is not updated here.
   127     // Allocations from the linear allocation block should
   128     // update it.
   129   } else {
   130     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
   131                                SmallForLinearAlloc);
   132   }
   133   // CMSIndexedFreeListReplenish should be at least 1
   134   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
   135   _promoInfo.setSpace(this);
   136   if (UseCMSBestFit) {
   137     _fitStrategy = FreeBlockBestFitFirst;
   138   } else {
   139     _fitStrategy = FreeBlockStrategyNone;
   140   }
   141   checkFreeListConsistency();
   143   // Initialize locks for parallel case.
   145   if (CollectedHeap::use_parallel_gc_threads()) {
   146     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   147       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
   148                                               "a freelist par lock",
   149                                               true);
   150       if (_indexedFreeListParLocks[i] == NULL)
   151         vm_exit_during_initialization("Could not allocate a par lock");
   152       DEBUG_ONLY(
   153         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
   154       )
   155     }
   156     _dictionary->set_par_lock(&_parDictionaryAllocLock);
   157   }
   158 }
   160 // Like CompactibleSpace forward() but always calls cross_threshold() to
   161 // update the block offset table.  Removed initialize_threshold call because
   162 // CFLS does not use a block offset array for contiguous spaces.
   163 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
   164                                     CompactPoint* cp, HeapWord* compact_top) {
   165   // q is alive
   166   // First check if we should switch compaction space
   167   assert(this == cp->space, "'this' should be current compaction space.");
   168   size_t compaction_max_size = pointer_delta(end(), compact_top);
   169   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
   170     "virtual adjustObjectSize_v() method is not correct");
   171   size_t adjusted_size = adjustObjectSize(size);
   172   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
   173          "no small fragments allowed");
   174   assert(minimum_free_block_size() == MinChunkSize,
   175          "for de-virtualized reference below");
    176   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
   177   if (adjusted_size + MinChunkSize > compaction_max_size &&
   178       adjusted_size != compaction_max_size) {
   179     do {
   180       // switch to next compaction space
   181       cp->space->set_compaction_top(compact_top);
   182       cp->space = cp->space->next_compaction_space();
   183       if (cp->space == NULL) {
   184         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
   185         assert(cp->gen != NULL, "compaction must succeed");
   186         cp->space = cp->gen->first_compaction_space();
   187         assert(cp->space != NULL, "generation must have a first compaction space");
   188       }
   189       compact_top = cp->space->bottom();
   190       cp->space->set_compaction_top(compact_top);
   191       // The correct adjusted_size may not be the same as that for this method
    192   // (i.e., cp->space may no longer be "this"), so adjust the size again.
   193       // Use the virtual method which is not used above to save the virtual
   194       // dispatch.
   195       adjusted_size = cp->space->adjust_object_size_v(size);
   196       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
   197       assert(cp->space->minimum_free_block_size() == 0, "just checking");
   198     } while (adjusted_size > compaction_max_size);
   199   }
   201   // store the forwarding pointer into the mark word
   202   if ((HeapWord*)q != compact_top) {
   203     q->forward_to(oop(compact_top));
   204     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
   205   } else {
   206     // if the object isn't moving we can just set the mark to the default
   207     // mark and handle it specially later on.
   208     q->init_mark();
   209     assert(q->forwardee() == NULL, "should be forwarded to NULL");
   210   }
   212   VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
   213   compact_top += adjusted_size;
   215   // we need to update the offset table so that the beginnings of objects can be
   216   // found during scavenge.  Note that we are updating the offset table based on
   217   // where the object will be once the compaction phase finishes.
   219   // Always call cross_threshold().  A contiguous space can only call it when
    220   // the compaction_top exceeds the current threshold but not for a
   221   // non-contiguous space.
   222   cp->threshold =
   223     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
   224   return compact_top;
   225 }
   227 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
   228 // and use of single_block instead of alloc_block.  The name here is not really
   229 // appropriate - maybe a more general name could be invented for both the
   230 // contiguous and noncontiguous spaces.
   232 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
   233   _bt.single_block(start, the_end);
   234   return end();
   235 }
   237 // Initialize them to NULL.
   238 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
   239   for (size_t i = 0; i < IndexSetSize; i++) {
   240     // Note that on platforms where objects are double word aligned,
   241     // the odd array elements are not used.  It is convenient, however,
   242     // to map directly from the object size to the array element.
   243     _indexedFreeList[i].reset(IndexSetSize);
   244     _indexedFreeList[i].set_size(i);
   245     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   246     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   247     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   248     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   249   }
   250 }
   252 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
   253   for (int i = 1; i < IndexSetSize; i++) {
   254     assert(_indexedFreeList[i].size() == (size_t) i,
   255       "Indexed free list sizes are incorrect");
   256     _indexedFreeList[i].reset(IndexSetSize);
   257     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   258     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   259     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   260     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   261   }
   262 }
   264 void CompactibleFreeListSpace::reset(MemRegion mr) {
   265   resetIndexedFreeListArray();
   266   dictionary()->reset();
   267   if (BlockOffsetArrayUseUnallocatedBlock) {
   268     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
   269     // Everything's allocated until proven otherwise.
   270     _bt.set_unallocated_block(end());
   271   }
   272   if (!mr.is_empty()) {
   273     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
   274     _bt.single_block(mr.start(), mr.word_size());
   275     FreeChunk* fc = (FreeChunk*) mr.start();
   276     fc->setSize(mr.word_size());
   277     if (mr.word_size() >= IndexSetSize ) {
   278       returnChunkToDictionary(fc);
   279     } else {
   280       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
   281       _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
   282     }
   283   }
   284   _promoInfo.reset();
   285   _smallLinearAllocBlock._ptr = NULL;
   286   _smallLinearAllocBlock._word_size = 0;
   287 }
   289 void CompactibleFreeListSpace::reset_after_compaction() {
   290   // Reset the space to the new reality - one free chunk.
   291   MemRegion mr(compaction_top(), end());
   292   reset(mr);
   293   // Now refill the linear allocation block(s) if possible.
   294   if (_adaptive_freelists) {
   295     refillLinearAllocBlocksIfNeeded();
   296   } else {
   297     // Place as much of mr in the linAB as we can get,
   298     // provided it was big enough to go into the dictionary.
   299     FreeChunk* fc = dictionary()->findLargestDict();
   300     if (fc != NULL) {
   301       assert(fc->size() == mr.word_size(),
   302              "Why was the chunk broken up?");
   303       removeChunkFromDictionary(fc);
   304       HeapWord* addr = (HeapWord*) fc;
   305       _smallLinearAllocBlock.set(addr, fc->size() ,
   306         1024*SmallForLinearAlloc, fc->size());
   307       // Note that _unallocated_block is not updated here.
   308     }
   309   }
   310 }
   312 // Walks the entire dictionary, returning a coterminal
   313 // chunk, if it exists. Use with caution since it involves
   314 // a potentially complete walk of a potentially large tree.
   315 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
   317   assert_lock_strong(&_freelistLock);
   319   return dictionary()->find_chunk_ends_at(end());
   320 }
   323 #ifndef PRODUCT
   324 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
   325   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   326     _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
   327   }
   328 }
   330 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
   331   size_t sum = 0;
   332   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   333     sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
   334   }
   335   return sum;
   336 }
   338 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
   339   size_t count = 0;
   340   for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
   341     debug_only(
   342       ssize_t total_list_count = 0;
   343       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   344          fc = fc->next()) {
   345         total_list_count++;
   346       }
   347       assert(total_list_count ==  _indexedFreeList[i].count(),
   348         "Count in list is incorrect");
   349     )
   350     count += _indexedFreeList[i].count();
   351   }
   352   return count;
   353 }
   355 size_t CompactibleFreeListSpace::totalCount() {
   356   size_t num = totalCountInIndexedFreeLists();
   357   num +=  dictionary()->totalCount();
   358   if (_smallLinearAllocBlock._word_size != 0) {
   359     num++;
   360   }
   361   return num;
   362 }
   363 #endif
   365 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
   366   FreeChunk* fc = (FreeChunk*) p;
   367   return fc->isFree();
   368 }
   370 size_t CompactibleFreeListSpace::used() const {
   371   return capacity() - free();
   372 }
   374 size_t CompactibleFreeListSpace::free() const {
   375   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   376   // if you do this while the structures are in flux you
   377   // may get an approximate answer only; for instance
   378   // because there is concurrent allocation either
   379   // directly by mutators or for promotion during a GC.
   380   // It's "MT-safe", however, in the sense that you are guaranteed
   381   // not to crash and burn, for instance, because of walking
   382   // pointers that could disappear as you were walking them.
   383   // The approximation is because the various components
   384   // that are read below are not read atomically (and
   385   // further the computation of totalSizeInIndexedFreeLists()
    386   // is itself a non-atomic computation). The normal use of
   387   // this is during a resize operation at the end of GC
   388   // and at that time you are guaranteed to get the
   389   // correct actual value. However, for instance, this is
   390   // also read completely asynchronously by the "perf-sampler"
   391   // that supports jvmstat, and you are apt to see the values
   392   // flicker in such cases.
   393   assert(_dictionary != NULL, "No _dictionary?");
   394   return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
   395           totalSizeInIndexedFreeLists() +
   396           _smallLinearAllocBlock._word_size) * HeapWordSize;
   397 }
   399 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
   400   assert(_dictionary != NULL, "No _dictionary?");
   401   assert_locked();
   402   size_t res = _dictionary->maxChunkSize();
   403   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
   404                        (size_t) SmallForLinearAlloc - 1));
   405   // XXX the following could potentially be pretty slow;
    406   // should one, pessimally for the rare cases when res
    407   // calculated above is less than IndexSetSize,
   408   // just return res calculated above? My reasoning was that
   409   // those cases will be so rare that the extra time spent doesn't
   410   // really matter....
   411   // Note: do not change the loop test i >= res + IndexSetStride
   412   // to i > res below, because i is unsigned and res may be zero.
   413   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
   414        i -= IndexSetStride) {
   415     if (_indexedFreeList[i].head() != NULL) {
   416       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   417       return i;
   418     }
   419   }
   420   return res;
   421 }
   423 void LinearAllocBlock::print_on(outputStream* st) const {
   424   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
   425             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
   426             _ptr, _word_size, _refillSize, _allocation_size_limit);
   427 }
   429 void CompactibleFreeListSpace::print_on(outputStream* st) const {
   430   st->print_cr("COMPACTIBLE FREELIST SPACE");
   431   st->print_cr(" Space:");
   432   Space::print_on(st);
   434   st->print_cr("promoInfo:");
   435   _promoInfo.print_on(st);
   437   st->print_cr("_smallLinearAllocBlock");
   438   _smallLinearAllocBlock.print_on(st);
   440   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
   442   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
   443                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
   444 }
   446 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
   447 const {
   448   reportIndexedFreeListStatistics();
   449   gclog_or_tty->print_cr("Layout of Indexed Freelists");
   450   gclog_or_tty->print_cr("---------------------------");
   451   FreeList::print_labels_on(st, "size");
   452   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   453     _indexedFreeList[i].print_on(gclog_or_tty);
   454     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   455          fc = fc->next()) {
   456       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
   457                           fc, (HeapWord*)fc + i,
   458                           fc->cantCoalesce() ? "\t CC" : "");
   459     }
   460   }
   461 }
   463 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
   464 const {
   465   _promoInfo.print_on(st);
   466 }
   468 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
   469 const {
   470   _dictionary->reportStatistics();
   471   st->print_cr("Layout of Freelists in Tree");
   472   st->print_cr("---------------------------");
   473   _dictionary->print_free_lists(st);
   474 }
   476 class BlkPrintingClosure: public BlkClosure {
   477   const CMSCollector*             _collector;
   478   const CompactibleFreeListSpace* _sp;
   479   const CMSBitMap*                _live_bit_map;
   480   const bool                      _post_remark;
   481   outputStream*                   _st;
   482 public:
   483   BlkPrintingClosure(const CMSCollector* collector,
   484                      const CompactibleFreeListSpace* sp,
   485                      const CMSBitMap* live_bit_map,
   486                      outputStream* st):
   487     _collector(collector),
   488     _sp(sp),
   489     _live_bit_map(live_bit_map),
   490     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
   491     _st(st) { }
   492   size_t do_blk(HeapWord* addr);
   493 };
   495 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
   496   size_t sz = _sp->block_size_no_stall(addr, _collector);
   497   assert(sz != 0, "Should always be able to compute a size");
   498   if (_sp->block_is_obj(addr)) {
   499     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
   500     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
   501       addr,
   502       dead ? "dead" : "live",
   503       sz,
   504       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
   505     if (CMSPrintObjectsInDump && !dead) {
   506       oop(addr)->print_on(_st);
   507       _st->print_cr("--------------------------------------");
   508     }
   509   } else { // free block
   510     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
   511       addr, sz, CMSPrintChunksInDump ? ":" : ".");
   512     if (CMSPrintChunksInDump) {
   513       ((FreeChunk*)addr)->print_on(_st);
   514       _st->print_cr("--------------------------------------");
   515     }
   516   }
   517   return sz;
   518 }
   520 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
   521   outputStream* st) {
   522   st->print_cr("\n=========================");
   523   st->print_cr("Block layout in CMS Heap:");
   524   st->print_cr("=========================");
   525   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
   526   blk_iterate(&bpcl);
   528   st->print_cr("\n=======================================");
   529   st->print_cr("Order & Layout of Promotion Info Blocks");
   530   st->print_cr("=======================================");
   531   print_promo_info_blocks(st);
   533   st->print_cr("\n===========================");
   534   st->print_cr("Order of Indexed Free Lists");
    535   st->print_cr("===========================");
   536   print_indexed_free_lists(st);
   538   st->print_cr("\n=================================");
   539   st->print_cr("Order of Free Lists in Dictionary");
   540   st->print_cr("=================================");
   541   print_dictionary_free_lists(st);
   542 }
   545 void CompactibleFreeListSpace::reportFreeListStatistics() const {
   546   assert_lock_strong(&_freelistLock);
   547   assert(PrintFLSStatistics != 0, "Reporting error");
   548   _dictionary->reportStatistics();
   549   if (PrintFLSStatistics > 1) {
   550     reportIndexedFreeListStatistics();
   551     size_t totalSize = totalSizeInIndexedFreeLists() +
   552                        _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
   553     gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
   554   }
   555 }
   557 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
   558   assert_lock_strong(&_freelistLock);
   559   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
   560                       "--------------------------------\n");
   561   size_t totalSize = totalSizeInIndexedFreeLists();
   562   size_t   freeBlocks = numFreeBlocksInIndexedFreeLists();
   563   gclog_or_tty->print("Total Free Space: %d\n", totalSize);
   564   gclog_or_tty->print("Max   Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
   565   gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
   566   if (freeBlocks != 0) {
   567     gclog_or_tty->print("Av.  Block  Size: %d\n", totalSize/freeBlocks);
   568   }
   569 }
   571 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
   572   size_t res = 0;
   573   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   574     debug_only(
   575       ssize_t recount = 0;
   576       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   577          fc = fc->next()) {
   578         recount += 1;
   579       }
   580       assert(recount == _indexedFreeList[i].count(),
   581         "Incorrect count in list");
   582     )
   583     res += _indexedFreeList[i].count();
   584   }
   585   return res;
   586 }
   588 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
   589   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
   590     if (_indexedFreeList[i].head() != NULL) {
   591       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   592       return (size_t)i;
   593     }
   594   }
   595   return 0;
   596 }
   598 void CompactibleFreeListSpace::set_end(HeapWord* value) {
   599   HeapWord* prevEnd = end();
   600   assert(prevEnd != value, "unnecessary set_end call");
   601   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   602         "New end is below unallocated block");
   603   _end = value;
   604   if (prevEnd != NULL) {
   605     // Resize the underlying block offset table.
   606     _bt.resize(pointer_delta(value, bottom()));
   607     if (value <= prevEnd) {
   608       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   609              "New end is below unallocated block");
   610     } else {
   611       // Now, take this new chunk and add it to the free blocks.
   612       // Note that the BOT has not yet been updated for this block.
   613       size_t newFcSize = pointer_delta(value, prevEnd);
   614       // XXX This is REALLY UGLY and should be fixed up. XXX
   615       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
   616         // Mark the boundary of the new block in BOT
   617         _bt.mark_block(prevEnd, value);
   618         // put it all in the linAB
   619         if (ParallelGCThreads == 0) {
   620           _smallLinearAllocBlock._ptr = prevEnd;
   621           _smallLinearAllocBlock._word_size = newFcSize;
   622           repairLinearAllocBlock(&_smallLinearAllocBlock);
   623         } else { // ParallelGCThreads > 0
   624           MutexLockerEx x(parDictionaryAllocLock(),
   625                           Mutex::_no_safepoint_check_flag);
   626           _smallLinearAllocBlock._ptr = prevEnd;
   627           _smallLinearAllocBlock._word_size = newFcSize;
   628           repairLinearAllocBlock(&_smallLinearAllocBlock);
   629         }
   630         // Births of chunks put into a LinAB are not recorded.  Births
   631         // of chunks as they are allocated out of a LinAB are.
   632       } else {
   633         // Add the block to the free lists, if possible coalescing it
   634         // with the last free block, and update the BOT and census data.
   635         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
   636       }
   637     }
   638   }
   639 }
   641 class FreeListSpace_DCTOC : public Filtering_DCTOC {
   642   CompactibleFreeListSpace* _cfls;
   643   CMSCollector* _collector;
   644 protected:
   645   // Override.
   646 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
   647   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
   648                                        HeapWord* bottom, HeapWord* top, \
   649                                        ClosureType* cl);                \
   650       void walk_mem_region_with_cl_par(MemRegion mr,                    \
   651                                        HeapWord* bottom, HeapWord* top, \
   652                                        ClosureType* cl);                \
   653     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
   654                                        HeapWord* bottom, HeapWord* top, \
   655                                        ClosureType* cl)
   656   walk_mem_region_with_cl_DECL(OopClosure);
   657   walk_mem_region_with_cl_DECL(FilteringClosure);
   659 public:
   660   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
   661                       CMSCollector* collector,
   662                       OopClosure* cl,
   663                       CardTableModRefBS::PrecisionStyle precision,
   664                       HeapWord* boundary) :
   665     Filtering_DCTOC(sp, cl, precision, boundary),
   666     _cfls(sp), _collector(collector) {}
   667 };
   669 // We de-virtualize the block-related calls below, since we know that our
   670 // space is a CompactibleFreeListSpace.
   671 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
   672 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
   673                                                  HeapWord* bottom,              \
   674                                                  HeapWord* top,                 \
   675                                                  ClosureType* cl) {             \
   676    if (SharedHeap::heap()->n_par_threads() > 0) {                               \
   677      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   678    } else {                                                                     \
   679      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   680    }                                                                            \
   681 }                                                                               \
   682 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
   683                                                       HeapWord* bottom,         \
   684                                                       HeapWord* top,            \
   685                                                       ClosureType* cl) {        \
   686   /* Skip parts that are before "mr", in case "block_start" sent us             \
   687      back too far. */                                                           \
   688   HeapWord* mr_start = mr.start();                                              \
   689   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
   690   HeapWord* next = bottom + bot_size;                                           \
   691   while (next < mr_start) {                                                     \
   692     bottom = next;                                                              \
   693     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
   694     next = bottom + bot_size;                                                   \
   695   }                                                                             \
   696                                                                                 \
   697   while (bottom < top) {                                                        \
   698     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
   699         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   700                     oop(bottom)) &&                                             \
   701         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   702       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   703       bottom += _cfls->adjustObjectSize(word_sz);                               \
   704     } else {                                                                    \
   705       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
   706     }                                                                           \
   707   }                                                                             \
   708 }                                                                               \
   709 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
   710                                                         HeapWord* bottom,       \
   711                                                         HeapWord* top,          \
   712                                                         ClosureType* cl) {      \
   713   /* Skip parts that are before "mr", in case "block_start" sent us             \
   714      back too far. */                                                           \
   715   HeapWord* mr_start = mr.start();                                              \
   716   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
   717   HeapWord* next = bottom + bot_size;                                           \
   718   while (next < mr_start) {                                                     \
   719     bottom = next;                                                              \
   720     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
   721     next = bottom + bot_size;                                                   \
   722   }                                                                             \
   723                                                                                 \
   724   while (bottom < top) {                                                        \
   725     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
   726         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   727                     oop(bottom)) &&                                             \
   728         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   729       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   730       bottom += _cfls->adjustObjectSize(word_sz);                               \
   731     } else {                                                                    \
   732       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
   733     }                                                                           \
   734   }                                                                             \
   735 }
   737 // (There are only two of these, rather than N, because the split is due
   738 // only to the introduction of the FilteringClosure, a local part of the
   739 // impl of this abstraction.)
   740 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
   741 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
   743 DirtyCardToOopClosure*
   744 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
   745                                       CardTableModRefBS::PrecisionStyle precision,
   746                                       HeapWord* boundary) {
   747   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
   748 }
   751 // Note on locking for the space iteration functions:
   752 // since the collector's iteration activities are concurrent with
   753 // allocation activities by mutators, absent a suitable mutual exclusion
    754 // mechanism the iterators may go awry. For instance a block being iterated
   755 // may suddenly be allocated or divided up and part of it allocated and
   756 // so on.
   758 // Apply the given closure to each block in the space.
   759 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
   760   assert_lock_strong(freelistLock());
   761   HeapWord *cur, *limit;
   762   for (cur = bottom(), limit = end(); cur < limit;
   763        cur += cl->do_blk_careful(cur));
   764 }
   766 // Apply the given closure to each block in the space.
   767 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
   768   assert_lock_strong(freelistLock());
   769   HeapWord *cur, *limit;
   770   for (cur = bottom(), limit = end(); cur < limit;
   771        cur += cl->do_blk(cur));
   772 }
   774 // Apply the given closure to each oop in the space.
   775 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
   776   assert_lock_strong(freelistLock());
   777   HeapWord *cur, *limit;
   778   size_t curSize;
   779   for (cur = bottom(), limit = end(); cur < limit;
   780        cur += curSize) {
   781     curSize = block_size(cur);
   782     if (block_is_obj(cur)) {
   783       oop(cur)->oop_iterate(cl);
   784     }
   785   }
   786 }
   788 // Apply the given closure to each oop in the space \intersect memory region.
   789 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
   790   assert_lock_strong(freelistLock());
   791   if (is_empty()) {
   792     return;
   793   }
   794   MemRegion cur = MemRegion(bottom(), end());
   795   mr = mr.intersection(cur);
   796   if (mr.is_empty()) {
   797     return;
   798   }
   799   if (mr.equals(cur)) {
   800     oop_iterate(cl);
   801     return;
   802   }
   803   assert(mr.end() <= end(), "just took an intersection above");
   804   HeapWord* obj_addr = block_start(mr.start());
   805   HeapWord* t = mr.end();
   807   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
   808   if (block_is_obj(obj_addr)) {
   809     // Handle first object specially.
   810     oop obj = oop(obj_addr);
   811     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
   812   } else {
   813     FreeChunk* fc = (FreeChunk*)obj_addr;
   814     obj_addr += fc->size();
   815   }
   816   while (obj_addr < t) {
   817     HeapWord* obj = obj_addr;
   818     obj_addr += block_size(obj_addr);
   819     // If "obj_addr" is not greater than top, then the
   820     // entire object "obj" is within the region.
   821     if (obj_addr <= t) {
   822       if (block_is_obj(obj)) {
   823         oop(obj)->oop_iterate(cl);
   824       }
   825     } else {
   826       // "obj" extends beyond end of region
   827       if (block_is_obj(obj)) {
   828         oop(obj)->oop_iterate(&smr_blk);
   829       }
   830       break;
   831     }
   832   }
   833 }
   835 // NOTE: In the following methods, in order to safely be able to
   836 // apply the closure to an object, we need to be sure that the
   837 // object has been initialized. We are guaranteed that an object
   838 // is initialized if we are holding the Heap_lock with the
   839 // world stopped.
   840 void CompactibleFreeListSpace::verify_objects_initialized() const {
   841   if (is_init_completed()) {
   842     assert_locked_or_safepoint(Heap_lock);
   843     if (Universe::is_fully_initialized()) {
   844       guarantee(SafepointSynchronize::is_at_safepoint(),
   845                 "Required for objects to be initialized");
   846     }
   847   } // else make a concession at vm start-up
   848 }
   850 // Apply the given closure to each object in the space
   851 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
   852   assert_lock_strong(freelistLock());
   853   NOT_PRODUCT(verify_objects_initialized());
   854   HeapWord *cur, *limit;
   855   size_t curSize;
   856   for (cur = bottom(), limit = end(); cur < limit;
   857        cur += curSize) {
   858     curSize = block_size(cur);
   859     if (block_is_obj(cur)) {
   860       blk->do_object(oop(cur));
   861     }
   862   }
   863 }
   865 // Apply the given closure to each live object in the space
   866 //   The usage of CompactibleFreeListSpace
   867 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
    868 // objects in the space to contain references to objects that are no longer
   869 // valid.  For example, an object may reference another object
    870 // that has already been swept up (collected).  This method uses
   871 // obj_is_alive() to determine whether it is safe to apply the closure to
   872 // an object.  See obj_is_alive() for details on how liveness of an
   873 // object is decided.
   875 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
   876   assert_lock_strong(freelistLock());
   877   NOT_PRODUCT(verify_objects_initialized());
   878   HeapWord *cur, *limit;
   879   size_t curSize;
   880   for (cur = bottom(), limit = end(); cur < limit;
   881        cur += curSize) {
   882     curSize = block_size(cur);
   883     if (block_is_obj(cur) && obj_is_alive(cur)) {
   884       blk->do_object(oop(cur));
   885     }
   886   }
   887 }
   889 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
   890                                                   UpwardsObjectClosure* cl) {
   891   assert_locked(freelistLock());
   892   NOT_PRODUCT(verify_objects_initialized());
   893   Space::object_iterate_mem(mr, cl);
   894 }
   896 // Callers of this iterator beware: The closure application should
   897 // be robust in the face of uninitialized objects and should (always)
   898 // return a correct size so that the next addr + size below gives us a
   899 // valid block boundary. [See for instance,
   900 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   901 // in ConcurrentMarkSweepGeneration.cpp.]
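        // Illustrative sketch only (not a class from this file), assuming the
        // ObjectClosureCareful interface: a closure honoring the contract above
        // might look like the hypothetical CountParsableClosure below, which
        // returns 0 for unparsable objects and otherwise returns an adjusted
        // size so that addr + size stays on a block boundary.
        //
        //   class CountParsableClosure : public ObjectClosureCareful {
        //     CompactibleFreeListSpace* _sp;
        //     size_t _count;
        //    public:
        //     CountParsableClosure(CompactibleFreeListSpace* sp) : _sp(sp), _count(0) { }
        //     void do_object(oop p) { (void) do_object_careful(p); }
        //     size_t do_object_careful(oop p) {
        //       if (!p->is_parsable()) {
        //         return 0;  // signal early termination to the iterator
        //       }
        //       _count++;
        //       return _sp->adjustObjectSize(p->size());  // keep addr + size on a block boundary
        //     }
        //     size_t do_object_careful_m(oop p, MemRegion mr) {
        //       return do_object_careful(p);
        //     }
        //     size_t count() const { return _count; }
        //   };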
   902 HeapWord*
   903 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
   904   assert_lock_strong(freelistLock());
   905   HeapWord *addr, *last;
   906   size_t size;
   907   for (addr = bottom(), last  = end();
   908        addr < last; addr += size) {
   909     FreeChunk* fc = (FreeChunk*)addr;
   910     if (fc->isFree()) {
   911       // Since we hold the free list lock, which protects direct
   912       // allocation in this generation by mutators, a free object
   913       // will remain free throughout this iteration code.
   914       size = fc->size();
   915     } else {
   916       // Note that the object need not necessarily be initialized,
   917       // because (for instance) the free list lock does NOT protect
   918       // object initialization. The closure application below must
   919       // therefore be correct in the face of uninitialized objects.
   920       size = cl->do_object_careful(oop(addr));
   921       if (size == 0) {
   922         // An unparsable object found. Signal early termination.
   923         return addr;
   924       }
   925     }
   926   }
   927   return NULL;
   928 }
   930 // Callers of this iterator beware: The closure application should
   931 // be robust in the face of uninitialized objects and should (always)
   932 // return a correct size so that the next addr + size below gives us a
   933 // valid block boundary. [See for instance,
   934 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   935 // in ConcurrentMarkSweepGeneration.cpp.]
   936 HeapWord*
   937 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
   938   ObjectClosureCareful* cl) {
   939   assert_lock_strong(freelistLock());
   940   // Can't use used_region() below because it may not necessarily
   941   // be the same as [bottom(),end()); although we could
   942   // use [used_region().start(),round_to(used_region().end(),CardSize)),
   943   // that appears too cumbersome, so we just do the simpler check
   944   // in the assertion below.
   945   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
   946          "mr should be non-empty and within used space");
   947   HeapWord *addr, *end;
   948   size_t size;
   949   for (addr = block_start_careful(mr.start()), end  = mr.end();
   950        addr < end; addr += size) {
   951     FreeChunk* fc = (FreeChunk*)addr;
   952     if (fc->isFree()) {
   953       // Since we hold the free list lock, which protects direct
   954       // allocation in this generation by mutators, a free object
   955       // will remain free throughout this iteration code.
   956       size = fc->size();
   957     } else {
   958       // Note that the object need not necessarily be initialized,
   959       // because (for instance) the free list lock does NOT protect
   960       // object initialization. The closure application below must
   961       // therefore be correct in the face of uninitialized objects.
   962       size = cl->do_object_careful_m(oop(addr), mr);
   963       if (size == 0) {
   964         // An unparsable object found. Signal early termination.
   965         return addr;
   966       }
   967     }
   968   }
   969   return NULL;
   970 }
   973 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
   974   NOT_PRODUCT(verify_objects_initialized());
   975   return _bt.block_start(p);
   976 }
   978 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
   979   return _bt.block_start_careful(p);
   980 }
   982 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   983   NOT_PRODUCT(verify_objects_initialized());
   984   // This must be volatile, or else there is a danger that the compiler
   985   // will compile the code below into a sometimes-infinite loop, by keeping
   986   // the value read the first time in a register.
   987   while (true) {
   988     // We must do this until we get a consistent view of the object.
   989     if (FreeChunk::indicatesFreeChunk(p)) {
   990       volatile FreeChunk* fc = (volatile FreeChunk*)p;
   991       size_t res = fc->size();
   992       // If the object is still a free chunk, return the size, else it
   993       // has been allocated so try again.
   994       if (FreeChunk::indicatesFreeChunk(p)) {
   995         assert(res != 0, "Block size should not be 0");
   996         return res;
   997       }
   998     } else {
   999       // must read from what 'p' points to in each loop.
  1000       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
  1001       if (k != NULL) {
  1002         assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
  1003         oop o = (oop)p;
  1004         assert(o->is_parsable(), "Should be parsable");
  1005         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
  1006         size_t res = o->size_given_klass(k->klass_part());
  1007         res = adjustObjectSize(res);
  1008         assert(res != 0, "Block size should not be 0");
   1009         return res;
   1010       }
   1011     }
   1012   }
   1013 }
  1015 // A variant of the above that uses the Printezis bits for
  1016 // unparsable but allocated objects. This avoids any possible
  1017 // stalls waiting for mutators to initialize objects, and is
  1018 // thus potentially faster than the variant above. However,
  1019 // this variant may return a zero size for a block that is
  1020 // under mutation and for which a consistent size cannot be
  1021 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
  1022 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
  1023                                                      const CMSCollector* c)
  1024 const {
  1025   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1026   // This must be volatile, or else there is a danger that the compiler
  1027   // will compile the code below into a sometimes-infinite loop, by keeping
  1028   // the value read the first time in a register.
  1029   DEBUG_ONLY(uint loops = 0;)
  1030   while (true) {
  1031     // We must do this until we get a consistent view of the object.
  1032     if (FreeChunk::indicatesFreeChunk(p)) {
  1033       volatile FreeChunk* fc = (volatile FreeChunk*)p;
  1034       size_t res = fc->size();
  1035       if (FreeChunk::indicatesFreeChunk(p)) {
  1036         assert(res != 0, "Block size should not be 0");
  1037         assert(loops == 0, "Should be 0");
   1038         return res;
   1039       }
  1040     } else {
  1041       // must read from what 'p' points to in each loop.
  1042       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
  1043       // We trust the size of any object that has a non-NULL
  1044       // klass and (for those in the perm gen) is parsable
  1045       // -- irrespective of its conc_safe-ty.
  1046       if (k != NULL && ((oopDesc*)p)->is_parsable()) {
  1047         assert(k->is_oop(), "Should really be klass oop.");
  1048         oop o = (oop)p;
  1049         assert(o->is_oop(), "Should be an oop");
  1050         size_t res = o->size_given_klass(k->klass_part());
  1051         res = adjustObjectSize(res);
  1052         assert(res != 0, "Block size should not be 0");
  1053         return res;
  1054       } else {
  1055         // May return 0 if P-bits not present.
   1056         return c->block_size_if_printezis_bits(p);
   1057       }
   1058     }
  1059     assert(loops == 0, "Can loop at most once");
   1060     DEBUG_ONLY(loops++;)
   1061   }
   1062 }
  1064 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
  1065   NOT_PRODUCT(verify_objects_initialized());
  1066   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1067   FreeChunk* fc = (FreeChunk*)p;
  1068   if (fc->isFree()) {
  1069     return fc->size();
  1070   } else {
  1071     // Ignore mark word because this may be a recently promoted
  1072     // object whose mark word is used to chain together grey
  1073     // objects (the last one would have a null value).
  1074     assert(oop(p)->is_oop(true), "Should be an oop");
   1075     return adjustObjectSize(oop(p)->size());
   1076   }
   1077 }
  1079 // This implementation assumes that the property of "being an object" is
  1080 // stable.  But being a free chunk may not be (because of parallel
  1081 // promotion.)
  1082 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
  1083   FreeChunk* fc = (FreeChunk*)p;
  1084   assert(is_in_reserved(p), "Should be in space");
  1085   // When doing a mark-sweep-compact of the CMS generation, this
  1086   // assertion may fail because prepare_for_compaction() uses
  1087   // space that is garbage to maintain information on ranges of
  1088   // live objects so that these live ranges can be moved as a whole.
  1089   // Comment out this assertion until that problem can be solved
   1090   // (i.e., that the block start calculation may look at objects
   1091   // at addresses below "p" in finding the object that contains "p",
   1092   // and those objects (if garbage) may have been modified to hold
   1093   // live range information).
  1094   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
  1095   //        "Should be a block boundary");
  1096   if (FreeChunk::indicatesFreeChunk(p)) return false;
  1097   klassOop k = oop(p)->klass_or_null();
  1098   if (k != NULL) {
  1099     // Ignore mark word because it may have been used to
  1100     // chain together promoted objects (the last one
  1101     // would have a null value).
  1102     assert(oop(p)->is_oop(true), "Should be an oop");
  1103     return true;
  1104   } else {
   1105     return false;  // Was not an object at the start of collection.
   1106   }
   1107 }
  1109 // Check if the object is alive. This fact is checked either by consulting
  1110 // the main marking bitmap in the sweeping phase or, if it's a permanent
  1111 // generation and we're not in the sweeping phase, by checking the
  1112 // perm_gen_verify_bit_map where we store the "deadness" information if
  1113 // we did not sweep the perm gen in the most recent previous GC cycle.
  1114 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
  1115   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
  1116          "Else races are possible");
  1117   assert(block_is_obj(p), "The address should point to an object");
  1119   // If we're sweeping, we use object liveness information from the main bit map
  1120   // for both perm gen and old gen.
  1121   // We don't need to lock the bitmap (live_map or dead_map below), because
  1122   // EITHER we are in the middle of the sweeping phase, and the
  1123   // main marking bit map (live_map below) is locked,
  1124   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
  1125   // is stable, because it's mutated only in the sweeping phase.
  1126   // NOTE: This method is also used by jmap where, if class unloading is
  1127   // off, the results can return "false" for legitimate perm objects,
  1128   // when we are not in the midst of a sweeping phase, which can result
  1129   // in jmap not reporting certain perm gen objects. This will be moot
  1130   // if/when the perm gen goes away in the future.
  1131   if (_collector->abstract_state() == CMSCollector::Sweeping) {
  1132     CMSBitMap* live_map = _collector->markBitMap();
  1133     return live_map->par_isMarked((HeapWord*) p);
  1134   } else {
  1135     // If we're not currently sweeping and we haven't swept the perm gen in
  1136     // the previous concurrent cycle then we may have dead but unswept objects
  1137     // in the perm gen. In this case, we use the "deadness" information
  1138     // that we had saved in perm_gen_verify_bit_map at the last sweep.
  1139     if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
  1140       if (_collector->verifying()) {
  1141         CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
  1142         // Object is marked in the dead_map bitmap at the previous sweep
  1143         // when we know that it's dead; if the bitmap is not allocated then
  1144         // the object is alive.
  1145         return (dead_map->sizeInBits() == 0) // bit_map has not been allocated
  1146                || !dead_map->par_isMarked((HeapWord*) p);
  1147       } else {
  1148         return false; // We can't say for sure if it's live, so we say that it's dead.
  1152   return true;
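// For illustration, a minimal sketch of the liveness decision described above,
// assuming a hypothetical ToyBitMap type; the names below are stand-ins for
// the example only, not real VM types:
//
//   #include <cstddef>
//   struct ToyBitMap {
//     const unsigned char* bits;   // one byte per word, for simplicity
//     size_t               size;   // 0 => bitmap not allocated
//     bool is_marked(size_t idx) const { return size != 0 && bits[idx] != 0; }
//   };
//   // During sweeping consult the live map; otherwise, for unswept perm
//   // objects, consult the saved "deadness" map when verifying.
//   static bool toy_obj_is_alive(bool is_sweeping, bool in_perm, bool verifying,
//                                bool class_unloading,
//                                const ToyBitMap& live_map,
//                                const ToyBitMap& dead_map, size_t idx) {
//     if (is_sweeping)                  return live_map.is_marked(idx);
//     if (!class_unloading && in_perm) {
//       if (verifying)                  return dead_map.size == 0 ||
//                                              !dead_map.is_marked(idx);
//       return false;  // can't say for sure, so conservatively report dead
//     }
//     return true;
//   }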
  1155 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
  1156   FreeChunk* fc = (FreeChunk*)p;
  1157   assert(is_in_reserved(p), "Should be in space");
  1158   assert(_bt.block_start(p) == p, "Should be a block boundary");
  1159   if (!fc->isFree()) {
  1160     // Ignore mark word because it may have been used to
  1161     // chain together promoted objects (the last one
  1162     // would have a null value).
  1163     assert(oop(p)->is_oop(true), "Should be an oop");
  1164     return true;
  1166   return false;
  1169 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
  1170 // approximate answer if you don't hold the freelistlock when you call this.
  1171 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
  1172   size_t size = 0;
  1173   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  1174     debug_only(
  1175       // We may be calling here without the lock in which case we
  1176       // won't do this modest sanity check.
  1177       if (freelistLock()->owned_by_self()) {
  1178         size_t total_list_size = 0;
  1179         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
  1180           fc = fc->next()) {
  1181           total_list_size += i;
  1183         assert(total_list_size == i * _indexedFreeList[i].count(),
  1184                "Count in list is incorrect");
  1187     size += i * _indexedFreeList[i].count();
  1189   return size;
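// Illustratively, the sum above is simply sum over list index i of
// i * count(i), walking the index set with the configured stride; a small
// hypothetical sketch (toy_total_size and its parameters are stand-ins):
//
//   #include <cstddef>
//   static size_t toy_total_size(const size_t* counts,  // counts[i]: chunks of size i
//                                size_t start, size_t index_set_size,
//                                size_t stride) {
//     size_t total = 0;
//     for (size_t i = start; i < index_set_size; i += stride) {
//       total += i * counts[i];   // every chunk on list i is exactly i words
//     }
//     return total;
//   }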
  1192 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  1193   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  1194   return allocate(size);
  1197 HeapWord*
  1198 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
  1199   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
  1202 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  1203   assert_lock_strong(freelistLock());
  1204   HeapWord* res = NULL;
  1205   assert(size == adjustObjectSize(size),
  1206          "use adjustObjectSize() before calling into allocate()");
  1208   if (_adaptive_freelists) {
  1209     res = allocate_adaptive_freelists(size);
  1210   } else {  // non-adaptive free lists
  1211     res = allocate_non_adaptive_freelists(size);
  1214   if (res != NULL) {
  1215     // check that res does lie in this space!
  1216     assert(is_in_reserved(res), "Not in this space!");
  1217     assert(is_aligned((void*)res), "alignment check");
  1219     FreeChunk* fc = (FreeChunk*)res;
  1220     fc->markNotFree();
  1221     assert(!fc->isFree(), "shouldn't be marked free");
  1222     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
  1223     // Verify that the block offset table shows this to
  1224     // be a single block, but not one which is unallocated.
  1225     _bt.verify_single_block(res, size);
  1226     _bt.verify_not_unallocated(res, size);
  1227     // mangle a just allocated object with a distinct pattern.
  1228     debug_only(fc->mangleAllocated(size));
  1231   return res;
  1234 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
  1235   HeapWord* res = NULL;
  1236   // try and use linear allocation for smaller blocks
  1237   if (size < _smallLinearAllocBlock._allocation_size_limit) {
  1238     // if successful, the following also adjusts block offset table
  1239     res = getChunkFromSmallLinearAllocBlock(size);
  1241   // Else triage to indexed lists for smaller sizes
  1242   if (res == NULL) {
  1243     if (size < SmallForDictionary) {
  1244       res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1245     } else {
  1246       // else get it from the big dictionary; if even this doesn't
  1247       // work we are out of luck.
  1248       res = (HeapWord*)getChunkFromDictionaryExact(size);
  1252   return res;
  1255 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
  1256   assert_lock_strong(freelistLock());
  1257   HeapWord* res = NULL;
  1258   assert(size == adjustObjectSize(size),
  1259          "use adjustObjectSize() before calling into allocate()");
  1261   // Strategy
  1262   //   if small
  1263   //     exact size from small object indexed list if small
  1264   //     small or large linear allocation block (linAB) as appropriate
  1265   //     take from lists of greater sized chunks
  1266   //   else
  1267   //     dictionary
  1268   //     small or large linear allocation block if it has the space
  1269   // Try allocating exact size from indexTable first
  1270   if (size < IndexSetSize) {
  1271     res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1272     if (res != NULL) {
  1273       assert(res != (HeapWord*)_indexedFreeList[size].head(),
  1274         "Not removed from free list");
  1275       // no block offset table adjustment is necessary on blocks in
  1276       // the indexed lists.
  1278     // Try allocating from the small LinAB
  1279     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
  1280         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
  1281         // if successful, the above also adjusts block offset table
  1282         // Note that this call will refill the LinAB to
  1283         // satisfy the request.  This is different than
  1284         // evm.
  1285         // Don't record chunk off a LinAB?  smallSplitBirth(size);
  1286     } else {
  1287       // Raid the exact free lists larger than size, even if they are not
  1288       // overpopulated.
  1289       res = (HeapWord*) getChunkFromGreater(size);
  1291   } else {
  1292     // Big objects get allocated directly from the dictionary.
  1293     res = (HeapWord*) getChunkFromDictionaryExact(size);
  1294     if (res == NULL) {
  1295       // Try hard not to fail since an allocation failure will likely
  1296       // trigger a synchronous GC.  Try to get the space from the
  1297       // allocation blocks.
  1298       res = getChunkFromSmallLinearAllocBlockRemainder(size);
  1302   return res;
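// As a hypothetical sketch, the "Strategy" triage above reduces to a
// size-based decision chain; the ToyAllocators callbacks are stand-ins for the
// real allocators, not VM interfaces:
//
//   #include <cstddef>
//   typedef void* Word;
//   struct ToyAllocators {
//     Word (*from_indexed_exact)(size_t);
//     Word (*from_small_linab)(size_t);
//     Word (*from_greater_lists)(size_t);
//     Word (*from_dictionary_exact)(size_t);
//     Word (*from_linab_remainder)(size_t);
//   };
//   static Word toy_allocate_adaptive(const ToyAllocators& a, size_t size,
//                                     size_t index_set_size, size_t linab_limit) {
//     if (size < index_set_size) {                      // "small" request
//       if (Word res = a.from_indexed_exact(size)) return res;
//       if (size < linab_limit) {
//         if (Word res = a.from_small_linab(size)) return res;
//       }
//       return a.from_greater_lists(size);              // raid larger exact lists
//     }
//     if (Word res = a.from_dictionary_exact(size)) return res;
//     return a.from_linab_remainder(size);              // last-ditch effort
//   }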
  1305 // A worst-case estimate of the space required (in HeapWords) to expand the heap
  1306 // when promoting obj.
  1307 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  1308   // Depending on the object size, expansion may require refilling either a
  1309   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
  1310   // is added because the dictionary may over-allocate to avoid fragmentation.
  1311   size_t space = obj_size;
  1312   if (!_adaptive_freelists) {
  1313     space = MAX2(space, _smallLinearAllocBlock._refillSize);
  1315   space += _promoInfo.refillSize() + 2 * MinChunkSize;
  1316   return space;
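// A worked example of the worst-case estimate above, with made-up word sizes
// (all names and numbers here are hypothetical):
//
//   #include <cstddef>
//   static size_t toy_expansion_space_required(size_t obj_size,
//                                              bool   adaptive_freelists,
//                                              size_t small_linab_refill,  // e.g. 256
//                                              size_t promo_refill,        // e.g. 128
//                                              size_t min_chunk) {         // e.g. 3
//     size_t space = obj_size;
//     if (!adaptive_freelists) {
//       space = (space > small_linab_refill) ? space : small_linab_refill;
//     }
//     return space + promo_refill + 2 * min_chunk;
//   }
//   // e.g. obj_size = 10, non-adaptive: max(10, 256) + 128 + 2*3 = 390 words.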
  1319 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  1320   FreeChunk* ret;
  1322   assert(numWords >= MinChunkSize, "Size is less than minimum");
  1323   assert(linearAllocationWouldFail() || bestFitFirst(),
  1324     "Should not be here");
  1326   size_t i;
  1327   size_t currSize = numWords + MinChunkSize;
  1328   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  1329   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
  1330     FreeList* fl = &_indexedFreeList[i];
  1331     if (fl->head()) {
  1332       ret = getFromListGreater(fl, numWords);
  1333       assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
  1334       return ret;
  1338   currSize = MAX2((size_t)SmallForDictionary,
  1339                   (size_t)(numWords + MinChunkSize));
  1341   /* Try to get a chunk that satisfies request, while avoiding
  1342      fragmentation that can't be handled. */
  1344     ret =  dictionary()->getChunk(currSize);
  1345     if (ret != NULL) {
  1346       assert(ret->size() - numWords >= MinChunkSize,
  1347              "Chunk is too small");
  1348       _bt.allocated((HeapWord*)ret, ret->size());
  1349       /* Carve returned chunk. */
  1350       (void) splitChunkAndReturnRemainder(ret, numWords);
  1351       /* Label this as no longer a free chunk. */
  1352       assert(ret->isFree(), "This chunk should be free");
  1353       ret->linkPrev(NULL);
  1355     assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
  1356     return ret;
  1358   ShouldNotReachHere();
  1361 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
  1362   const {
  1363   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  1364   return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
  1367 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
  1368   if (fc->size() >= IndexSetSize) {
  1369     return dictionary()->verifyChunkInFreeLists(fc);
  1370   } else {
  1371     return verifyChunkInIndexedFreeLists(fc);
  1375 #ifndef PRODUCT
  1376 void CompactibleFreeListSpace::assert_locked() const {
  1377   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
  1380 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
  1381   CMSLockVerifier::assert_locked(lock);
  1383 #endif
  1385 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
  1386   // In the parallel case, the main thread holds the free list lock
  1387   // on behalf of the parallel threads.
  1388   FreeChunk* fc;
  1390     // If GC is parallel, this might be called by several threads.
  1391     // This should be rare enough that the locking overhead won't affect
  1392     // the sequential code.
  1393     MutexLockerEx x(parDictionaryAllocLock(),
  1394                     Mutex::_no_safepoint_check_flag);
  1395     fc = getChunkFromDictionary(size);
  1397   if (fc != NULL) {
  1398     fc->dontCoalesce();
  1399     assert(fc->isFree(), "Should be free, but not coalescable");
  1400     // Verify that the block offset table shows this to
  1401     // be a single block, but not one which is unallocated.
  1402     _bt.verify_single_block((HeapWord*)fc, fc->size());
  1403     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  1405   return fc;
  1408 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
  1409   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  1410   assert_locked();
  1412   // if we are tracking promotions, then first ensure space for
  1413   // promotion (including spooling space for saving header if necessary).
  1414   // then allocate and copy, then track promoted info if needed.
  1415   // When tracking (see PromotionInfo::track()), the mark word may
  1416   // be displaced and in this case restoration of the mark word
  1417   // occurs in the (oop_since_save_marks_)iterate phase.
  1418   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
  1419     return NULL;
  1421   // Call the allocate(size_t, bool) form directly to avoid the
  1422   // additional call through the allocate(size_t) form.  Having
  1423   // the compiler inline the call is problematic because allocate(size_t)
  1424   // is a virtual method.
  1425   HeapWord* res = allocate(adjustObjectSize(obj_size));
  1426   if (res != NULL) {
  1427     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
  1428     // if we should be tracking promotions, do so.
  1429     if (_promoInfo.tracking()) {
  1430         _promoInfo.track((PromotedObject*)res);
  1433   return oop(res);
  1436 HeapWord*
  1437 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
  1438   assert_locked();
  1439   assert(size >= MinChunkSize, "minimum chunk size");
  1440   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
  1441     "maximum from smallLinearAllocBlock");
  1442   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
  1445 HeapWord*
  1446 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
  1447                                                        size_t size) {
  1448   assert_locked();
  1449   assert(size >= MinChunkSize, "too small");
  1450   HeapWord* res = NULL;
  1451   // Try to do linear allocation from blk, but first make sure the block is non-empty.
  1452   if (blk->_word_size == 0) {
  1453     // We have probably been unable to fill this either in the prologue or
  1454     // when it was exhausted at the last linear allocation. Bail out until
  1455     // next time.
  1456     assert(blk->_ptr == NULL, "consistency check");
  1457     return NULL;
  1459   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
  1460   res = getChunkFromLinearAllocBlockRemainder(blk, size);
  1461   if (res != NULL) return res;
  1463   // about to exhaust this linear allocation block
  1464   if (blk->_word_size == size) { // exactly satisfied
  1465     res = blk->_ptr;
  1466     _bt.allocated(res, blk->_word_size);
  1467   } else if (size + MinChunkSize <= blk->_refillSize) {
  1468     size_t sz = blk->_word_size;
  1469     // Update _unallocated_block if the size is such that chunk would be
  1470     // returned to the indexed free list.  All other chunks in the indexed
  1471     // free lists are allocated from the dictionary so that _unallocated_block
  1472     // has already been adjusted for them.  Do it here so that the cost
  1473     // is incurred for all chunks added back to the indexed free lists.
  1474     if (sz < SmallForDictionary) {
  1475       _bt.allocated(blk->_ptr, sz);
  1477     // Return the chunk that isn't big enough, and then refill below.
  1478     addChunkToFreeLists(blk->_ptr, sz);
  1479     splitBirth(sz);
  1480     // Don't keep statistics on adding back chunk from a LinAB.
  1481   } else {
  1482     // A refilled block would not satisfy the request.
  1483     return NULL;
  1486   blk->_ptr = NULL; blk->_word_size = 0;
  1487   refillLinearAllocBlock(blk);
  1488   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
  1489          "block was replenished");
  1490   if (res != NULL) {
  1491     splitBirth(size);
  1492     repairLinearAllocBlock(blk);
  1493   } else if (blk->_ptr != NULL) {
  1494     res = blk->_ptr;
  1495     size_t blk_size = blk->_word_size;
  1496     blk->_word_size -= size;
  1497     blk->_ptr  += size;
  1498     splitBirth(size);
  1499     repairLinearAllocBlock(blk);
  1500     // Update BOT last so that other (parallel) GC threads see a consistent
  1501     // view of the BOT and free blocks.
  1502     // Above must occur before BOT is updated below.
  1503     OrderAccess::storestore();
  1504     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1506   return res;
  1509 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
  1510                                         LinearAllocBlock* blk,
  1511                                         size_t size) {
  1512   assert_locked();
  1513   assert(size >= MinChunkSize, "too small");
  1515   HeapWord* res = NULL;
  1516   // This is the common case.  Keep it simple.
  1517   if (blk->_word_size >= size + MinChunkSize) {
  1518     assert(blk->_ptr != NULL, "consistency check");
  1519     res = blk->_ptr;
  1520     // Note that the BOT is up-to-date for the linAB before allocation.  It
  1521     // indicates the start of the linAB.  The split_block() updates the
  1522     // BOT for the linAB after the allocation (indicates the start of the
  1523     // next chunk to be allocated).
  1524     size_t blk_size = blk->_word_size;
  1525     blk->_word_size -= size;
  1526     blk->_ptr  += size;
  1527     splitBirth(size);
  1528     repairLinearAllocBlock(blk);
  1529     // Update BOT last so that other (parallel) GC threads see a consistent
  1530     // view of the BOT and free blocks.
  1531     // Above must occur before BOT is updated below.
  1532     OrderAccess::storestore();
  1533     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1534     _bt.allocated(res, size);
  1536   return res;
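// A simplified sketch of carving an allocation off a linear allocation block
// remainder, as above: bump the pointer, shrink the remaining word size, and
// only then publish the new block boundary (publish_boundary is a hypothetical
// stand-in for the BOT update plus the store-store ordering):
//
//   #include <cstddef>
//   struct ToyLinAB { char* ptr; size_t word_size; };
//   static char* toy_carve(ToyLinAB* blk, size_t size_words, size_t min_chunk,
//                          void (*publish_boundary)(char* start, size_t old_sz,
//                                                   size_t carved_sz)) {
//     const size_t word_bytes = sizeof(void*);
//     if (blk->word_size < size_words + min_chunk) return 0;  // remainder too small
//     char*  res    = blk->ptr;
//     size_t old_sz = blk->word_size;
//     blk->word_size -= size_words;
//     blk->ptr       += size_words * word_bytes;
//     publish_boundary(res, old_sz, size_words);  // must follow the updates above
//     return res;
//   }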
  1539 FreeChunk*
  1540 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
  1541   assert_locked();
  1542   assert(size < SmallForDictionary, "just checking");
  1543   FreeChunk* res;
  1544   res = _indexedFreeList[size].getChunkAtHead();
  1545   if (res == NULL) {
  1546     res = getChunkFromIndexedFreeListHelper(size);
  1548   _bt.verify_not_unallocated((HeapWord*) res, size);
  1549   assert(res == NULL || res->size() == size, "Incorrect block size");
  1550   return res;
  1553 FreeChunk*
  1554 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
  1555   bool replenish) {
  1556   assert_locked();
  1557   FreeChunk* fc = NULL;
  1558   if (size < SmallForDictionary) {
  1559     assert(_indexedFreeList[size].head() == NULL ||
  1560       _indexedFreeList[size].surplus() <= 0,
  1561       "List for this size should be empty or under populated");
  1562     // Try best fit in exact lists before replenishing the list
  1563     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
  1564       // Replenish list.
  1565       //
  1566       // Things tried that failed.
  1567       //   Tried allocating out of the two LinAB's first before
  1568       // replenishing lists.
  1569       //   Tried small linAB of size 256 (size in indexed list)
  1570       // and replenishing indexed lists from the small linAB.
  1571       //
  1572       FreeChunk* newFc = NULL;
  1573       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
  1574       if (replenish_size < SmallForDictionary) {
  1575         // Do not replenish from an underpopulated size.
  1576         if (_indexedFreeList[replenish_size].surplus() > 0 &&
  1577             _indexedFreeList[replenish_size].head() != NULL) {
  1578           newFc = _indexedFreeList[replenish_size].getChunkAtHead();
  1579         } else if (bestFitFirst()) {
  1580           newFc = bestFitSmall(replenish_size);
  1583       if (newFc == NULL && replenish_size > size) {
  1584         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
  1585         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
  1587       // Note: The split-death stats update for the block obtained above
  1588       // is recorded below, precisely when we know we are actually going
  1589       // to split it into more than one piece.
  1590       if (newFc != NULL) {
  1591         if  (replenish || CMSReplenishIntermediate) {
  1592           // Replenish this list and return one block to caller.
  1593           size_t i;
  1594           FreeChunk *curFc, *nextFc;
  1595           size_t num_blk = newFc->size() / size;
  1596           assert(num_blk >= 1, "Smaller than requested?");
  1597           assert(newFc->size() % size == 0, "Should be integral multiple of request");
  1598           if (num_blk > 1) {
  1599             // we are sure we will be splitting the block just obtained
  1600             // into multiple pieces; record the split-death of the original
  1601             splitDeath(replenish_size);
  1603           // carve up and link blocks 0, ..., num_blk - 2
  1604           // The last chunk is not added to the lists but is returned as the
  1605           // free chunk.
  1606           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
  1607                i = 0;
  1608                i < (num_blk - 1);
  1609                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
  1610                i++) {
  1611             curFc->setSize(size);
  1612             // Don't record this as a return in order to try and
  1613             // determine the "returns" from a GC.
  1614             _bt.verify_not_unallocated((HeapWord*) curFc, size);
  1615             _indexedFreeList[size].returnChunkAtTail(curFc, false);
  1616             _bt.mark_block((HeapWord*)curFc, size);
  1617             splitBirth(size);
  1618             // Don't record the initial population of the indexed list
  1619             // as a split birth.
  1622           // check that the arithmetic was OK above
  1623           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
  1624             "inconsistency in carving newFc");
  1625           curFc->setSize(size);
  1626           _bt.mark_block((HeapWord*)curFc, size);
  1627           splitBirth(size);
  1628           fc = curFc;
  1629         } else {
  1630           // Return entire block to caller
  1631           fc = newFc;
  1635   } else {
  1636     // Get a free chunk from the free chunk dictionary to be returned to
  1637     // replenish the indexed free list.
  1638     fc = getChunkFromDictionaryExact(size);
  1640   // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
  1641   return fc;
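// Illustratively, the replenishment loop above carves a block of
// replenish_size words into num_blk = replenish_size / size equal pieces,
// queues the first num_blk - 1 on the indexed list, and hands the last piece
// back to the caller; a hypothetical sketch:
//
//   #include <cstddef>
//   #include <vector>
//   // Returns the byte offset of the piece handed to the caller; offsets of
//   // the queued pieces are appended to 'queued'.
//   static size_t toy_carve_replenish_block(size_t block_words, size_t piece_words,
//                                           std::vector<size_t>* queued) {
//     const size_t word_bytes = sizeof(void*);
//     size_t num_blk = block_words / piece_words;   // >= 1 by construction
//     size_t off = 0;
//     for (size_t i = 0; i + 1 < num_blk; i++) {    // pieces 0 .. num_blk - 2
//       queued->push_back(off);
//       off += piece_words * word_bytes;
//     }
//     return off;                                    // last piece for the caller
//   }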
  1644 FreeChunk*
  1645 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
  1646   assert_locked();
  1647   FreeChunk* fc = _dictionary->getChunk(size);
  1648   if (fc == NULL) {
  1649     return NULL;
  1651   _bt.allocated((HeapWord*)fc, fc->size());
  1652   if (fc->size() >= size + MinChunkSize) {
  1653     fc = splitChunkAndReturnRemainder(fc, size);
  1655   assert(fc->size() >= size, "chunk too small");
  1656   assert(fc->size() < size + MinChunkSize, "chunk too big");
  1657   _bt.verify_single_block((HeapWord*)fc, fc->size());
  1658   return fc;
  1661 FreeChunk*
  1662 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
  1663   assert_locked();
  1664   FreeChunk* fc = _dictionary->getChunk(size);
  1665   if (fc == NULL) {
  1666     return fc;
  1668   _bt.allocated((HeapWord*)fc, fc->size());
  1669   if (fc->size() == size) {
  1670     _bt.verify_single_block((HeapWord*)fc, size);
  1671     return fc;
  1673   assert(fc->size() > size, "getChunk() guarantee");
  1674   if (fc->size() < size + MinChunkSize) {
  1675     // Return the chunk to the dictionary and go get a bigger one.
  1676     returnChunkToDictionary(fc);
  1677     fc = _dictionary->getChunk(size + MinChunkSize);
  1678     if (fc == NULL) {
  1679       return NULL;
  1681     _bt.allocated((HeapWord*)fc, fc->size());
  1683   assert(fc->size() >= size + MinChunkSize, "tautology");
  1684   fc = splitChunkAndReturnRemainder(fc, size);
  1685   assert(fc->size() == size, "chunk is wrong size");
  1686   _bt.verify_single_block((HeapWord*)fc, size);
  1687   return fc;
  1690 void
  1691 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
  1692   assert_locked();
  1694   size_t size = chunk->size();
  1695   _bt.verify_single_block((HeapWord*)chunk, size);
  1696   // adjust _unallocated_block downward, as necessary
  1697   _bt.freed((HeapWord*)chunk, size);
  1698   _dictionary->returnChunk(chunk);
  1699 #ifndef PRODUCT
  1700   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1701     TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
  1703 #endif // PRODUCT
  1706 void
  1707 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
  1708   assert_locked();
  1709   size_t size = fc->size();
  1710   _bt.verify_single_block((HeapWord*) fc, size);
  1711   _bt.verify_not_unallocated((HeapWord*) fc, size);
  1712   if (_adaptive_freelists) {
  1713     _indexedFreeList[size].returnChunkAtTail(fc);
  1714   } else {
  1715     _indexedFreeList[size].returnChunkAtHead(fc);
  1717 #ifndef PRODUCT
  1718   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1719      _indexedFreeList[size].verify_stats();
  1721 #endif // PRODUCT
  1724 // Add chunk to end of last block -- if it's the largest
  1725 // block -- and update BOT and census data. We would
  1726 // of course have preferred to coalesce it with the
  1727 // last block, but it's currently less expensive to find the
  1728 // largest block than it is to find the last.
  1729 void
  1730 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
  1731   HeapWord* chunk, size_t     size) {
  1732   // check that the chunk does lie in this space!
  1733   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1734   // One of the parallel gc task threads may be here
  1735   // whilst others are allocating.
  1736   Mutex* lock = NULL;
  1737   if (ParallelGCThreads != 0) {
  1738     lock = &_parDictionaryAllocLock;
  1740   FreeChunk* ec;
  1742     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1743     ec = dictionary()->findLargestDict();  // get largest block
  1744     if (ec != NULL && ec->end() == chunk) {
  1745       // It's a coterminal block - we can coalesce.
  1746       size_t old_size = ec->size();
  1747       coalDeath(old_size);
  1748       removeChunkFromDictionary(ec);
  1749       size += old_size;
  1750     } else {
  1751       ec = (FreeChunk*)chunk;
  1754   ec->setSize(size);
  1755   debug_only(ec->mangleFreed(size));
  1756   if (size < SmallForDictionary) {
  1757     lock = _indexedFreeListParLocks[size];
  1759   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1760   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  1761   // record the birth under the lock since the recording involves
  1762   // manipulation of the list on which the chunk lives and
  1763   // if the chunk is allocated and is the last on the list,
  1764   // the list can go away.
  1765   coalBirth(size);
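// A small sketch of the coterminal-coalesce test above: if the largest
// dictionary block ends exactly where the new chunk begins, the two are merged
// before being re-added (ToyChunk is a hypothetical stand-in):
//
//   #include <cstddef>
//   struct ToyChunk { char* start; size_t size_words; };
//   static ToyChunk toy_maybe_coalesce(const ToyChunk* largest, char* chunk,
//                                      size_t size_words) {
//     const size_t word_bytes = sizeof(void*);
//     ToyChunk result = { chunk, size_words };
//     if (largest != 0 &&
//         largest->start + largest->size_words * word_bytes == chunk) {
//       result.start      = largest->start;                // coterminal: merge
//       result.size_words = largest->size_words + size_words;
//     }
//     return result;
//   }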
  1768 void
  1769 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
  1770                                               size_t     size) {
  1771   // check that the chunk does lie in this space!
  1772   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1773   assert_locked();
  1774   _bt.verify_single_block(chunk, size);
  1776   FreeChunk* fc = (FreeChunk*) chunk;
  1777   fc->setSize(size);
  1778   debug_only(fc->mangleFreed(size));
  1779   if (size < SmallForDictionary) {
  1780     returnChunkToFreeList(fc);
  1781   } else {
  1782     returnChunkToDictionary(fc);
  1786 void
  1787 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
  1788   size_t size, bool coalesced) {
  1789   assert_locked();
  1790   assert(chunk != NULL, "null chunk");
  1791   if (coalesced) {
  1792     // repair BOT
  1793     _bt.single_block(chunk, size);
  1795   addChunkToFreeLists(chunk, size);
  1798 // We _must_ find the purported chunk on our free lists;
  1799 // we assert if we don't.
  1800 void
  1801 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
  1802   size_t size = fc->size();
  1803   assert_locked();
  1804   debug_only(verifyFreeLists());
  1805   if (size < SmallForDictionary) {
  1806     removeChunkFromIndexedFreeList(fc);
  1807   } else {
  1808     removeChunkFromDictionary(fc);
  1810   _bt.verify_single_block((HeapWord*)fc, size);
  1811   debug_only(verifyFreeLists());
  1814 void
  1815 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
  1816   size_t size = fc->size();
  1817   assert_locked();
  1818   assert(fc != NULL, "null chunk");
  1819   _bt.verify_single_block((HeapWord*)fc, size);
  1820   _dictionary->removeChunk(fc);
  1821   // adjust _unallocated_block upward, as necessary
  1822   _bt.allocated((HeapWord*)fc, size);
  1825 void
  1826 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
  1827   assert_locked();
  1828   size_t size = fc->size();
  1829   _bt.verify_single_block((HeapWord*)fc, size);
  1830   NOT_PRODUCT(
  1831     if (FLSVerifyIndexTable) {
  1832       verifyIndexedFreeList(size);
  1835   _indexedFreeList[size].removeChunk(fc);
  1836   debug_only(fc->clearNext());
  1837   debug_only(fc->clearPrev());
  1838   NOT_PRODUCT(
  1839     if (FLSVerifyIndexTable) {
  1840       verifyIndexedFreeList(size);
  1845 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
  1846   /* A hint is the next larger size that has a surplus.
  1847      Start search at a size large enough to guarantee that
  1848      the excess is >= MIN_CHUNK. */
  1849   size_t start = align_object_size(numWords + MinChunkSize);
  1850   if (start < IndexSetSize) {
  1851     FreeList* it   = _indexedFreeList;
  1852     size_t    hint = _indexedFreeList[start].hint();
  1853     while (hint < IndexSetSize) {
  1854       assert(hint % MinObjAlignment == 0, "hint should be aligned");
  1855       FreeList *fl = &_indexedFreeList[hint];
  1856       if (fl->surplus() > 0 && fl->head() != NULL) {
  1857         // Found a list with surplus, reset original hint
  1858         // and split out a free chunk which is returned.
  1859         _indexedFreeList[start].set_hint(hint);
  1860         FreeChunk* res = getFromListGreater(fl, numWords);
  1861         assert(res == NULL || res->isFree(),
  1862           "Should be returning a free chunk");
  1863         return res;
  1865       hint = fl->hint(); /* keep looking */
  1867     /* None found. */
  1868     it[start].set_hint(IndexSetSize);
  1870   return NULL;
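// A hypothetical sketch of the hint chain walked above: each list remembers
// the next larger size that had a surplus; the walk follows hints until it
// finds a list with both a surplus and a non-empty head, then updates the
// starting list's hint:
//
//   #include <cstddef>
//   struct ToyList { long surplus; size_t count; size_t hint; };
//   // Returns the index of the donor list, or index_set_size if none found.
//   static size_t toy_follow_hints(ToyList* lists, size_t start,
//                                  size_t index_set_size) {
//     size_t hint = lists[start].hint;
//     while (hint < index_set_size) {
//       ToyList* fl = &lists[hint];
//       if (fl->surplus > 0 && fl->count > 0) {
//         lists[start].hint = hint;        // remember the successful donor
//         return hint;
//       }
//       hint = fl->hint;                   // keep looking
//     }
//     lists[start].hint = index_set_size;  // none found
//     return index_set_size;
//   }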
  1873 /* Requires fl->size >= numWords + MinChunkSize */
  1874 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
  1875   size_t numWords) {
  1876   FreeChunk *curr = fl->head();
  1877   assert(curr != NULL, "List is empty");
  1878   assert(numWords >= MinChunkSize, "Word size is too small");
  1879   size_t oldNumWords = curr->size();
  1880   assert(oldNumWords >= numWords + MinChunkSize,
  1881         "Size of chunks in the list is too small");
  1883   fl->removeChunk(curr);
  1884   // recorded indirectly by splitChunkAndReturnRemainder -
  1885   // smallSplit(oldNumWords, numWords);
  1886   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
  1887   // Does anything have to be done for the remainder in terms of
  1888   // fixing the card table?
  1889   assert(new_chunk == NULL || new_chunk->isFree(),
  1890     "Should be returning a free chunk");
  1891   return new_chunk;
  1894 FreeChunk*
  1895 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
  1896   size_t new_size) {
  1897   assert_locked();
  1898   size_t size = chunk->size();
  1899   assert(size > new_size, "Split from a smaller block?");
  1900   assert(is_aligned(chunk), "alignment problem");
  1901   assert(size == adjustObjectSize(size), "alignment problem");
  1902   size_t rem_size = size - new_size;
  1903   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
  1904   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
  1905   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
  1906   assert(is_aligned(ffc), "alignment problem");
  1907   ffc->setSize(rem_size);
  1908   ffc->linkNext(NULL);
  1909   ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  1910   // Above must occur before BOT is updated below.
  1911   // adjust block offset table
  1912   OrderAccess::storestore();
  1913   assert(chunk->isFree() && ffc->isFree(), "Error");
  1914   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
  1915   if (rem_size < SmallForDictionary) {
  1916     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  1917     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
  1918     returnChunkToFreeList(ffc);
  1919     split(size, rem_size);
  1920     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
  1921   } else {
  1922     returnChunkToDictionary(ffc);
  1923     split(size, rem_size);
  1925   chunk->setSize(new_size);
  1926   return chunk;
  1929 void
  1930 CompactibleFreeListSpace::sweep_completed() {
  1931   // Now that space is probably plentiful, refill linear
  1932   // allocation blocks as needed.
  1933   refillLinearAllocBlocksIfNeeded();
  1936 void
  1937 CompactibleFreeListSpace::gc_prologue() {
  1938   assert_locked();
  1939   if (PrintFLSStatistics != 0) {
  1940     gclog_or_tty->print("Before GC:\n");
  1941     reportFreeListStatistics();
  1943   refillLinearAllocBlocksIfNeeded();
  1946 void
  1947 CompactibleFreeListSpace::gc_epilogue() {
  1948   assert_locked();
  1949   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
  1950     if (_smallLinearAllocBlock._word_size == 0)
  1951       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
  1953   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1954   _promoInfo.stopTrackingPromotions();
  1955   repairLinearAllocationBlocks();
  1956   // Print Space's stats
  1957   if (PrintFLSStatistics != 0) {
  1958     gclog_or_tty->print("After GC:\n");
  1959     reportFreeListStatistics();
  1963 // Iteration support, mostly delegated from a CMS generation
  1965 void CompactibleFreeListSpace::save_marks() {
  1966   // mark the "end" of the used space at the time of this call;
  1967   // note, however, that promoted objects from this point
  1968   // on are tracked in the _promoInfo below.
  1969   set_saved_mark_word(unallocated_block());
  1970   // inform allocator that promotions should be tracked.
  1971   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1972   _promoInfo.startTrackingPromotions();
  1975 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
  1976   assert(_promoInfo.tracking(), "No preceding save_marks?");
  1977   assert(SharedHeap::heap()->n_par_threads() == 0,
  1978          "Shouldn't be called if using parallel gc.");
  1979   return _promoInfo.noPromotions();
  1982 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
  1984 void CompactibleFreeListSpace::                                             \
  1985 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  1986   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
  1987          "Shouldn't be called (yet) during parallel part of gc.");          \
  1988   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
  1989   /*                                                                        \
  1990    * This also restores any displaced headers and removes the elements from \
  1991    * the iteration set as they are processed, so that we have a clean slate \
  1992    * at the end of the iteration. Note, thus, that if new objects are       \
  1993    * promoted as a result of the iteration they are iterated over as well.  \
  1994    */                                                                       \
  1995   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
  1998 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
  2001 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
  2002   // ugghh... how would one do this efficiently for a non-contiguous space?
  2003   guarantee(false, "NYI");
  2006 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  2007   return _smallLinearAllocBlock._word_size == 0;
  2010 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
  2011   // Fix up linear allocation blocks to look like free blocks
  2012   repairLinearAllocBlock(&_smallLinearAllocBlock);
  2015 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
  2016   assert_locked();
  2017   if (blk->_ptr != NULL) {
  2018     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
  2019            "Minimum block size requirement");
  2020     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
  2021     fc->setSize(blk->_word_size);
  2022     fc->linkPrev(NULL);   // mark as free
  2023     fc->dontCoalesce();
  2024     assert(fc->isFree(), "just marked it free");
  2025     assert(fc->cantCoalesce(), "just marked it uncoalescable");
  2029 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
  2030   assert_locked();
  2031   if (_smallLinearAllocBlock._ptr == NULL) {
  2032     assert(_smallLinearAllocBlock._word_size == 0,
  2033       "Size of linAB should be zero if the ptr is NULL");
  2034     // Reset the linAB refill and allocation size limit.
  2035     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
  2037   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
  2040 void
  2041 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
  2042   assert_locked();
  2043   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
  2044          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
  2045          "blk invariant");
  2046   if (blk->_ptr == NULL) {
  2047     refillLinearAllocBlock(blk);
  2049   if (PrintMiscellaneous && Verbose) {
  2050     if (blk->_word_size == 0) {
  2051       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
  2056 void
  2057 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  2058   assert_locked();
  2059   assert(blk->_word_size == 0 && blk->_ptr == NULL,
  2060          "linear allocation block should be empty");
  2061   FreeChunk* fc;
  2062   if (blk->_refillSize < SmallForDictionary &&
  2063       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
  2064     // A linAB's strategy might be to use small sizes to reduce
  2065     // fragmentation but still get the benefits of allocation from a
  2066     // linAB.
  2067   } else {
  2068     fc = getChunkFromDictionary(blk->_refillSize);
  2070   if (fc != NULL) {
  2071     blk->_ptr  = (HeapWord*)fc;
  2072     blk->_word_size = fc->size();
  2073     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
  2077 // Support for concurrent collection policy decisions.
  2078 bool CompactibleFreeListSpace::should_concurrent_collect() const {
  2079   // In the future we might want to add in fragmentation stats --
  2080   // including erosion of the "mountain" into this decision as well.
  2081   return !adaptive_freelists() && linearAllocationWouldFail();
  2084 // Support for compaction
  2086 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  2087   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  2088   // prepare_for_compaction() uses the space between live objects
  2089   // so that a later phase can skip dead space quickly.  As a result,
  2090   // verification of the free lists doesn't work after this point.
  2093 #define obj_size(q) adjustObjectSize(oop(q)->size())
  2094 #define adjust_obj_size(s) adjustObjectSize(s)
  2096 void CompactibleFreeListSpace::adjust_pointers() {
  2097   // In other versions of adjust_pointers(), a bail out
  2098   // based on the amount of live data in the generation
  2099   // (i.e., if 0, bail out) may be used.
  2100   // Cannot test used() == 0 here because the free lists have already
  2101   // been mangled by the compaction.
  2103   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
  2104   // See note about verification in prepare_for_compaction().
  2107 void CompactibleFreeListSpace::compact() {
  2108   SCAN_AND_COMPACT(obj_size);
  2111 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
  2112 // where fbs is free block sizes
  2113 double CompactibleFreeListSpace::flsFrag() const {
  2114   size_t itabFree = totalSizeInIndexedFreeLists();
  2115   double frag = 0.0;
  2116   size_t i;
  2118   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2119     double sz  = i;
  2120     frag      += _indexedFreeList[i].count() * (sz * sz);
  2123   double totFree = itabFree +
  2124                    _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
  2125   if (totFree > 0) {
  2126     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
  2127             (totFree * totFree));
  2128     frag = (double)1.0  - frag;
  2129   } else {
  2130     assert(frag == 0.0, "Follows from totFree == 0");
  2132   return frag;
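// A worked example of the fragmentation metric defined above, computed over a
// plain array of free block sizes (names here are illustrative only):
//
//   #include <cstddef>
//   static double toy_fls_frag(const double* fbs, size_t n) {
//     double sum = 0.0, sum_sq = 0.0;
//     for (size_t i = 0; i < n; i++) {
//       sum    += fbs[i];
//       sum_sq += fbs[i] * fbs[i];
//     }
//     return (sum > 0.0) ? 1.0 - sum_sq / (sum * sum) : 0.0;
//   }
//   // One 100-word block:             1 - 10000/10000      = 0.0
//   // Four 25-word blocks:            1 - 4*625/10000      = 0.75
//   // A 97-word and a 3-word block:   1 - (9409+9)/10000   = 0.0582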
  2135 void CompactibleFreeListSpace::beginSweepFLCensus(
  2136   float inter_sweep_current,
  2137   float inter_sweep_estimate,
  2138   float intra_sweep_estimate) {
  2139   assert_locked();
  2140   size_t i;
  2141   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2142     FreeList* fl    = &_indexedFreeList[i];
  2143     if (PrintFLSStatistics > 1) {
  2144       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
  2146     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
  2147     fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
  2148     fl->set_beforeSweep(fl->count());
  2149     fl->set_bfrSurp(fl->surplus());
  2151   _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
  2152                                     inter_sweep_current,
  2153                                     inter_sweep_estimate,
  2154                                     intra_sweep_estimate);
  2157 void CompactibleFreeListSpace::setFLSurplus() {
  2158   assert_locked();
  2159   size_t i;
  2160   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2161     FreeList *fl = &_indexedFreeList[i];
  2162     fl->set_surplus(fl->count() -
  2163                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
  2167 void CompactibleFreeListSpace::setFLHints() {
  2168   assert_locked();
  2169   size_t i;
  2170   size_t h = IndexSetSize;
  2171   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
  2172     FreeList *fl = &_indexedFreeList[i];
  2173     fl->set_hint(h);
  2174     if (fl->surplus() > 0) {
  2175       h = i;
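// Illustratively, the loop above walks the index set from the largest size
// down and records, for each list, the next larger size that currently has a
// surplus; a hypothetical sketch (it assumes, as here, that the walk from
// index_set_size - 1 reaches 0 exactly with the given stride):
//
//   #include <cstddef>
//   static void toy_set_hints(const long* surplus, size_t* hint,
//                             size_t index_set_size, size_t stride) {
//     size_t h = index_set_size;              // "no larger surplus known yet"
//     for (size_t i = index_set_size - 1; i != 0; i -= stride) {
//       hint[i] = h;                          // next larger size with a surplus
//       if (surplus[i] > 0) {
//         h = i;                              // this size becomes the new hint
//       }
//     }
//   }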
  2180 void CompactibleFreeListSpace::clearFLCensus() {
  2181   assert_locked();
  2182   int i;
  2183   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2184     FreeList *fl = &_indexedFreeList[i];
  2185     fl->set_prevSweep(fl->count());
  2186     fl->set_coalBirths(0);
  2187     fl->set_coalDeaths(0);
  2188     fl->set_splitBirths(0);
  2189     fl->set_splitDeaths(0);
  2193 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  2194   if (PrintFLSStatistics > 0) {
  2195     HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
  2196     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
  2197                            largestAddr);
  2199   setFLSurplus();
  2200   setFLHints();
  2201   if (PrintGC && PrintFLSCensus > 0) {
  2202     printFLCensus(sweep_count);
  2204   clearFLCensus();
  2205   assert_locked();
  2206   _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
  2209 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
  2210   if (size < SmallForDictionary) {
  2211     FreeList *fl = &_indexedFreeList[size];
  2212     return (fl->coalDesired() < 0) ||
  2213            ((int)fl->count() > fl->coalDesired());
  2214   } else {
  2215     return dictionary()->coalDictOverPopulated(size);
  2219 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
  2220   assert(size < SmallForDictionary, "Size too large for indexed list");
  2221   FreeList *fl = &_indexedFreeList[size];
  2222   fl->increment_coalBirths();
  2223   fl->increment_surplus();
  2226 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
  2227   assert(size < SmallForDictionary, "Size too large for indexed list");
  2228   FreeList *fl = &_indexedFreeList[size];
  2229   fl->increment_coalDeaths();
  2230   fl->decrement_surplus();
  2233 void CompactibleFreeListSpace::coalBirth(size_t size) {
  2234   if (size  < SmallForDictionary) {
  2235     smallCoalBirth(size);
  2236   } else {
  2237     dictionary()->dictCensusUpdate(size,
  2238                                    false /* split */,
  2239                                    true /* birth */);
  2243 void CompactibleFreeListSpace::coalDeath(size_t size) {
  2244   if (size  < SmallForDictionary) {
  2245     smallCoalDeath(size);
  2246   } else {
  2247     dictionary()->dictCensusUpdate(size,
  2248                                    false /* split */,
  2249                                    false /* birth */);
  2253 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
  2254   assert(size < SmallForDictionary, "Size too large for indexed list");
  2255   FreeList *fl = &_indexedFreeList[size];
  2256   fl->increment_splitBirths();
  2257   fl->increment_surplus();
  2260 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
  2261   assert(size < SmallForDictionary, "Size too large for indexed list");
  2262   FreeList *fl = &_indexedFreeList[size];
  2263   fl->increment_splitDeaths();
  2264   fl->decrement_surplus();
  2267 void CompactibleFreeListSpace::splitBirth(size_t size) {
  2268   if (size  < SmallForDictionary) {
  2269     smallSplitBirth(size);
  2270   } else {
  2271     dictionary()->dictCensusUpdate(size,
  2272                                    true /* split */,
  2273                                    true /* birth */);
  2277 void CompactibleFreeListSpace::splitDeath(size_t size) {
  2278   if (size  < SmallForDictionary) {
  2279     smallSplitDeath(size);
  2280   } else {
  2281     dictionary()->dictCensusUpdate(size,
  2282                                    true /* split */,
  2283                                    false /* birth */);
  2287 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
  2288   size_t to2 = from - to1;
  2289   splitDeath(from);
  2290   splitBirth(to1);
  2291   splitBirth(to2);
  2294 void CompactibleFreeListSpace::print() const {
  2295   print_on(tty);
  2298 void CompactibleFreeListSpace::prepare_for_verify() {
  2299   assert_locked();
  2300   repairLinearAllocationBlocks();
  2301   // Verify that the SpoolBlocks look like free blocks of
  2302   // appropriate sizes... To be done ...
  2305 class VerifyAllBlksClosure: public BlkClosure {
  2306  private:
  2307   const CompactibleFreeListSpace* _sp;
  2308   const MemRegion                 _span;
  2309   HeapWord*                       _last_addr;
  2310   size_t                          _last_size;
  2311   bool                            _last_was_obj;
  2312   bool                            _last_was_live;
  2314  public:
  2315   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
  2316     MemRegion span) :  _sp(sp), _span(span),
  2317                        _last_addr(NULL), _last_size(0),
  2318                        _last_was_obj(false), _last_was_live(false) { }
  2320   virtual size_t do_blk(HeapWord* addr) {
  2321     size_t res;
  2322     bool   was_obj  = false;
  2323     bool   was_live = false;
  2324     if (_sp->block_is_obj(addr)) {
  2325       was_obj = true;
  2326       oop p = oop(addr);
  2327       guarantee(p->is_oop(), "Should be an oop");
  2328       res = _sp->adjustObjectSize(p->size());
  2329       if (_sp->obj_is_alive(addr)) {
  2330         was_live = true;
  2331         p->verify();
  2333     } else {
  2334       FreeChunk* fc = (FreeChunk*)addr;
  2335       res = fc->size();
  2336       if (FLSVerifyLists && !fc->cantCoalesce()) {
  2337         guarantee(_sp->verifyChunkInFreeLists(fc),
  2338                   "Chunk should be on a free list");
  2341     if (res == 0) {
  2342       gclog_or_tty->print_cr("Livelock: no rank reduction!");
  2343       gclog_or_tty->print_cr(
  2344         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
  2345         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
  2346         addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
  2347         _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
  2348       _sp->print_on(gclog_or_tty);
  2349       guarantee(false, "Seppuku!");
  2351     _last_addr = addr;
  2352     _last_size = res;
  2353     _last_was_obj  = was_obj;
  2354     _last_was_live = was_live;
  2355     return res;
  2357 };
  2359 class VerifyAllOopsClosure: public OopClosure {
  2360  private:
  2361   const CMSCollector*             _collector;
  2362   const CompactibleFreeListSpace* _sp;
  2363   const MemRegion                 _span;
  2364   const bool                      _past_remark;
  2365   const CMSBitMap*                _bit_map;
  2367  protected:
  2368   void do_oop(void* p, oop obj) {
  2369     if (_span.contains(obj)) { // the interior oop points into CMS heap
  2370       if (!_span.contains(p)) { // reference from outside CMS heap
  2371         // Should be a valid object; the first disjunct below allows
  2372         // us to sidestep an assertion in block_is_obj() that insists
  2373         // that p be in _sp. Note that several generations (and spaces)
  2374         // are spanned by _span (CMS heap) above.
  2375         guarantee(!_sp->is_in_reserved(obj) ||
  2376                   _sp->block_is_obj((HeapWord*)obj),
  2377                   "Should be an object");
  2378         guarantee(obj->is_oop(), "Should be an oop");
  2379         obj->verify();
  2380         if (_past_remark) {
  2381           // Remark has been completed, the object should be marked
  2382           _bit_map->isMarked((HeapWord*)obj);
  2384       } else { // reference within CMS heap
  2385         if (_past_remark) {
  2386           // Remark has been completed -- so the referent should have
  2387           // been marked, if referring object is.
  2388           if (_bit_map->isMarked(_collector->block_start(p))) {
  2389             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
  2393     } else if (_sp->is_in_reserved(p)) {
  2394       // the reference is from FLS, and points out of FLS
  2395       guarantee(obj->is_oop(), "Should be an oop");
  2396       obj->verify();
  2400   template <class T> void do_oop_work(T* p) {
  2401     T heap_oop = oopDesc::load_heap_oop(p);
  2402     if (!oopDesc::is_null(heap_oop)) {
  2403       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2404       do_oop(p, obj);
  2408  public:
  2409   VerifyAllOopsClosure(const CMSCollector* collector,
  2410     const CompactibleFreeListSpace* sp, MemRegion span,
  2411     bool past_remark, CMSBitMap* bit_map) :
  2412     OopClosure(), _collector(collector), _sp(sp), _span(span),
  2413     _past_remark(past_remark), _bit_map(bit_map) { }
  2415   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
  2416   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
  2417 };
  2419 void CompactibleFreeListSpace::verify(bool ignored) const {
  2420   assert_lock_strong(&_freelistLock);
  2421   verify_objects_initialized();
  2422   MemRegion span = _collector->_span;
  2423   bool past_remark = (_collector->abstract_state() ==
  2424                       CMSCollector::Sweeping);
  2426   ResourceMark rm;
  2427   HandleMark  hm;
  2429   // Check integrity of CFL data structures
  2430   _promoInfo.verify();
  2431   _dictionary->verify();
  2432   if (FLSVerifyIndexTable) {
  2433     verifyIndexedFreeLists();
  2435   // Check integrity of all objects and free blocks in space
  2437     VerifyAllBlksClosure cl(this, span);
  2438     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
  2440   // Check that all references in the heap to FLS
  2441   // are to valid objects in FLS or that references in
  2442   // FLS are to valid objects elsewhere in the heap
  2443   if (FLSVerifyAllHeapReferences)
  2445     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
  2446       _collector->markBitMap());
  2447     CollectedHeap* ch = Universe::heap();
  2448     ch->oop_iterate(&cl);              // all oops in generations
  2449     ch->permanent_oop_iterate(&cl);    // all oops in perm gen
  2452   if (VerifyObjectStartArray) {
  2453     // Verify the block offset table
  2454     _bt.verify();
  2458 #ifndef PRODUCT
  2459 void CompactibleFreeListSpace::verifyFreeLists() const {
  2460   if (FLSVerifyLists) {
  2461     _dictionary->verify();
  2462     verifyIndexedFreeLists();
  2463   } else {
  2464     if (FLSVerifyDictionary) {
  2465       _dictionary->verify();
  2467     if (FLSVerifyIndexTable) {
  2468       verifyIndexedFreeLists();
  2472 #endif
  2474 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
  2475   size_t i = 0;
  2476   for (; i < MinChunkSize; i++) {
  2477     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
  2479   for (; i < IndexSetSize; i++) {
  2480     verifyIndexedFreeList(i);
  2484 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
  2485   FreeChunk* fc   =  _indexedFreeList[size].head();
  2486   FreeChunk* tail =  _indexedFreeList[size].tail();
  2487   size_t    num = _indexedFreeList[size].count();
  2488   size_t      n = 0;
  2489   guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
  2490   for (; fc != NULL; fc = fc->next(), n++) {
  2491     guarantee(fc->size() == size, "Size inconsistency");
  2492     guarantee(fc->isFree(), "!free?");
  2493     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
  2494     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
  2496   guarantee(n == num, "Incorrect count");
  2499 #ifndef PRODUCT
  2500 void CompactibleFreeListSpace::checkFreeListConsistency() const {
  2501   assert(_dictionary->minSize() <= IndexSetSize,
  2502     "Some sizes can't be allocated without recourse to"
  2503     " linear allocation buffers");
  2504   assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
  2505     "else MIN_TREE_CHUNK_SIZE is wrong");
  2506   assert((IndexSetStride == 2 && IndexSetStart == 2) ||
  2507          (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
  2508   assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
  2509       "Some for-loops may be incorrectly initialized");
  2510   assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
  2511       "For-loops that iterate over IndexSet with stride 2 may be wrong");
  2513 #endif
  2515 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  2516   assert_lock_strong(&_freelistLock);
  2517   FreeList total;
  2518   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  2519   FreeList::print_labels_on(gclog_or_tty, "size");
  2520   size_t totalFree = 0;
  2521   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2522     const FreeList *fl = &_indexedFreeList[i];
  2523     totalFree += fl->count() * fl->size();
  2524     if (i % (40*IndexSetStride) == 0) {
  2525       FreeList::print_labels_on(gclog_or_tty, "size");
  2527     fl->print_on(gclog_or_tty);
  2528     total.set_bfrSurp(    total.bfrSurp()     + fl->bfrSurp()    );
  2529     total.set_surplus(    total.surplus()     + fl->surplus()    );
  2530     total.set_desired(    total.desired()     + fl->desired()    );
  2531     total.set_prevSweep(  total.prevSweep()   + fl->prevSweep()  );
  2532     total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
  2533     total.set_count(      total.count()       + fl->count()      );
  2534     total.set_coalBirths( total.coalBirths()  + fl->coalBirths() );
  2535     total.set_coalDeaths( total.coalDeaths()  + fl->coalDeaths() );
  2536     total.set_splitBirths(total.splitBirths() + fl->splitBirths());
  2537     total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
  2539   total.print_on(gclog_or_tty, "TOTAL");
  2540   gclog_or_tty->print_cr("Total free in indexed lists "
  2541                          SIZE_FORMAT " words", totalFree);
  2542   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
  2543     (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
  2544             (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
  2545     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
  2546   _dictionary->printDictCensus();
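// Restated for clarity, the two ratios printed above are
//   growth  = (splitBirths + coalBirths - splitDeaths - coalDeaths) / prevSweep
//   deficit = (desired - count) / desired
// with 1.0 substituted when the true denominator is zero; a tiny sketch:
//
//   static double toy_ratio(double numerator, double denominator) {
//     return numerator / (denominator != 0.0 ? denominator : 1.0);
//   }
//   // growth  = toy_ratio(sB + cB - sD - cD, prevSweep);
//   // deficit = toy_ratio(desired - count,   desired);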
  2549 ///////////////////////////////////////////////////////////////////////////
  2550 // CFLS_LAB
  2551 ///////////////////////////////////////////////////////////////////////////
  2553 #define VECTOR_257(x)                                                                                  \
  2554   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
  2555   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2556      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2557      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2558      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2559      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2560      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2561      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2562      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2563      x }
  2565 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
  2566 // OldPLABSize, whose static default is different; if overridden at the
  2567 // command-line, this will get reinitialized via a call to
  2568 // modify_initialization() below.
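       // All three tables below are statics indexed by block size (one slot per
       // size in [0, IndexSetSize)) and are shared by every per-worker CFLS_LAB.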
  2569 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
  2570   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
  2571 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
  2572 int    CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
  2574 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
  2575   _cfls(cfls)
  2576 {
  2577   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
  2578   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2579        i < CompactibleFreeListSpace::IndexSetSize;
  2580        i += CompactibleFreeListSpace::IndexSetStride) {
  2581     _indexedFreeList[i].set_size(i);
  2582     _num_blocks[i] = 0;
  2583   }
  2584 }
  2586 static bool _CFLS_LAB_modified = false;
  2588 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
  2589   assert(!_CFLS_LAB_modified, "Call only once");
  2590   _CFLS_LAB_modified = true;
  2591   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2592        i < CompactibleFreeListSpace::IndexSetSize;
  2593        i += CompactibleFreeListSpace::IndexSetStride) {
  2594     _blocks_to_claim[i].modify(n, wt, true /* force */);
  2595   }
  2596 }
  2598 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  2599   FreeChunk* res;
  2600   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
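         // Requests of IndexSetSize words or more go straight to the shared
         // dictionary under its lock; smaller requests are satisfied from this
         // LAB's private indexed lists, refilled from the global pool on demand.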
  2601   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
  2602     // This locking manages sync with other large object allocations.
  2603     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
  2604                     Mutex::_no_safepoint_check_flag);
  2605     res = _cfls->getChunkFromDictionaryExact(word_sz);
  2606     if (res == NULL) return NULL;
  2607   } else {
  2608     FreeList* fl = &_indexedFreeList[word_sz];
  2609     if (fl->count() == 0) {
  2610       // Attempt to refill this local free list.
  2611       get_from_global_pool(word_sz, fl);
  2612       // If it didn't work, give up.
  2613       if (fl->count() == 0) return NULL;
  2614     }
  2615     res = fl->getChunkAtHead();
  2616     assert(res != NULL, "Why was count non-zero?");
  2617   }
  2618   res->markNotFree();
  2619   assert(!res->isFree(), "shouldn't be marked free");
  2620   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  2621   // mangle a just allocated object with a distinct pattern.
  2622   debug_only(res->mangleAllocated(word_sz));
  2623   return (HeapWord*)res;
  2624 }
  2626 // Get a chunk of blocks of the right size and update related
  2627 // book-keeping stats
  2628 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
  2629   // Get the #blocks we want to claim
  2630   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
  2631   assert(n_blks > 0, "Error");
  2632   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
  2633   // In some cases, when the application has a phase change,
  2634   // there may be a sudden and sharp shift in the object survival
  2635   // profile, and updating the counts at the end of a scavenge
  2636   // may not be quick enough, giving rise to large scavenge pauses
  2637   // during these phase changes. It is beneficial to detect such
  2638   // changes on-the-fly during a scavenge and avoid such a phase-change
  2639   // pothole. The following code is a heuristic attempt to do that.
  2640   // It is protected by a product flag until we have gained
  2641   // enough experience with this heuristic and fine-tuned its behaviour.
  2642   // WARNING: This might increase fragmentation if we overreact to
  2643   // small spikes, so some kind of historical smoothing based on
  2644   // previous experience with the greater reactivity might be useful.
  2645   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
  2646   // default.
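         // As an illustration of the adjustment below (the numbers are made up,
         // not the flags' defaults): with n_blks == 16, _num_blocks[word_sz] == 600,
         // CMSOldPLABToleranceFactor == 4, CMSOldPLABNumRefills == 4 and
         // CMSOldPLABReactivityFactor == 2, multiple == 600/(4*4*16) == 2, so
         // n_blks grows by 2*2*16 == 64 to 80, subject to the CMSOldPLABMax cap.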
  2647   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
  2648     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
  2649     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
  2650     n_blks = MIN2(n_blks, CMSOldPLABMax);
  2651   }
  2652   assert(n_blks > 0, "Error");
  2653   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
  2654   // Update stats table entry for this block size
  2655   _num_blocks[word_sz] += fl->count();
  2656 }
  2658 void CFLS_LAB::compute_desired_plab_size() {
  2659   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2660        i < CompactibleFreeListSpace::IndexSetSize;
  2661        i += CompactibleFreeListSpace::IndexSetStride) {
  2662     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
  2663            "Counter inconsistency");
  2664     if (_global_num_workers[i] > 0) {
  2665       // Need to smooth wrt historical average
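             // For example (illustrative numbers only): if 8 workers together
             // retired 3200 blocks of this size and CMSOldPLABNumRefills is 4,
             // the new sample is 3200/(8*4) == 100 blocks per refill, clamped
             // to [CMSOldPLABMin, CMSOldPLABMax].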
  2666       if (ResizeOldPLAB) {
  2667         _blocks_to_claim[i].sample(
  2668           MAX2((size_t)CMSOldPLABMin,
  2669           MIN2((size_t)CMSOldPLABMax,
  2670                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
  2671       }
  2672       // Reset counters for next round
  2673       _global_num_workers[i] = 0;
  2674       _global_num_blocks[i] = 0;
  2675       if (PrintOldPLAB) {
  2676         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
  2677       }
  2678     }
  2679   }
  2680 }
  2682 void CFLS_LAB::retire(int tid) {
  2683   // We run this single threaded with the world stopped;
  2684   // so no need for locks and such.
  2685 #define CFLS_LAB_PARALLEL_ACCESS 0
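       // With CFLS_LAB_PARALLEL_ACCESS enabled, the retirement loop below walks
       // the size table top-down and takes the per-size-list lock before
       // prepending, so multiple threads could retire concurrently; with it set
       // to 0 the walk is bottom-up and unlocked, relying on the single-threaded,
       // stop-world caller asserted just below.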
  2686   NOT_PRODUCT(Thread* t = Thread::current();)
  2687   assert(Thread::current()->is_VM_thread(), "Error");
  2688   assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
  2689          "Would access an uninitialized slot below");
  2690 #if CFLS_LAB_PARALLEL_ACCESS
  2691   for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
  2692        i > 0;
  2693        i -= CompactibleFreeListSpace::IndexSetStride) {
  2694 #else // CFLS_LAB_PARALLEL_ACCESS
  2695   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2696        i < CompactibleFreeListSpace::IndexSetSize;
  2697        i += CompactibleFreeListSpace::IndexSetStride) {
  2698 #endif // !CFLS_LAB_PARALLEL_ACCESS
  2699     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
  2700            "Can't retire more than what we obtained");
  2701     if (_num_blocks[i] > 0) {
  2702       size_t num_retire =  _indexedFreeList[i].count();
  2703       assert(_num_blocks[i] > num_retire, "Should have used at least one");
  2704       {
  2705 #if CFLS_LAB_PARALLEL_ACCESS
  2706         MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
  2707                         Mutex::_no_safepoint_check_flag);
  2708 #endif // CFLS_LAB_PARALLEL_ACCESS
  2709         // Update globals stats for num_blocks used
  2710         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
  2711         _global_num_workers[i]++;
  2712         assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
  2713         if (num_retire > 0) {
  2714           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
  2715           // Reset this list.
  2716           _indexedFreeList[i] = FreeList();
  2717           _indexedFreeList[i].set_size(i);
  2718         }
  2719       }
  2720       if (PrintOldPLAB) {
  2721         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
  2722                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
  2723       }
  2724       // Reset stats for next round
  2725       _num_blocks[i]         = 0;
  2726     }
  2727   }
  2728 }
  2730 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
  2731   assert(fl->count() == 0, "Precondition.");
  2732   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
  2733          "Precondition");
  2735   // We'll try all multiples of word_sz in the indexed set, starting with
  2736   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
  2737   // then try getting a big chunk and splitting it.
  2738   {
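           // For example, with word_sz == 8 and n == 64: first try the 8-word
           // list itself (k == 1); if CMSSplitIndexedFreeListBlocks, also try the
           // 16-, 24-, ... word lists, splitting each chunk taken into k 8-word
           // blocks. Only if no indexed list can supply chunks do we fall
           // through to carving the blocks out of a single dictionary chunk.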
  2739     bool found;
  2740     int  k;
  2741     size_t cur_sz;
  2742     for (k = 1, cur_sz = k * word_sz, found = false;
  2743          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
  2744          (CMSSplitIndexedFreeListBlocks || k <= 1);
  2745          k++, cur_sz = k * word_sz) {
  2746       FreeList fl_for_cur_sz;  // Empty.
  2747       fl_for_cur_sz.set_size(cur_sz);
  2748       {
  2749         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
  2750                         Mutex::_no_safepoint_check_flag);
  2751         FreeList* gfl = &_indexedFreeList[cur_sz];
  2752         if (gfl->count() != 0) {
  2753           // nn is the number of chunks of size cur_sz that
  2754           // we'd need to split k-ways each, in order to create
  2755           // "n" chunks of size word_sz each.
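                 // E.g., n == 64 and k == 4 gives nn == 16: sixteen chunks of
                 // size 4*word_sz, each split four ways below, yield 64 blocks.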
  2756           const size_t nn = MAX2(n/k, (size_t)1);
  2757           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
  2758           found = true;
  2759           if (k > 1) {
  2760             // Update split death stats for the cur_sz-size blocks list:
  2761             // we increment the split death count by the number of blocks
  2762             // we just took from the cur_sz-size blocks list and which
  2763             // we will be splitting below.
  2764             ssize_t deaths = gfl->splitDeaths() +
  2765                              fl_for_cur_sz.count();
  2766             gfl->set_splitDeaths(deaths);
  2767           }
  2768         }
  2769       }
  2770       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
  2771       if (found) {
  2772         if (k == 1) {
  2773           fl->prepend(&fl_for_cur_sz);
  2774         } else {
  2775           // Divide each block on fl_for_cur_sz up k ways.
  2776           FreeChunk* fc;
  2777           while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
  2778             // Must do this in reverse order, so that anybody attempting to
  2779             // access the main chunk sees it as a single free block until we
  2780             // change it.
  2781             size_t fc_size = fc->size();
  2782             assert(fc->isFree(), "Error");
  2783             for (int i = k-1; i >= 0; i--) {
  2784               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2785               assert((i != 0) ||
  2786                         ((fc == ffc) && ffc->isFree() &&
  2787                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
  2788                         "Counting error");
  2789               ffc->setSize(word_sz);
  2790               ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2791               ffc->linkNext(NULL);
  2792               // Above must occur before BOT is updated below.
  2793               OrderAccess::storestore();
  2794               // splitting from the right, fc_size == i * word_sz
  2795               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
  2796               fc_size -= word_sz;
  2797               assert(fc_size == i*word_sz, "Error");
  2798               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
  2799               _bt.verify_single_block((HeapWord*)fc, fc_size);
  2800               _bt.verify_single_block((HeapWord*)ffc, word_sz);
  2801               // Push this on "fl".
  2802               fl->returnChunkAtHead(ffc);
  2803             }
  2804             // TRAP
  2805             assert(fl->tail()->next() == NULL, "List invariant.");
  2806           }
  2807         }
  2808         // Update birth stats for this block size.
  2809         size_t num = fl->count();
  2810         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  2811                         Mutex::_no_safepoint_check_flag);
  2812         ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
  2813         _indexedFreeList[word_sz].set_splitBirths(births);
  2814         return;
  2815       }
  2816     }
  2817   }
  2818   // Otherwise, we'll split a block from the dictionary.
  2819   FreeChunk* fc = NULL;
  2820   FreeChunk* rem_fc = NULL;
  2821   size_t rem;
  2822   {
  2823     MutexLockerEx x(parDictionaryAllocLock(),
  2824                     Mutex::_no_safepoint_check_flag);
  2825     while (n > 0) {
  2826       fc = dictionary()->getChunk(MAX2(n * word_sz,
  2827                                   _dictionary->minSize()),
  2828                                   FreeBlockDictionary::atLeast);
  2829       if (fc != NULL) {
  2830         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
  2831         dictionary()->dictCensusUpdate(fc->size(),
  2832                                        true /*split*/,
  2833                                        false /*birth*/);
  2834         break;
  2835       } else {
  2836         n--;
  2837       }
  2838     }
  2839     if (fc == NULL) return;
  2840     // Otherwise, split up that block.
  2841     assert((ssize_t)n >= 1, "Control point invariant");
  2842     assert(fc->isFree(), "Error: should be a free block");
  2843     _bt.verify_single_block((HeapWord*)fc, fc->size());
  2844     const size_t nn = fc->size() / word_sz;
  2845     n = MIN2(nn, n);
  2846     assert((ssize_t)n >= 1, "Control point invariant");
  2847     rem = fc->size() - n * word_sz;
  2848     // If there is a remainder, and it's too small, allocate one fewer.
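           // For example (illustrative sizes): word_sz == 10 and a 57-word chunk
           // give n == 5 (assuming at least that many were requested) and rem == 7;
           // if 7 were below MinChunkSize we would give back one block, leaving
           // n == 4 and rem == 17.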
  2849     if (rem > 0 && rem < MinChunkSize) {
  2850       n--; rem += word_sz;
  2851     }
  2852     // Note that at this point we may have n == 0.
  2853     assert((ssize_t)n >= 0, "Control point invariant");
  2855     // If n is 0, the chunk fc that was found is not large
  2856     // enough to leave a viable remainder.  We are unable to
  2857     // allocate even one block.  Return fc to the
  2858     // dictionary and return, leaving "fl" empty.
  2859     if (n == 0) {
  2860       returnChunkToDictionary(fc);
  2861       assert(fl->count() == 0, "We never allocated any blocks");
  2862       return;
  2863     }
  2865     // First return the remainder, if any.
  2866     // Note that we hold the lock until we decide if we're going to give
  2867     // back the remainder to the dictionary, since a concurrent allocation
  2868     // may otherwise see the heap as empty.  (We're willing to take that
  2869     // hit if the block is a small block.)
  2870     if (rem > 0) {
  2871       size_t prefix_size = n * word_sz;
  2872       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
  2873       rem_fc->setSize(rem);
  2874       rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2875       rem_fc->linkNext(NULL);
  2876       // Above must occur before BOT is updated below.
  2877       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
  2878       OrderAccess::storestore();
  2879       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
  2880       assert(fc->isFree(), "Error");
  2881       fc->setSize(prefix_size);
  2882       if (rem >= IndexSetSize) {
  2883         returnChunkToDictionary(rem_fc);
  2884         dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
  2885         rem_fc = NULL;
  2886       }
  2887       // Otherwise, return it to the small list below.
  2888     }
  2889   }
  2890   if (rem_fc != NULL) {
  2891     MutexLockerEx x(_indexedFreeListParLocks[rem],
  2892                     Mutex::_no_safepoint_check_flag);
  2893     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
  2894     _indexedFreeList[rem].returnChunkAtHead(rem_fc);
  2895     smallSplitBirth(rem);
  2896   }
  2897   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  2898   // Now do the splitting up.
  2899   // Must do this in reverse order, so that anybody attempting to
  2900   // access the main chunk sees it as a single free block until we
  2901   // change it.
  2902   size_t fc_size = n * word_sz;
  2903   // All but first chunk in this loop
  2904   for (ssize_t i = n-1; i > 0; i--) {
  2905     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2906     ffc->setSize(word_sz);
  2907     ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2908     ffc->linkNext(NULL);
  2909     // Above must occur before BOT is updated below.
  2910     OrderAccess::storestore();
  2911     // splitting from the right; after the decrement below, fc_size == i * word_sz
  2912     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
  2913     fc_size -= word_sz;
  2914     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
  2915     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
  2916     _bt.verify_single_block((HeapWord*)fc, fc_size);
  2917     // Push this on "fl".
  2918     fl->returnChunkAtHead(ffc);
  2919   }
  2920   // First chunk
  2921   assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
  2922   // The blocks above should show their new sizes before the first block below
  2923   fc->setSize(word_sz);
  2924   fc->linkPrev(NULL);    // idempotent wrt free-ness, see assert above
  2925   fc->linkNext(NULL);
  2926   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  2927   _bt.verify_single_block((HeapWord*)fc, fc->size());
  2928   fl->returnChunkAtHead(fc);
  2930   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  2931   {
  2932     // Update the stats for this block size.
  2933     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  2934                     Mutex::_no_safepoint_check_flag);
  2935     const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
  2936     _indexedFreeList[word_sz].set_splitBirths(births);
  2937     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
  2938     // _indexedFreeList[word_sz].set_surplus(new_surplus);
  2939   }
  2941   // TRAP
  2942   assert(fl->tail()->next() == NULL, "List invariant.");
  2943 }
  2945 // Set up the space's par_seq_tasks structure for work claiming
  2946 // for parallel rescan. See CMSParRemarkTask where this is currently used.
  2947 // XXX Need to suitably abstract and generalize this and the next
  2948 // method into one.
  2949 void
  2950 CompactibleFreeListSpace::
  2951 initialize_sequential_subtasks_for_rescan(int n_threads) {
  2952   // The "size" of each task is fixed according to rescan_task_size.
  2953   assert(n_threads > 0, "Unexpected n_threads argument");
  2954   const size_t task_size = rescan_task_size();
  2955   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
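         // That is, n_tasks is the ceiling of used_region().word_size()/task_size;
         // every task spans task_size words except possibly the last.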
  2956   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  2957   assert(n_tasks == 0 ||
  2958          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
  2959           (used_region().start() + n_tasks*task_size >= used_region().end())),
  2960          "n_tasks calculation incorrect");
  2961   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  2962   assert(!pst->valid(), "Clobbering existing data?");
  2963   // Sets the condition for completion of the subtask (how many threads
  2964   // need to finish in order to be done).
  2965   pst->set_n_threads(n_threads);
  2966   pst->set_n_tasks((int)n_tasks);
  2967 }
  2969 // Set up the space's par_seq_tasks structure for work claiming
  2970 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
  2971 void
  2972 CompactibleFreeListSpace::
  2973 initialize_sequential_subtasks_for_marking(int n_threads,
  2974                                            HeapWord* low) {
  2975   // The "size" of each task is fixed according to marking_task_size.
  2976   assert(n_threads > 0, "Unexpected n_threads argument");
  2977   const size_t task_size = marking_task_size();
  2978   assert(task_size > CardTableModRefBS::card_size_in_words &&
  2979          (task_size %  CardTableModRefBS::card_size_in_words == 0),
  2980          "Otherwise arithmetic below would be incorrect");
  2981   MemRegion span = _gen->reserved();
  2982   if (low != NULL) {
  2983     if (span.contains(low)) {
  2984       // Align low down to  a card boundary so that
  2985       // we can use block_offset_careful() on span boundaries.
  2986       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
  2987                                  CardTableModRefBS::card_size);
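             // align_size_down rounds low down to a multiple of card_size, so the
             // clipped span still starts on a card boundary, as asserted below.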
  2988       // Clip span prefix at aligned_low
  2989       span = span.intersection(MemRegion(aligned_low, span.end()));
  2990     } else if (low > span.end()) {
  2991       span = MemRegion(low, low);  // Null region
  2992     } // else use entire span
  2993   }
  2994   assert(span.is_empty() ||
  2995          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
  2996         "span should start at a card boundary");
  2997   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  2998   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  2999   assert(n_tasks == 0 ||
  3000          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
  3001           (span.start() + n_tasks*task_size >= span.end())),
  3002          "n_tasks calculation incorrect");
  3003   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  3004   assert(!pst->valid(), "Clobbering existing data?");
  3005   // Sets the condition for completion of the subtask (how many threads
  3006   // need to finish in order to be done).
  3007   pst->set_n_threads(n_threads);
  3008   pst->set_n_tasks((int)n_tasks);
  3009 }
