src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author       jmasa
date         Thu, 07 Jan 2010 08:14:45 -0800
changeset    1583:05b775309e59
parent       1580:e018e6884bd8
child        1876:a8127dc669ba
permissions  -rw-r--r--

6912018: CMS: guarantee(head() != 0,"The head of the list cannot be NULL")
Summary: A block too small to split was not correctly put back on the free lists.
Reviewed-by: ysr
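
Illustration (not the actual patch): the summary refers to the rule that a free
chunk whose remainder after a split would be smaller than MinChunkSize must not
be split, and instead has to go back on the free lists whole. Below is a
minimal, self-contained C++ sketch of that rule; the names Chunk,
MinChunkSizeWords and take_from are hypothetical stand-ins, not HotSpot types.

    #include <cassert>
    #include <cstddef>

    struct Chunk { std::size_t size; };         // stand-in for a free chunk

    const std::size_t MinChunkSizeWords = 4;    // assumed minimum chunk size

    // A chunk may be split only if the piece left behind is itself large
    // enough to live on a free list.
    bool can_split(const Chunk& c, std::size_t want) {
      return c.size >= want + MinChunkSizeWords;
    }

    // Satisfy a request of 'want' words from chunk 'c': split when legal,
    // otherwise hand out (or, in the collector, put back) the chunk whole.
    std::size_t take_from(Chunk& c, std::size_t want) {
      assert(want >= MinChunkSizeWords);
      if (can_split(c, want)) {
        c.size -= want;                         // remainder stays free
        return want;
      }
      std::size_t whole = c.size;               // too small to split
      c.size = 0;
      return whole;
    }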

     1 /*
     2  * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_compactibleFreeListSpace.cpp.incl"
    28 /////////////////////////////////////////////////////////////////////////
    29 //// CompactibleFreeListSpace
    30 /////////////////////////////////////////////////////////////////////////
    32 // highest lock rank used by the free list locks
    33 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
    35 // Constructor
    36 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
    37   MemRegion mr, bool use_adaptive_freelists,
    38   FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
    39   _dictionaryChoice(dictionaryChoice),
    40   _adaptive_freelists(use_adaptive_freelists),
    41   _bt(bs, mr),
    42   // free list locks are in the range of values taken by _lockRank
    43   // This range currently is [_leaf+2, _leaf+3]
    44   // Note: this requires that CFLspace c'tors
    45   // are called serially in the order in which the locks
    46   // are acquired in the program text. This is true today.
    47   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
    48   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
    49                           "CompactibleFreeListSpace._dict_par_lock", true),
    50   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    51                     CMSRescanMultiple),
    52   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    53                     CMSConcMarkMultiple),
    54   _collector(NULL)
    55 {
    56   _bt.set_space(this);
    57   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    58   // We have all of "mr", all of which we place in the dictionary
    59   // as one big chunk. We'll need to decide here which of several
    60   // possible alternative dictionary implementations to use. For
    61   // now the choice is easy, since we have only one working
    62   // implementation, namely, the simple binary tree (splaying
    63   // temporarily disabled).
    64   switch (dictionaryChoice) {
    65     case FreeBlockDictionary::dictionarySplayTree:
    66     case FreeBlockDictionary::dictionarySkipList:
    67     default:
    68       warning("dictionaryChoice: selected option not understood; using"
    69               " default BinaryTreeDictionary implementation instead.");
    70     case FreeBlockDictionary::dictionaryBinaryTree:
    71       _dictionary = new BinaryTreeDictionary(mr);
    72       break;
    73   }
    74   assert(_dictionary != NULL, "CMS dictionary initialization");
    75   // The indexed free lists are initially all empty and are lazily
    76   // filled in on demand. Initialize the array elements to NULL.
    77   initializeIndexedFreeListArray();
    79   // Not using adaptive free lists assumes that allocation is first
    80   // from the linAB's.  Also a cms perm gen which can be compacted
    81   // has to have the klass's klassKlass allocated at a lower
    82   // address in the heap than the klass so that the klassKlass is
    83   // moved to its new location before the klass is moved.
    84   // Set the _refillSize for the linear allocation blocks
    85   if (!use_adaptive_freelists) {
    86     FreeChunk* fc = _dictionary->getChunk(mr.word_size());
    87     // The small linAB initially has all the space and will allocate
    88     // a chunk of any size.
    89     HeapWord* addr = (HeapWord*) fc;
    90     _smallLinearAllocBlock.set(addr, fc->size() ,
    91       1024*SmallForLinearAlloc, fc->size());
    92     // Note that _unallocated_block is not updated here.
    93     // Allocations from the linear allocation block should
    94     // update it.
    95   } else {
    96     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
    97                                SmallForLinearAlloc);
    98   }
    99   // CMSIndexedFreeListReplenish should be at least 1
   100   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
   101   _promoInfo.setSpace(this);
   102   if (UseCMSBestFit) {
   103     _fitStrategy = FreeBlockBestFitFirst;
   104   } else {
   105     _fitStrategy = FreeBlockStrategyNone;
   106   }
   107   checkFreeListConsistency();
   109   // Initialize locks for parallel case.
   110   if (ParallelGCThreads > 0) {
   111     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   112       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
   113                                               "a freelist par lock",
   114                                               true);
   115       if (_indexedFreeListParLocks[i] == NULL)
   116         vm_exit_during_initialization("Could not allocate a par lock");
   117       DEBUG_ONLY(
   118         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
   119       )
   120     }
   121     _dictionary->set_par_lock(&_parDictionaryAllocLock);
   122   }
   123 }
   125 // Like CompactibleSpace forward() but always calls cross_threshold() to
   126 // update the block offset table.  Removed initialize_threshold call because
   127 // CFLS does not use a block offset array for contiguous spaces.
   128 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
   129                                     CompactPoint* cp, HeapWord* compact_top) {
   130   // q is alive
   131   // First check if we should switch compaction space
   132   assert(this == cp->space, "'this' should be current compaction space.");
   133   size_t compaction_max_size = pointer_delta(end(), compact_top);
   134   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
   135     "virtual adjustObjectSize_v() method is not correct");
   136   size_t adjusted_size = adjustObjectSize(size);
   137   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
   138          "no small fragments allowed");
   139   assert(minimum_free_block_size() == MinChunkSize,
   140          "for de-virtualized reference below");
   141   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
   142   if (adjusted_size + MinChunkSize > compaction_max_size &&
   143       adjusted_size != compaction_max_size) {
   144     do {
   145       // switch to next compaction space
   146       cp->space->set_compaction_top(compact_top);
   147       cp->space = cp->space->next_compaction_space();
   148       if (cp->space == NULL) {
   149         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
   150         assert(cp->gen != NULL, "compaction must succeed");
   151         cp->space = cp->gen->first_compaction_space();
   152         assert(cp->space != NULL, "generation must have a first compaction space");
   153       }
   154       compact_top = cp->space->bottom();
   155       cp->space->set_compaction_top(compact_top);
   156       // The correct adjusted_size may not be the same as that for this method
   157       // (i.e., cp->space may no longer be "this"), so adjust the size again.
   158       // Use the virtual method here; it was avoided above to save the virtual
   159       // dispatch.
   160       adjusted_size = cp->space->adjust_object_size_v(size);
   161       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
   162       assert(cp->space->minimum_free_block_size() == 0, "just checking");
   163     } while (adjusted_size > compaction_max_size);
   164   }
   166   // store the forwarding pointer into the mark word
   167   if ((HeapWord*)q != compact_top) {
   168     q->forward_to(oop(compact_top));
   169     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
   170   } else {
   171     // if the object isn't moving we can just set the mark to the default
   172     // mark and handle it specially later on.
   173     q->init_mark();
   174     assert(q->forwardee() == NULL, "should be forwarded to NULL");
   175   }
   177   VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
   178   compact_top += adjusted_size;
   180   // we need to update the offset table so that the beginnings of objects can be
   181   // found during scavenge.  Note that we are updating the offset table based on
   182   // where the object will be once the compaction phase finishes.
   184   // Always call cross_threshold().  A contiguous space calls it only when
   185   // compact_top exceeds the current threshold, but that restriction does not
   186   // apply to a non-contiguous space.
   187   cp->threshold =
   188     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
   189   return compact_top;
   190 }
   192 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
   193 // and use of single_block instead of alloc_block.  The name here is not really
   194 // appropriate - maybe a more general name could be invented for both the
   195 // contiguous and noncontiguous spaces.
   197 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
   198   _bt.single_block(start, the_end);
   199   return end();
   200 }
   202 // Initialize the indexed free list array elements to NULL.
   203 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
   204   for (size_t i = 0; i < IndexSetSize; i++) {
   205     // Note that on platforms where objects are double word aligned,
   206     // the odd array elements are not used.  It is convenient, however,
   207     // to map directly from the object size to the array element.
   208     _indexedFreeList[i].reset(IndexSetSize);
   209     _indexedFreeList[i].set_size(i);
   210     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   211     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   212     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   213     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   214   }
   215 }
   217 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
   218   for (int i = 1; i < IndexSetSize; i++) {
   219     assert(_indexedFreeList[i].size() == (size_t) i,
   220       "Indexed free list sizes are incorrect");
   221     _indexedFreeList[i].reset(IndexSetSize);
   222     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   223     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   224     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   225     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   226   }
   227 }
   229 void CompactibleFreeListSpace::reset(MemRegion mr) {
   230   resetIndexedFreeListArray();
   231   dictionary()->reset();
   232   if (BlockOffsetArrayUseUnallocatedBlock) {
   233     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
   234     // Everything's allocated until proven otherwise.
   235     _bt.set_unallocated_block(end());
   236   }
   237   if (!mr.is_empty()) {
   238     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
   239     _bt.single_block(mr.start(), mr.word_size());
   240     FreeChunk* fc = (FreeChunk*) mr.start();
   241     fc->setSize(mr.word_size());
   242     if (mr.word_size() >= IndexSetSize ) {
   243       returnChunkToDictionary(fc);
   244     } else {
   245       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
   246       _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
   247     }
   248   }
   249   _promoInfo.reset();
   250   _smallLinearAllocBlock._ptr = NULL;
   251   _smallLinearAllocBlock._word_size = 0;
   252 }
   254 void CompactibleFreeListSpace::reset_after_compaction() {
   255   // Reset the space to the new reality - one free chunk.
   256   MemRegion mr(compaction_top(), end());
   257   reset(mr);
   258   // Now refill the linear allocation block(s) if possible.
   259   if (_adaptive_freelists) {
   260     refillLinearAllocBlocksIfNeeded();
   261   } else {
   262     // Place as much of mr in the linAB as we can get,
   263     // provided it was big enough to go into the dictionary.
   264     FreeChunk* fc = dictionary()->findLargestDict();
   265     if (fc != NULL) {
   266       assert(fc->size() == mr.word_size(),
   267              "Why was the chunk broken up?");
   268       removeChunkFromDictionary(fc);
   269       HeapWord* addr = (HeapWord*) fc;
   270       _smallLinearAllocBlock.set(addr, fc->size() ,
   271         1024*SmallForLinearAlloc, fc->size());
   272       // Note that _unallocated_block is not updated here.
   273     }
   274   }
   275 }
   277 // Walks the entire dictionary, returning a coterminal
   278 // chunk, if it exists. Use with caution since it involves
   279 // a potentially complete walk of a potentially large tree.
   280 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
   282   assert_lock_strong(&_freelistLock);
   284   return dictionary()->find_chunk_ends_at(end());
   285 }
   288 #ifndef PRODUCT
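        // Non-product helpers: reset/sum the per-list returnedBytes statistics and
        // recount the chunks held on the indexed free lists and in the dictionary.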
   289 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
   290   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   291     _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
   292   }
   293 }
   295 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
   296   size_t sum = 0;
   297   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   298     sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
   299   }
   300   return sum;
   301 }
   303 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
   304   size_t count = 0;
   305   for (int i = MinChunkSize; i < IndexSetSize; i++) {
   306     debug_only(
   307       ssize_t total_list_count = 0;
   308       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   309          fc = fc->next()) {
   310         total_list_count++;
   311       }
   312       assert(total_list_count ==  _indexedFreeList[i].count(),
   313         "Count in list is incorrect");
   314     )
   315     count += _indexedFreeList[i].count();
   316   }
   317   return count;
   318 }
   320 size_t CompactibleFreeListSpace::totalCount() {
   321   size_t num = totalCountInIndexedFreeLists();
   322   num +=  dictionary()->totalCount();
   323   if (_smallLinearAllocBlock._word_size != 0) {
   324     num++;
   325   }
   326   return num;
   327 }
   328 #endif
   330 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
   331   FreeChunk* fc = (FreeChunk*) p;
   332   return fc->isFree();
   333 }
   335 size_t CompactibleFreeListSpace::used() const {
   336   return capacity() - free();
   337 }
   339 size_t CompactibleFreeListSpace::free() const {
   340   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   341   // if you do this while the structures are in flux you
   342   // may get an approximate answer only; for instance
   343   // because there is concurrent allocation either
   344   // directly by mutators or for promotion during a GC.
   345   // It's "MT-safe", however, in the sense that you are guaranteed
   346   // not to crash and burn, for instance, because of walking
   347   // pointers that could disappear as you were walking them.
   348   // The approximation is because the various components
   349   // that are read below are not read atomically (and
   350   // further the computation of totalSizeInIndexedFreeLists()
   351   // is itself a non-atomic computation). The normal use of
   352   // this is during a resize operation at the end of GC
   353   // and at that time you are guaranteed to get the
   354   // correct actual value. However, for instance, this is
   355   // also read completely asynchronously by the "perf-sampler"
   356   // that supports jvmstat, and you are apt to see the values
   357   // flicker in such cases.
   358   assert(_dictionary != NULL, "No _dictionary?");
   359   return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
   360           totalSizeInIndexedFreeLists() +
   361           _smallLinearAllocBlock._word_size) * HeapWordSize;
   362 }
   364 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
   365   assert(_dictionary != NULL, "No _dictionary?");
   366   assert_locked();
   367   size_t res = _dictionary->maxChunkSize();
   368   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
   369                        (size_t) SmallForLinearAlloc - 1));
   370   // XXX the following could potentially be pretty slow;
   371   // should one, pessimally for the rare cases when res
   372   // calculated above is less than IndexSetSize,
   373   // just return res calculated above? My reasoning was that
   374   // those cases will be so rare that the extra time spent doesn't
   375   // really matter....
   376   // Note: do not change the loop test i >= res + IndexSetStride
   377   // to i > res below, because i is unsigned and res may be zero.
   378   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
   379        i -= IndexSetStride) {
   380     if (_indexedFreeList[i].head() != NULL) {
   381       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   382       return i;
   383     }
   384   }
   385   return res;
   386 }
   388 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
   389 const {
   390   reportIndexedFreeListStatistics();
   391   gclog_or_tty->print_cr("Layout of Indexed Freelists");
   392   gclog_or_tty->print_cr("---------------------------");
   393   FreeList::print_labels_on(st, "size");
   394   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   395     _indexedFreeList[i].print_on(gclog_or_tty);
   396     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   397          fc = fc->next()) {
   398       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
   399                           fc, (HeapWord*)fc + i,
   400                           fc->cantCoalesce() ? "\t CC" : "");
   401     }
   402   }
   403 }
   405 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
   406 const {
   407   _promoInfo.print_on(st);
   408 }
   410 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
   411 const {
   412   _dictionary->reportStatistics();
   413   st->print_cr("Layout of Freelists in Tree");
   414   st->print_cr("---------------------------");
   415   _dictionary->print_free_lists(st);
   416 }
   418 class BlkPrintingClosure: public BlkClosure {
   419   const CMSCollector*             _collector;
   420   const CompactibleFreeListSpace* _sp;
   421   const CMSBitMap*                _live_bit_map;
   422   const bool                      _post_remark;
   423   outputStream*                   _st;
   424 public:
   425   BlkPrintingClosure(const CMSCollector* collector,
   426                      const CompactibleFreeListSpace* sp,
   427                      const CMSBitMap* live_bit_map,
   428                      outputStream* st):
   429     _collector(collector),
   430     _sp(sp),
   431     _live_bit_map(live_bit_map),
   432     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
   433     _st(st) { }
   434   size_t do_blk(HeapWord* addr);
   435 };
   437 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
   438   size_t sz = _sp->block_size_no_stall(addr, _collector);
   439   assert(sz != 0, "Should always be able to compute a size");
   440   if (_sp->block_is_obj(addr)) {
   441     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
   442     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
   443       addr,
   444       dead ? "dead" : "live",
   445       sz,
   446       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
   447     if (CMSPrintObjectsInDump && !dead) {
   448       oop(addr)->print_on(_st);
   449       _st->print_cr("--------------------------------------");
   450     }
   451   } else { // free block
   452     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
   453       addr, sz, CMSPrintChunksInDump ? ":" : ".");
   454     if (CMSPrintChunksInDump) {
   455       ((FreeChunk*)addr)->print_on(_st);
   456       _st->print_cr("--------------------------------------");
   457     }
   458   }
   459   return sz;
   460 }
   462 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
   463   outputStream* st) {
   464   st->print_cr("\n=========================");
   465   st->print_cr("Block layout in CMS Heap:");
   466   st->print_cr("=========================");
   467   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
   468   blk_iterate(&bpcl);
   470   st->print_cr("\n=======================================");
   471   st->print_cr("Order & Layout of Promotion Info Blocks");
   472   st->print_cr("=======================================");
   473   print_promo_info_blocks(st);
   475   st->print_cr("\n===========================");
   476   st->print_cr("Order of Indexed Free Lists");
   477   st->print_cr("===========================");
   478   print_indexed_free_lists(st);
   480   st->print_cr("\n=================================");
   481   st->print_cr("Order of Free Lists in Dictionary");
   482   st->print_cr("=================================");
   483   print_dictionary_free_lists(st);
   484 }
   487 void CompactibleFreeListSpace::reportFreeListStatistics() const {
   488   assert_lock_strong(&_freelistLock);
   489   assert(PrintFLSStatistics != 0, "Reporting error");
   490   _dictionary->reportStatistics();
   491   if (PrintFLSStatistics > 1) {
   492     reportIndexedFreeListStatistics();
   493     size_t totalSize = totalSizeInIndexedFreeLists() +
   494                        _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
   495     gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
   496   }
   497 }
   499 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
   500   assert_lock_strong(&_freelistLock);
   501   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
   502                       "--------------------------------\n");
   503   size_t totalSize = totalSizeInIndexedFreeLists();
   504   size_t   freeBlocks = numFreeBlocksInIndexedFreeLists();
   505   gclog_or_tty->print("Total Free Space: %d\n", totalSize);
   506   gclog_or_tty->print("Max   Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
   507   gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
   508   if (freeBlocks != 0) {
   509     gclog_or_tty->print("Av.  Block  Size: %d\n", totalSize/freeBlocks);
   510   }
   511 }
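        // Count the chunks currently held on the indexed free lists; in debug
        // builds each list is re-walked to validate its cached count.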
   513 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
   514   size_t res = 0;
   515   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   516     debug_only(
   517       ssize_t recount = 0;
   518       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   519          fc = fc->next()) {
   520         recount += 1;
   521       }
   522       assert(recount == _indexedFreeList[i].count(),
   523         "Incorrect count in list");
   524     )
   525     res += _indexedFreeList[i].count();
   526   }
   527   return res;
   528 }
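        // Returns the size of the largest non-empty indexed free list, scanning
        // from the largest indexed size downward (0 if all indexed lists are
        // empty; larger free chunks are kept in the dictionary).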
   530 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
   531   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
   532     if (_indexedFreeList[i].head() != NULL) {
   533       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   534       return (size_t)i;
   535     }
   536   }
   537   return 0;
   538 }
   540 void CompactibleFreeListSpace::set_end(HeapWord* value) {
   541   HeapWord* prevEnd = end();
   542   assert(prevEnd != value, "unnecessary set_end call");
   543   assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
   544   _end = value;
   545   if (prevEnd != NULL) {
   546     // Resize the underlying block offset table.
   547     _bt.resize(pointer_delta(value, bottom()));
   548     if (value <= prevEnd) {
   549       assert(value >= unallocated_block(), "New end is below unallocated block");
   550     } else {
   551       // Now, take this new chunk and add it to the free blocks.
   552       // Note that the BOT has not yet been updated for this block.
   553       size_t newFcSize = pointer_delta(value, prevEnd);
   554       // XXX This is REALLY UGLY and should be fixed up. XXX
   555       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
   556         // Mark the boundary of the new block in BOT
   557         _bt.mark_block(prevEnd, value);
   558         // put it all in the linAB
   559         if (ParallelGCThreads == 0) {
   560           _smallLinearAllocBlock._ptr = prevEnd;
   561           _smallLinearAllocBlock._word_size = newFcSize;
   562           repairLinearAllocBlock(&_smallLinearAllocBlock);
   563         } else { // ParallelGCThreads > 0
   564           MutexLockerEx x(parDictionaryAllocLock(),
   565                           Mutex::_no_safepoint_check_flag);
   566           _smallLinearAllocBlock._ptr = prevEnd;
   567           _smallLinearAllocBlock._word_size = newFcSize;
   568           repairLinearAllocBlock(&_smallLinearAllocBlock);
   569         }
   570         // Births of chunks put into a LinAB are not recorded.  Births
   571         // of chunks as they are allocated out of a LinAB are.
   572       } else {
   573         // Add the block to the free lists, if possible coalescing it
   574         // with the last free block, and update the BOT and census data.
   575         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
   576       }
   577     }
   578   }
   579 }
   581 class FreeListSpace_DCTOC : public Filtering_DCTOC {
   582   CompactibleFreeListSpace* _cfls;
   583   CMSCollector* _collector;
   584 protected:
   585   // Override.
   586 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
   587   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
   588                                        HeapWord* bottom, HeapWord* top, \
   589                                        ClosureType* cl);                \
   590       void walk_mem_region_with_cl_par(MemRegion mr,                    \
   591                                        HeapWord* bottom, HeapWord* top, \
   592                                        ClosureType* cl);                \
   593     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
   594                                        HeapWord* bottom, HeapWord* top, \
   595                                        ClosureType* cl)
   596   walk_mem_region_with_cl_DECL(OopClosure);
   597   walk_mem_region_with_cl_DECL(FilteringClosure);
   599 public:
   600   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
   601                       CMSCollector* collector,
   602                       OopClosure* cl,
   603                       CardTableModRefBS::PrecisionStyle precision,
   604                       HeapWord* boundary) :
   605     Filtering_DCTOC(sp, cl, precision, boundary),
   606     _cfls(sp), _collector(collector) {}
   607 };
   609 // We de-virtualize the block-related calls below, since we know that our
   610 // space is a CompactibleFreeListSpace.
   611 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
   612 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
   613                                                  HeapWord* bottom,              \
   614                                                  HeapWord* top,                 \
   615                                                  ClosureType* cl) {             \
   616    if (SharedHeap::heap()->n_par_threads() > 0) {                               \
   617      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   618    } else {                                                                     \
   619      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   620    }                                                                            \
   621 }                                                                               \
   622 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
   623                                                       HeapWord* bottom,         \
   624                                                       HeapWord* top,            \
   625                                                       ClosureType* cl) {        \
   626   /* Skip parts that are before "mr", in case "block_start" sent us             \
   627      back too far. */                                                           \
   628   HeapWord* mr_start = mr.start();                                              \
   629   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
   630   HeapWord* next = bottom + bot_size;                                           \
   631   while (next < mr_start) {                                                     \
   632     bottom = next;                                                              \
   633     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
   634     next = bottom + bot_size;                                                   \
   635   }                                                                             \
   636                                                                                 \
   637   while (bottom < top) {                                                        \
   638     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
   639         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   640                     oop(bottom)) &&                                             \
   641         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   642       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   643       bottom += _cfls->adjustObjectSize(word_sz);                               \
   644     } else {                                                                    \
   645       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
   646     }                                                                           \
   647   }                                                                             \
   648 }                                                                               \
   649 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
   650                                                         HeapWord* bottom,       \
   651                                                         HeapWord* top,          \
   652                                                         ClosureType* cl) {      \
   653   /* Skip parts that are before "mr", in case "block_start" sent us             \
   654      back too far. */                                                           \
   655   HeapWord* mr_start = mr.start();                                              \
   656   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
   657   HeapWord* next = bottom + bot_size;                                           \
   658   while (next < mr_start) {                                                     \
   659     bottom = next;                                                              \
   660     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
   661     next = bottom + bot_size;                                                   \
   662   }                                                                             \
   663                                                                                 \
   664   while (bottom < top) {                                                        \
   665     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
   666         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   667                     oop(bottom)) &&                                             \
   668         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   669       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   670       bottom += _cfls->adjustObjectSize(word_sz);                               \
   671     } else {                                                                    \
   672       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
   673     }                                                                           \
   674   }                                                                             \
   675 }
   677 // (There are only two of these, rather than N, because the split is due
   678 // only to the introduction of the FilteringClosure, a local part of the
   679 // impl of this abstraction.)
   680 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
   681 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
   683 DirtyCardToOopClosure*
   684 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
   685                                       CardTableModRefBS::PrecisionStyle precision,
   686                                       HeapWord* boundary) {
   687   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
   688 }
   691 // Note on locking for the space iteration functions:
   692 // since the collector's iteration activities are concurrent with
   693 // allocation activities by mutators, absent a suitable mutual exclusion
   694 // mechanism the iterators may go awry. For instance a block being iterated
   695 // may suddenly be allocated or divided up and part of it allocated and
   696 // so on.
   698 // Apply the given closure to each block in the space.
   699 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
   700   assert_lock_strong(freelistLock());
   701   HeapWord *cur, *limit;
   702   for (cur = bottom(), limit = end(); cur < limit;
   703        cur += cl->do_blk_careful(cur));
   704 }
   706 // Apply the given closure to each block in the space.
   707 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
   708   assert_lock_strong(freelistLock());
   709   HeapWord *cur, *limit;
   710   for (cur = bottom(), limit = end(); cur < limit;
   711        cur += cl->do_blk(cur));
   712 }
   714 // Apply the given closure to each oop in the space.
   715 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
   716   assert_lock_strong(freelistLock());
   717   HeapWord *cur, *limit;
   718   size_t curSize;
   719   for (cur = bottom(), limit = end(); cur < limit;
   720        cur += curSize) {
   721     curSize = block_size(cur);
   722     if (block_is_obj(cur)) {
   723       oop(cur)->oop_iterate(cl);
   724     }
   725   }
   726 }
   728 // Apply the given closure to each oop in the space \intersect memory region.
   729 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
   730   assert_lock_strong(freelistLock());
   731   if (is_empty()) {
   732     return;
   733   }
   734   MemRegion cur = MemRegion(bottom(), end());
   735   mr = mr.intersection(cur);
   736   if (mr.is_empty()) {
   737     return;
   738   }
   739   if (mr.equals(cur)) {
   740     oop_iterate(cl);
   741     return;
   742   }
   743   assert(mr.end() <= end(), "just took an intersection above");
   744   HeapWord* obj_addr = block_start(mr.start());
   745   HeapWord* t = mr.end();
   747   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
   748   if (block_is_obj(obj_addr)) {
   749     // Handle first object specially.
   750     oop obj = oop(obj_addr);
   751     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
   752   } else {
   753     FreeChunk* fc = (FreeChunk*)obj_addr;
   754     obj_addr += fc->size();
   755   }
   756   while (obj_addr < t) {
   757     HeapWord* obj = obj_addr;
   758     obj_addr += block_size(obj_addr);
   759     // If "obj_addr" is not greater than top, then the
   760     // entire object "obj" is within the region.
   761     if (obj_addr <= t) {
   762       if (block_is_obj(obj)) {
   763         oop(obj)->oop_iterate(cl);
   764       }
   765     } else {
   766       // "obj" extends beyond end of region
   767       if (block_is_obj(obj)) {
   768         oop(obj)->oop_iterate(&smr_blk);
   769       }
   770       break;
   771     }
   772   }
   773 }
   775 // NOTE: In the following methods, in order to safely be able to
   776 // apply the closure to an object, we need to be sure that the
   777 // object has been initialized. We are guaranteed that an object
   778 // is initialized if we are holding the Heap_lock with the
   779 // world stopped.
   780 void CompactibleFreeListSpace::verify_objects_initialized() const {
   781   if (is_init_completed()) {
   782     assert_locked_or_safepoint(Heap_lock);
   783     if (Universe::is_fully_initialized()) {
   784       guarantee(SafepointSynchronize::is_at_safepoint(),
   785                 "Required for objects to be initialized");
   786     }
   787   } // else make a concession at vm start-up
   788 }
   790 // Apply the given closure to each object in the space
   791 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
   792   assert_lock_strong(freelistLock());
   793   NOT_PRODUCT(verify_objects_initialized());
   794   HeapWord *cur, *limit;
   795   size_t curSize;
   796   for (cur = bottom(), limit = end(); cur < limit;
   797        cur += curSize) {
   798     curSize = block_size(cur);
   799     if (block_is_obj(cur)) {
   800       blk->do_object(oop(cur));
   801     }
   802   }
   803 }
   805 // Apply the given closure to each live object in the space
   806 //   The usage of CompactibleFreeListSpace
   807 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
   808 // objects in the space to hold references to objects that are no
   809 // longer valid.  For example, an object may reference another object
   810 // that has already been swept up (collected).  This method uses
   811 // obj_is_alive() to determine whether it is safe to apply the closure to
   812 // an object.  See obj_is_alive() for details on how liveness of an
   813 // object is decided.
   815 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
   816   assert_lock_strong(freelistLock());
   817   NOT_PRODUCT(verify_objects_initialized());
   818   HeapWord *cur, *limit;
   819   size_t curSize;
   820   for (cur = bottom(), limit = end(); cur < limit;
   821        cur += curSize) {
   822     curSize = block_size(cur);
   823     if (block_is_obj(cur) && obj_is_alive(cur)) {
   824       blk->do_object(oop(cur));
   825     }
   826   }
   827 }
   829 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
   830                                                   UpwardsObjectClosure* cl) {
   831   assert_locked(freelistLock());
   832   NOT_PRODUCT(verify_objects_initialized());
   833   Space::object_iterate_mem(mr, cl);
   834 }
   836 // Callers of this iterator beware: The closure application should
   837 // be robust in the face of uninitialized objects and should (always)
   838 // return a correct size so that the next addr + size below gives us a
   839 // valid block boundary. [See for instance,
   840 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   841 // in ConcurrentMarkSweepGeneration.cpp.]
   842 HeapWord*
   843 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
   844   assert_lock_strong(freelistLock());
   845   HeapWord *addr, *last;
   846   size_t size;
   847   for (addr = bottom(), last  = end();
   848        addr < last; addr += size) {
   849     FreeChunk* fc = (FreeChunk*)addr;
   850     if (fc->isFree()) {
   851       // Since we hold the free list lock, which protects direct
   852       // allocation in this generation by mutators, a free object
   853       // will remain free throughout this iteration code.
   854       size = fc->size();
   855     } else {
   856       // Note that the object need not necessarily be initialized,
   857       // because (for instance) the free list lock does NOT protect
   858       // object initialization. The closure application below must
   859       // therefore be correct in the face of uninitialized objects.
   860       size = cl->do_object_careful(oop(addr));
   861       if (size == 0) {
   862         // An unparsable object found. Signal early termination.
   863         return addr;
   864       }
   865     }
   866   }
   867   return NULL;
   868 }
   870 // Callers of this iterator beware: The closure application should
   871 // be robust in the face of uninitialized objects and should (always)
   872 // return a correct size so that the next addr + size below gives us a
   873 // valid block boundary. [See for instance,
   874 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   875 // in ConcurrentMarkSweepGeneration.cpp.]
   876 HeapWord*
   877 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
   878   ObjectClosureCareful* cl) {
   879   assert_lock_strong(freelistLock());
   880   // Can't use used_region() below because it may not necessarily
   881   // be the same as [bottom(),end()); although we could
   882   // use [used_region().start(),round_to(used_region().end(),CardSize)),
   883   // that appears too cumbersome, so we just do the simpler check
   884   // in the assertion below.
   885   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
   886          "mr should be non-empty and within used space");
   887   HeapWord *addr, *end;
   888   size_t size;
   889   for (addr = block_start_careful(mr.start()), end  = mr.end();
   890        addr < end; addr += size) {
   891     FreeChunk* fc = (FreeChunk*)addr;
   892     if (fc->isFree()) {
   893       // Since we hold the free list lock, which protects direct
   894       // allocation in this generation by mutators, a free object
   895       // will remain free throughout this iteration code.
   896       size = fc->size();
   897     } else {
   898       // Note that the object need not necessarily be initialized,
   899       // because (for instance) the free list lock does NOT protect
   900       // object initialization. The closure application below must
   901       // therefore be correct in the face of uninitialized objects.
   902       size = cl->do_object_careful_m(oop(addr), mr);
   903       if (size == 0) {
   904         // An unparsable object found. Signal early termination.
   905         return addr;
   906       }
   907     }
   908   }
   909   return NULL;
   910 }
   913 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
   914   NOT_PRODUCT(verify_objects_initialized());
   915   return _bt.block_start(p);
   916 }
   918 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
   919   return _bt.block_start_careful(p);
   920 }
   922 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   923   NOT_PRODUCT(verify_objects_initialized());
   924   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
   925   // This must be volatile, or else there is a danger that the compiler
   926   // will compile the code below into a sometimes-infinite loop, by keeping
   927   // the value read the first time in a register.
   928   while (true) {
   929     // We must do this until we get a consistent view of the object.
   930     if (FreeChunk::indicatesFreeChunk(p)) {
   931       volatile FreeChunk* fc = (volatile FreeChunk*)p;
   932       size_t res = fc->size();
   933       // If the object is still a free chunk, return the size, else it
   934       // has been allocated so try again.
   935       if (FreeChunk::indicatesFreeChunk(p)) {
   936         assert(res != 0, "Block size should not be 0");
   937         return res;
   938       }
   939     } else {
   940       // must read from what 'p' points to in each loop.
   941       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
   942       if (k != NULL) {
   943         assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
   944         oop o = (oop)p;
   945         assert(o->is_parsable(), "Should be parsable");
   946         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
   947         size_t res = o->size_given_klass(k->klass_part());
   948         res = adjustObjectSize(res);
   949         assert(res != 0, "Block size should not be 0");
   950         return res;
   951       }
   952     }
   953   }
   954 }
   956 // A variant of the above that uses the Printezis bits for
   957 // unparsable but allocated objects. This avoids any possible
   958 // stalls waiting for mutators to initialize objects, and is
   959 // thus potentially faster than the variant above. However,
   960 // this variant may return a zero size for a block that is
   961 // under mutation and for which a consistent size cannot be
   962 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
   963 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
   964                                                      const CMSCollector* c)
   965 const {
   966   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
   967   // This must be volatile, or else there is a danger that the compiler
   968   // will compile the code below into a sometimes-infinite loop, by keeping
   969   // the value read the first time in a register.
   970   DEBUG_ONLY(uint loops = 0;)
   971   while (true) {
   972     // We must do this until we get a consistent view of the object.
   973     if (FreeChunk::indicatesFreeChunk(p)) {
   974       volatile FreeChunk* fc = (volatile FreeChunk*)p;
   975       size_t res = fc->size();
   976       if (FreeChunk::indicatesFreeChunk(p)) {
   977         assert(res != 0, "Block size should not be 0");
   978         assert(loops == 0, "Should be 0");
   979         return res;
   980       }
   981     } else {
   982       // must read from what 'p' points to in each loop.
   983       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
   984       if (k != NULL &&
   985           ((oopDesc*)p)->is_parsable() &&
   986           ((oopDesc*)p)->is_conc_safe()) {
   987         assert(k->is_oop(), "Should really be klass oop.");
   988         oop o = (oop)p;
   989         assert(o->is_oop(), "Should be an oop");
   990         size_t res = o->size_given_klass(k->klass_part());
   991         res = adjustObjectSize(res);
   992         assert(res != 0, "Block size should not be 0");
   993         return res;
   994       } else {
   995         return c->block_size_if_printezis_bits(p);
   996       }
   997     }
   998     assert(loops == 0, "Can loop at most once");
    999     DEBUG_ONLY(loops++;)
   1000   }
   1001 }
  1003 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
  1004   NOT_PRODUCT(verify_objects_initialized());
  1005   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1006   FreeChunk* fc = (FreeChunk*)p;
  1007   if (fc->isFree()) {
  1008     return fc->size();
  1009   } else {
  1010     // Ignore mark word because this may be a recently promoted
  1011     // object whose mark word is used to chain together grey
  1012     // objects (the last one would have a null value).
  1013     assert(oop(p)->is_oop(true), "Should be an oop");
   1014     return adjustObjectSize(oop(p)->size());
   1015   }
   1016 }
  1018 // This implementation assumes that the property of "being an object" is
  1019 // stable.  But being a free chunk may not be (because of parallel
  1020 // promotion.)
  1021 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
  1022   FreeChunk* fc = (FreeChunk*)p;
  1023   assert(is_in_reserved(p), "Should be in space");
  1024   // When doing a mark-sweep-compact of the CMS generation, this
  1025   // assertion may fail because prepare_for_compaction() uses
  1026   // space that is garbage to maintain information on ranges of
  1027   // live objects so that these live ranges can be moved as a whole.
   1028   // Comment out this assertion until that problem can be solved
   1029   // (i.e., the block start calculation may look at objects
   1030   // at addresses below "p" in finding the object that contains "p",
   1031   // and those objects (if garbage) may have been modified to hold
   1032   // live range information).
  1033   // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
  1034   if (FreeChunk::indicatesFreeChunk(p)) return false;
  1035   klassOop k = oop(p)->klass_or_null();
  1036   if (k != NULL) {
  1037     // Ignore mark word because it may have been used to
  1038     // chain together promoted objects (the last one
  1039     // would have a null value).
  1040     assert(oop(p)->is_oop(true), "Should be an oop");
  1041     return true;
  1042   } else {
   1043     return false;  // Was not an object at the start of collection.
   1044   }
   1045 }
  1047 // Check if the object is alive. This fact is checked either by consulting
  1048 // the main marking bitmap in the sweeping phase or, if it's a permanent
  1049 // generation and we're not in the sweeping phase, by checking the
  1050 // perm_gen_verify_bit_map where we store the "deadness" information if
  1051 // we did not sweep the perm gen in the most recent previous GC cycle.
  1052 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
  1053   assert (block_is_obj(p), "The address should point to an object");
  1055   // If we're sweeping, we use object liveness information from the main bit map
  1056   // for both perm gen and old gen.
  1057   // We don't need to lock the bitmap (live_map or dead_map below), because
  1058   // EITHER we are in the middle of the sweeping phase, and the
  1059   // main marking bit map (live_map below) is locked,
  1060   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
  1061   // is stable, because it's mutated only in the sweeping phase.
  1062   if (_collector->abstract_state() == CMSCollector::Sweeping) {
  1063     CMSBitMap* live_map = _collector->markBitMap();
  1064     return live_map->isMarked((HeapWord*) p);
  1065   } else {
  1066     // If we're not currently sweeping and we haven't swept the perm gen in
  1067     // the previous concurrent cycle then we may have dead but unswept objects
  1068     // in the perm gen. In this case, we use the "deadness" information
  1069     // that we had saved in perm_gen_verify_bit_map at the last sweep.
  1070     if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
  1071       if (_collector->verifying()) {
  1072         CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
  1073         // Object is marked in the dead_map bitmap at the previous sweep
  1074         // when we know that it's dead; if the bitmap is not allocated then
  1075         // the object is alive.
  1076         return (dead_map->sizeInBits() == 0) // bit_map has been allocated
  1077                || !dead_map->par_isMarked((HeapWord*) p);
  1078       } else {
   1079         return false; // We can't say for sure if it's live, so we say that it's dead.
   1080       }
   1081     }
   1082   }
   1083   return true;
   1084 }
  1086 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
  1087   FreeChunk* fc = (FreeChunk*)p;
  1088   assert(is_in_reserved(p), "Should be in space");
  1089   assert(_bt.block_start(p) == p, "Should be a block boundary");
  1090   if (!fc->isFree()) {
  1091     // Ignore mark word because it may have been used to
  1092     // chain together promoted objects (the last one
  1093     // would have a null value).
  1094     assert(oop(p)->is_oop(true), "Should be an oop");
   1095     return true;
   1096   }
   1097   return false;
   1098 }
  1100 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
   1101 // approximate answer if you don't hold the freelistLock when you call this.
  1102 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
  1103   size_t size = 0;
  1104   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  1105     debug_only(
  1106       // We may be calling here without the lock in which case we
  1107       // won't do this modest sanity check.
  1108       if (freelistLock()->owned_by_self()) {
  1109         size_t total_list_size = 0;
  1110         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
  1111           fc = fc->next()) {
   1112           total_list_size += i;
   1113         }
   1114         assert(total_list_size == i * _indexedFreeList[i].count(),
   1115                "Count in list is incorrect");
   1116       }
   1117     )
   1118     size += i * _indexedFreeList[i].count();
   1119   }
   1120   return size;
   1121 }
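        // Parallel allocation entry point: takes the free list lock (with no
        // safepoint check) and delegates to the serial allocate() below.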
  1123 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  1124   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
   1125   return allocate(size);
   1126 }
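        // Allocate from whatever remains of the small linear allocation block.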
  1128 HeapWord*
  1129 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
   1130   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
   1131 }
  1133 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  1134   assert_lock_strong(freelistLock());
  1135   HeapWord* res = NULL;
  1136   assert(size == adjustObjectSize(size),
  1137          "use adjustObjectSize() before calling into allocate()");
  1139   if (_adaptive_freelists) {
  1140     res = allocate_adaptive_freelists(size);
  1141   } else {  // non-adaptive free lists
   1142     res = allocate_non_adaptive_freelists(size);
   1143   }
  1145   if (res != NULL) {
  1146     // check that res does lie in this space!
  1147     assert(is_in_reserved(res), "Not in this space!");
  1148     assert(is_aligned((void*)res), "alignment check");
  1150     FreeChunk* fc = (FreeChunk*)res;
  1151     fc->markNotFree();
  1152     assert(!fc->isFree(), "shouldn't be marked free");
  1153     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
  1154     // Verify that the block offset table shows this to
  1155     // be a single block, but not one which is unallocated.
  1156     _bt.verify_single_block(res, size);
  1157     _bt.verify_not_unallocated(res, size);
  1158     // mangle a just allocated object with a distinct pattern.
   1159     debug_only(fc->mangleAllocated(size));
   1160   }
   1162   return res;
   1163 }
  1165 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
  1166   HeapWord* res = NULL;
  1167   // try and use linear allocation for smaller blocks
  1168   if (size < _smallLinearAllocBlock._allocation_size_limit) {
  1169     // if successful, the following also adjusts block offset table
  1170     res = getChunkFromSmallLinearAllocBlock(size);
  1172   // Else triage to indexed lists for smaller sizes
  1173   if (res == NULL) {
  1174     if (size < SmallForDictionary) {
  1175       res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1176     } else {
  1177       // else get it from the big dictionary; if even this doesn't
  1178       // work we are out of luck.
  1179       res = (HeapWord*)getChunkFromDictionaryExact(size);
  1183   return res;
  1186 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
  1187   assert_lock_strong(freelistLock());
  1188   HeapWord* res = NULL;
  1189   assert(size == adjustObjectSize(size),
  1190          "use adjustObjectSize() before calling into allocate()");
  1192   // Strategy
  1193   //   if small
  1194   //     exact size from small object indexed list
  1195   //     small or large linear allocation block (linAB) as appropriate
  1196   //     take from lists of greater sized chunks
  1197   //   else
  1198   //     dictionary
  1199   //     small or large linear allocation block if it has the space
  1200   // Try allocating exact size from indexTable first
  1201   if (size < IndexSetSize) {
  1202     res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1203     if (res != NULL) {
  1204       assert(res != (HeapWord*)_indexedFreeList[size].head(),
  1205         "Not removed from free list");
  1206       // no block offset table adjustment is necessary on blocks in
  1207       // the indexed lists.
  1209     // Try allocating from the small LinAB
  1210     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
  1211         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
  1212         // if successful, the above also adjusts block offset table
  1213         // Note that this call will refill the LinAB to
  1214         // satisfy the request.  This is different from
  1215         // evm.
  1216         // Don't record chunk off a LinAB?  smallSplitBirth(size);
  1218     } else {
  1219       // Raid the exact free lists larger than size, even if they are not
  1220       // overpopulated.
  1221       res = (HeapWord*) getChunkFromGreater(size);
  1223   } else {
  1224     // Big objects get allocated directly from the dictionary.
  1225     res = (HeapWord*) getChunkFromDictionaryExact(size);
  1226     if (res == NULL) {
  1227       // Try hard not to fail since an allocation failure will likely
  1228       // trigger a synchronous GC.  Try to get the space from the
  1229       // allocation blocks.
  1230       res = getChunkFromSmallLinearAllocBlockRemainder(size);
  1234   return res;
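// To make the cascade in allocate_adaptive_freelists() above concrete: a
// request below IndexSetSize words is tried, in order, against the
// exact-size indexed list, the small linAB (when its size limit permits),
// and finally the larger exact lists via getChunkFromGreater(); a request
// of IndexSetSize words or more goes to the dictionary first and, failing
// that, to the remainder of the small linAB.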
  1237 // A worst-case estimate of the space required (in HeapWords) to expand the heap
  1238 // when promoting obj.
  1239 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  1240   // Depending on the object size, expansion may require refilling either a
  1241   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
  1242   // is added because the dictionary may over-allocate to avoid fragmentation.
  1243   size_t space = obj_size;
  1244   if (!_adaptive_freelists) {
  1245     space = MAX2(space, _smallLinearAllocBlock._refillSize);
  1247   space += _promoInfo.refillSize() + 2 * MinChunkSize;
  1248   return space;
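// Written as a single expression, the estimate above is
//   MAX2(obj_size, _smallLinearAllocBlock._refillSize)
//     + _promoInfo.refillSize() + 2 * MinChunkSize
// for non-adaptive free lists, and simply
//   obj_size + _promoInfo.refillSize() + 2 * MinChunkSize
// when adaptive free lists are in use.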
  1251 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  1252   FreeChunk* ret;
  1254   assert(numWords >= MinChunkSize, "Size is less than minimum");
  1255   assert(linearAllocationWouldFail() || bestFitFirst(),
  1256     "Should not be here");
  1258   size_t i;
  1259   size_t currSize = numWords + MinChunkSize;
  1260   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  1261   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
  1262     FreeList* fl = &_indexedFreeList[i];
  1263     if (fl->head()) {
  1264       ret = getFromListGreater(fl, numWords);
  1265       assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
  1266       return ret;
  1270   currSize = MAX2((size_t)SmallForDictionary,
  1271                   (size_t)(numWords + MinChunkSize));
  1273   /* Try to get a chunk that satisfies request, while avoiding
  1274      fragmentation that can't be handled. */
  1276     ret =  dictionary()->getChunk(currSize);
  1277     if (ret != NULL) {
  1278       assert(ret->size() - numWords >= MinChunkSize,
  1279              "Chunk is too small");
  1280       _bt.allocated((HeapWord*)ret, ret->size());
  1281       /* Carve returned chunk. */
  1282       (void) splitChunkAndReturnRemainder(ret, numWords);
  1283       /* Label this as no longer a free chunk. */
  1284       assert(ret->isFree(), "This chunk should be free");
  1285       ret->linkPrev(NULL);
  1287     assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
  1288     return ret;
  1290   ShouldNotReachHere();
  1293 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
  1294   const {
  1295   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  1296   return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
  1299 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
  1300   if (fc->size() >= IndexSetSize) {
  1301     return dictionary()->verifyChunkInFreeLists(fc);
  1302   } else {
  1303     return verifyChunkInIndexedFreeLists(fc);
  1307 #ifndef PRODUCT
  1308 void CompactibleFreeListSpace::assert_locked() const {
  1309   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
  1312 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
  1313   CMSLockVerifier::assert_locked(lock);
  1315 #endif
  1317 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
  1318   // In the parallel case, the main thread holds the free list lock
  1319   // on behalf of the parallel threads.
  1320   FreeChunk* fc;
  1322     // If GC is parallel, this might be called by several threads.
  1323     // This should be rare enough that the locking overhead won't affect
  1324     // the sequential code.
  1325     MutexLockerEx x(parDictionaryAllocLock(),
  1326                     Mutex::_no_safepoint_check_flag);
  1327     fc = getChunkFromDictionary(size);
  1329   if (fc != NULL) {
  1330     fc->dontCoalesce();
  1331     assert(fc->isFree(), "Should be free, but not coalescable");
  1332     // Verify that the block offset table shows this to
  1333     // be a single block, but not one which is unallocated.
  1334     _bt.verify_single_block((HeapWord*)fc, fc->size());
  1335     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  1337   return fc;
  1340 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
  1341   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  1342   assert_locked();
  1344   // if we are tracking promotions, then first ensure space for
  1345   // promotion (including spooling space for saving header if necessary).
  1346   // then allocate and copy, then track promoted info if needed.
  1347   // When tracking (see PromotionInfo::track()), the mark word may
  1348   // be displaced and in this case restoration of the mark word
  1349   // occurs in the (oop_since_save_marks_)iterate phase.
  1350   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
  1351     return NULL;
  1353   // Call the allocate(size_t, bool) form directly to avoid the
  1354   // additional call through the allocate(size_t) form.  Having
  1355   // the compiler inline the call is problematic because allocate(size_t)
  1356   // is a virtual method.
  1357   HeapWord* res = allocate(adjustObjectSize(obj_size));
  1358   if (res != NULL) {
  1359     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
  1360     // if we should be tracking promotions, do so.
  1361     if (_promoInfo.tracking()) {
  1362         _promoInfo.track((PromotedObject*)res);
  1365   return oop(res);
  1368 HeapWord*
  1369 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
  1370   assert_locked();
  1371   assert(size >= MinChunkSize, "minimum chunk size");
  1372   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
  1373     "maximum from smallLinearAllocBlock");
  1374   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
  1377 HeapWord*
  1378 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
  1379                                                        size_t size) {
  1380   assert_locked();
  1381   assert(size >= MinChunkSize, "too small");
  1382   HeapWord* res = NULL;
  1383   // Try to do linear allocation from blk, making sure that it is not exhausted.
  1384   if (blk->_word_size == 0) {
  1385     // We have probably been unable to fill this either in the prologue or
  1386     // when it was exhausted at the last linear allocation. Bail out until
  1387     // next time.
  1388     assert(blk->_ptr == NULL, "consistency check");
  1389     return NULL;
  1391   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
  1392   res = getChunkFromLinearAllocBlockRemainder(blk, size);
  1393   if (res != NULL) return res;
  1395   // about to exhaust this linear allocation block
  1396   if (blk->_word_size == size) { // exactly satisfied
  1397     res = blk->_ptr;
  1398     _bt.allocated(res, blk->_word_size);
  1399   } else if (size + MinChunkSize <= blk->_refillSize) {
  1400     size_t sz = blk->_word_size;
  1401     // Update _unallocated_block if the size is such that chunk would be
  1402     // returned to the indexed free list.  All other chunks in the indexed
  1403     // free lists are allocated from the dictionary so that _unallocated_block
  1404   // has already been adjusted for them.  Do it here so that the cost
  1405   // is paid uniformly for all chunks added back to the indexed free lists.
  1406     if (sz < SmallForDictionary) {
  1407       _bt.allocated(blk->_ptr, sz);
  1409     // Return the chunk that isn't big enough, and then refill below.
  1410     addChunkToFreeLists(blk->_ptr, sz);
  1411     splitBirth(sz);
  1412     // Don't keep statistics on adding back chunk from a LinAB.
  1413   } else {
  1414     // A refilled block would not satisfy the request.
  1415     return NULL;
  1418   blk->_ptr = NULL; blk->_word_size = 0;
  1419   refillLinearAllocBlock(blk);
  1420   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
  1421          "block was replenished");
  1422   if (res != NULL) {
  1423     splitBirth(size);
  1424     repairLinearAllocBlock(blk);
  1425   } else if (blk->_ptr != NULL) {
  1426     res = blk->_ptr;
  1427     size_t blk_size = blk->_word_size;
  1428     blk->_word_size -= size;
  1429     blk->_ptr  += size;
  1430     splitBirth(size);
  1431     repairLinearAllocBlock(blk);
  1432     // Update BOT last so that other (parallel) GC threads see a consistent
  1433     // view of the BOT and free blocks.
  1434     // Above must occur before BOT is updated below.
  1435     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1437   return res;
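// The paths through getChunkFromLinearAllocBlock() above, in order: bail
// out if the linAB is empty; carve from its remainder if that is large
// enough; take the block whole if it is an exact fit; otherwise, if a
// refill could satisfy the request, return the too-small leftover to the
// free lists, refill the linAB and carve from the refilled block; failing
// all of these, return NULL.  In every carving path the free-chunk header
// is written before the BOT update, so other (parallel) GC threads never
// see an inconsistent view.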
  1440 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
  1441                                         LinearAllocBlock* blk,
  1442                                         size_t size) {
  1443   assert_locked();
  1444   assert(size >= MinChunkSize, "too small");
  1446   HeapWord* res = NULL;
  1447   // This is the common case.  Keep it simple.
  1448   if (blk->_word_size >= size + MinChunkSize) {
  1449     assert(blk->_ptr != NULL, "consistency check");
  1450     res = blk->_ptr;
  1451     // Note that the BOT is up-to-date for the linAB before allocation.  It
  1452     // indicates the start of the linAB.  The split_block() updates the
  1453     // BOT for the linAB after the allocation (indicates the start of the
  1454     // next chunk to be allocated).
  1455     size_t blk_size = blk->_word_size;
  1456     blk->_word_size -= size;
  1457     blk->_ptr  += size;
  1458     splitBirth(size);
  1459     repairLinearAllocBlock(blk);
  1460     // Update BOT last so that other (parallel) GC threads see a consistent
  1461     // view of the BOT and free blocks.
  1462     // Above must occur before BOT is updated below.
  1463     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1464     _bt.allocated(res, size);
  1466   return res;
  1469 FreeChunk*
  1470 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
  1471   assert_locked();
  1472   assert(size < SmallForDictionary, "just checking");
  1473   FreeChunk* res;
  1474   res = _indexedFreeList[size].getChunkAtHead();
  1475   if (res == NULL) {
  1476     res = getChunkFromIndexedFreeListHelper(size);
  1478   _bt.verify_not_unallocated((HeapWord*) res, size);
  1479   assert(res == NULL || res->size() == size, "Incorrect block size");
  1480   return res;
  1483 FreeChunk*
  1484 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
  1485   bool replenish) {
  1486   assert_locked();
  1487   FreeChunk* fc = NULL;
  1488   if (size < SmallForDictionary) {
  1489     assert(_indexedFreeList[size].head() == NULL ||
  1490       _indexedFreeList[size].surplus() <= 0,
  1491       "List for this size should be empty or under populated");
  1492     // Try best fit in exact lists before replenishing the list
  1493     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
  1494       // Replenish list.
  1495       //
  1496       // Things tried that failed.
  1497       //   Tried allocating out of the two LinAB's first before
  1498       // replenishing lists.
  1499       //   Tried small linAB of size 256 (size in indexed list)
  1500       // and replenishing indexed lists from the small linAB.
  1501       //
  1502       FreeChunk* newFc = NULL;
  1503       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
  1504       if (replenish_size < SmallForDictionary) {
  1505         // Do not replenish from an underpopulated size.
  1506         if (_indexedFreeList[replenish_size].surplus() > 0 &&
  1507             _indexedFreeList[replenish_size].head() != NULL) {
  1508           newFc = _indexedFreeList[replenish_size].getChunkAtHead();
  1509         } else if (bestFitFirst()) {
  1510           newFc = bestFitSmall(replenish_size);
  1513       if (newFc == NULL && replenish_size > size) {
  1514         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
  1515         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
  1517       // Note: The split-death stats update for the block obtained above
  1518       // is recorded below, precisely when we know we are going to
  1519       // be splitting it into more than one piece.
  1520       if (newFc != NULL) {
  1521         if  (replenish || CMSReplenishIntermediate) {
  1522           // Replenish this list and return one block to caller.
  1523           size_t i;
  1524           FreeChunk *curFc, *nextFc;
  1525           size_t num_blk = newFc->size() / size;
  1526           assert(num_blk >= 1, "Smaller than requested?");
  1527           assert(newFc->size() % size == 0, "Should be integral multiple of request");
  1528           if (num_blk > 1) {
  1529             // we are sure we will be splitting the block just obtained
  1530             // into multiple pieces; record the split-death of the original
  1531             splitDeath(replenish_size);
  1533           // carve up and link blocks 0, ..., num_blk - 2
  1534           // The last chunk is not added to the lists but is returned as the
  1535           // free chunk.
  1536           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
  1537                i = 0;
  1538                i < (num_blk - 1);
  1539                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
  1540                i++) {
  1541             curFc->setSize(size);
  1542             // Don't record this as a return in order to try and
  1543             // determine the "returns" from a GC.
  1544             _bt.verify_not_unallocated((HeapWord*) fc, size);
  1545             _indexedFreeList[size].returnChunkAtTail(curFc, false);
  1546             _bt.mark_block((HeapWord*)curFc, size);
  1547             splitBirth(size);
  1548             // Don't record the initial population of the indexed list
  1549             // as a split birth.
  1552           // check that the arithmetic was OK above
  1553           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
  1554             "inconsistency in carving newFc");
  1555           curFc->setSize(size);
  1556           _bt.mark_block((HeapWord*)curFc, size);
  1557           splitBirth(size);
  1558           fc = curFc;
  1559         } else {
  1560           // Return entire block to caller
  1561           fc = newFc;
  1565   } else {
  1566     // Get a free chunk from the free chunk dictionary to be returned to
  1567     // replenish the indexed free list.
  1568     fc = getChunkFromDictionaryExact(size);
  1570   // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
  1571   return fc;
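// Replenishment example (with assumed numbers): for a request of size 8
// and CMSIndexedFreeListReplenish taken to be 4, the helper above looks
// for a 32-word chunk, carves it into four 8-word blocks, appends the
// first three to _indexedFreeList[8] (recording split-births), and returns
// the fourth to the caller; the split-death of the 32-word block is
// recorded only because it yielded more than one piece.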
  1574 FreeChunk*
  1575 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
  1576   assert_locked();
  1577   FreeChunk* fc = _dictionary->getChunk(size);
  1578   if (fc == NULL) {
  1579     return NULL;
  1581   _bt.allocated((HeapWord*)fc, fc->size());
  1582   if (fc->size() >= size + MinChunkSize) {
  1583     fc = splitChunkAndReturnRemainder(fc, size);
  1585   assert(fc->size() >= size, "chunk too small");
  1586   assert(fc->size() < size + MinChunkSize, "chunk too big");
  1587   _bt.verify_single_block((HeapWord*)fc, fc->size());
  1588   return fc;
  1591 FreeChunk*
  1592 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
  1593   assert_locked();
  1594   FreeChunk* fc = _dictionary->getChunk(size);
  1595   if (fc == NULL) {
  1596     return fc;
  1598   _bt.allocated((HeapWord*)fc, fc->size());
  1599   if (fc->size() == size) {
  1600     _bt.verify_single_block((HeapWord*)fc, size);
  1601     return fc;
  1603   assert(fc->size() > size, "getChunk() guarantee");
  1604   if (fc->size() < size + MinChunkSize) {
  1605     // Return the chunk to the dictionary and go get a bigger one.
  1606     returnChunkToDictionary(fc);
  1607     fc = _dictionary->getChunk(size + MinChunkSize);
  1608     if (fc == NULL) {
  1609       return NULL;
  1611     _bt.allocated((HeapWord*)fc, fc->size());
  1613   assert(fc->size() >= size + MinChunkSize, "tautology");
  1614   fc = splitChunkAndReturnRemainder(fc, size);
  1615   assert(fc->size() == size, "chunk is wrong size");
  1616   _bt.verify_single_block((HeapWord*)fc, size);
  1617   return fc;
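// Note on the retry in getChunkFromDictionaryExact() above: if the chunk
// handed back by the dictionary exceeds the request by less than
// MinChunkSize, the excess could not stand alone as a free chunk after a
// split, so the chunk is returned and a chunk of at least
// size + MinChunkSize is requested instead, guaranteeing a viable
// remainder.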
  1620 void
  1621 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
  1622   assert_locked();
  1624   size_t size = chunk->size();
  1625   _bt.verify_single_block((HeapWord*)chunk, size);
  1626   // adjust _unallocated_block downward, as necessary
  1627   _bt.freed((HeapWord*)chunk, size);
  1628   _dictionary->returnChunk(chunk);
  1629 #ifndef PRODUCT
  1630   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1631     TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
  1633 #endif // PRODUCT
  1636 void
  1637 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
  1638   assert_locked();
  1639   size_t size = fc->size();
  1640   _bt.verify_single_block((HeapWord*) fc, size);
  1641   _bt.verify_not_unallocated((HeapWord*) fc, size);
  1642   if (_adaptive_freelists) {
  1643     _indexedFreeList[size].returnChunkAtTail(fc);
  1644   } else {
  1645     _indexedFreeList[size].returnChunkAtHead(fc);
  1647 #ifndef PRODUCT
  1648   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1649      _indexedFreeList[size].verify_stats();
  1651 #endif // PRODUCT
  1654 // Add chunk to end of last block -- if it's the largest
  1655 // block -- and update BOT and census data. We would
  1656 // of course have preferred to coalesce it with the
  1657 // last block, but it's currently less expensive to find the
  1658 // largest block than it is to find the last.
  1659 void
  1660 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
  1661   HeapWord* chunk, size_t     size) {
  1662   // check that the chunk does lie in this space!
  1663   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1664   // One of the parallel gc task threads may be here
  1665   // whilst others are allocating.
  1666   Mutex* lock = NULL;
  1667   if (ParallelGCThreads != 0) {
  1668     lock = &_parDictionaryAllocLock;
  1670   FreeChunk* ec;
  1672     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1673     ec = dictionary()->findLargestDict();  // get largest block
  1674     if (ec != NULL && ec->end() == chunk) {
  1675       // It's a coterminal block - we can coalesce.
  1676       size_t old_size = ec->size();
  1677       coalDeath(old_size);
  1678       removeChunkFromDictionary(ec);
  1679       size += old_size;
  1680     } else {
  1681       ec = (FreeChunk*)chunk;
  1684   ec->setSize(size);
  1685   debug_only(ec->mangleFreed(size));
  1686   if (size < SmallForDictionary) {
  1687     lock = _indexedFreeListParLocks[size];
  1689   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1690   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  1691   // record the birth under the lock since the recording involves
  1692   // manipulation of the list on which the chunk lives and
  1693   // if the chunk is allocated and is the last on the list,
  1694   // the list can go away.
  1695   coalBirth(size);
  1698 void
  1699 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
  1700                                               size_t     size) {
  1701   // check that the chunk does lie in this space!
  1702   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1703   assert_locked();
  1704   _bt.verify_single_block(chunk, size);
  1706   FreeChunk* fc = (FreeChunk*) chunk;
  1707   fc->setSize(size);
  1708   debug_only(fc->mangleFreed(size));
  1709   if (size < SmallForDictionary) {
  1710     returnChunkToFreeList(fc);
  1711   } else {
  1712     returnChunkToDictionary(fc);
  1716 void
  1717 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
  1718   size_t size, bool coalesced) {
  1719   assert_locked();
  1720   assert(chunk != NULL, "null chunk");
  1721   if (coalesced) {
  1722     // repair BOT
  1723     _bt.single_block(chunk, size);
  1725   addChunkToFreeLists(chunk, size);
  1728 // We _must_ find the purported chunk on our free lists;
  1729 // we assert if we don't.
  1730 void
  1731 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
  1732   size_t size = fc->size();
  1733   assert_locked();
  1734   debug_only(verifyFreeLists());
  1735   if (size < SmallForDictionary) {
  1736     removeChunkFromIndexedFreeList(fc);
  1737   } else {
  1738     removeChunkFromDictionary(fc);
  1740   _bt.verify_single_block((HeapWord*)fc, size);
  1741   debug_only(verifyFreeLists());
  1744 void
  1745 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
  1746   size_t size = fc->size();
  1747   assert_locked();
  1748   assert(fc != NULL, "null chunk");
  1749   _bt.verify_single_block((HeapWord*)fc, size);
  1750   _dictionary->removeChunk(fc);
  1751   // adjust _unallocated_block upward, as necessary
  1752   _bt.allocated((HeapWord*)fc, size);
  1755 void
  1756 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
  1757   assert_locked();
  1758   size_t size = fc->size();
  1759   _bt.verify_single_block((HeapWord*)fc, size);
  1760   NOT_PRODUCT(
  1761     if (FLSVerifyIndexTable) {
  1762       verifyIndexedFreeList(size);
  1765   _indexedFreeList[size].removeChunk(fc);
  1766   debug_only(fc->clearNext());
  1767   debug_only(fc->clearPrev());
  1768   NOT_PRODUCT(
  1769     if (FLSVerifyIndexTable) {
  1770       verifyIndexedFreeList(size);
  1775 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
  1776   /* A hint is the next larger size that has a surplus.
  1777      Start search at a size large enough to guarantee that
  1778      the excess is >= MIN_CHUNK. */
  1779   size_t start = align_object_size(numWords + MinChunkSize);
  1780   if (start < IndexSetSize) {
  1781     FreeList* it   = _indexedFreeList;
  1782     size_t    hint = _indexedFreeList[start].hint();
  1783     while (hint < IndexSetSize) {
  1784       assert(hint % MinObjAlignment == 0, "hint should be aligned");
  1785       FreeList *fl = &_indexedFreeList[hint];
  1786       if (fl->surplus() > 0 && fl->head() != NULL) {
  1787         // Found a list with surplus, reset original hint
  1788         // and split out a free chunk which is returned.
  1789         _indexedFreeList[start].set_hint(hint);
  1790         FreeChunk* res = getFromListGreater(fl, numWords);
  1791         assert(res == NULL || res->isFree(),
  1792           "Should be returning a free chunk");
  1793         return res;
  1795       hint = fl->hint(); /* keep looking */
  1797     /* None found. */
  1798     it[start].set_hint(IndexSetSize);
  1800   return NULL;
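// Hint-chain sketch for bestFitSmall() above: the search starts at
// start = align_object_size(numWords + MinChunkSize) and follows
// _indexedFreeList[start].hint(), then that list's hint, and so on, until
// a list with both a surplus and a non-empty head is found; that size is
// recorded as the new hint for 'start' and split via getFromListGreater().
// If the chain runs off the end, the hint for 'start' is set to
// IndexSetSize so later searches fail fast until setFLHints() recomputes
// the hints.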
  1803 /* Requires fl->size >= numWords + MinChunkSize */
  1804 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
  1805   size_t numWords) {
  1806   FreeChunk *curr = fl->head();
  1807   assert(curr != NULL, "List is empty");
  1808   assert(numWords >= MinChunkSize, "Word size is too small");
  1809   size_t oldNumWords = curr->size();
  1810   assert(oldNumWords >= numWords + MinChunkSize,
  1811         "Size of chunks in the list is too small");
  1813   fl->removeChunk(curr);
  1814   // recorded indirectly by splitChunkAndReturnRemainder -
  1815   // smallSplit(oldNumWords, numWords);
  1816   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
  1817   // Does anything have to be done for the remainder in terms of
  1818   // fixing the card table?
  1819   assert(new_chunk == NULL || new_chunk->isFree(),
  1820     "Should be returning a free chunk");
  1821   return new_chunk;
  1824 FreeChunk*
  1825 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
  1826   size_t new_size) {
  1827   assert_locked();
  1828   size_t size = chunk->size();
  1829   assert(size > new_size, "Split from a smaller block?");
  1830   assert(is_aligned(chunk), "alignment problem");
  1831   assert(size == adjustObjectSize(size), "alignment problem");
  1832   size_t rem_size = size - new_size;
  1833   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
  1834   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
  1835   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
  1836   assert(is_aligned(ffc), "alignment problem");
  1837   ffc->setSize(rem_size);
  1838   ffc->linkNext(NULL);
  1839   ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  1840   // Above must occur before BOT is updated below.
  1841   // adjust block offset table
  1842   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
  1843   if (rem_size < SmallForDictionary) {
  1844     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  1845     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
  1846     returnChunkToFreeList(ffc);
  1847     split(size, rem_size);
  1848     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
  1849   } else {
  1850     returnChunkToDictionary(ffc);
  1851     split(size, rem_size);
  1853   chunk->setSize(new_size);
  1854   return chunk;
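// Split example for splitChunkAndReturnRemainder() above: splitting a
// 100-word free chunk at new_size = 60 creates a 40-word remainder at
// (chunk + 60).  The remainder's header is written (marking it free)
// before the BOT split, it is then returned to the indexed lists or the
// dictionary depending on its size (taking the per-size parallel lock if
// needed), split(100, 40) updates the census, and the original chunk,
// shrunk to 60 words, is returned to the caller.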
  1857 void
  1858 CompactibleFreeListSpace::sweep_completed() {
  1859   // Now that space is probably plentiful, refill linear
  1860   // allocation blocks as needed.
  1861   refillLinearAllocBlocksIfNeeded();
  1864 void
  1865 CompactibleFreeListSpace::gc_prologue() {
  1866   assert_locked();
  1867   if (PrintFLSStatistics != 0) {
  1868     gclog_or_tty->print("Before GC:\n");
  1869     reportFreeListStatistics();
  1871   refillLinearAllocBlocksIfNeeded();
  1874 void
  1875 CompactibleFreeListSpace::gc_epilogue() {
  1876   assert_locked();
  1877   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
  1878     if (_smallLinearAllocBlock._word_size == 0)
  1879       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
  1881   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1882   _promoInfo.stopTrackingPromotions();
  1883   repairLinearAllocationBlocks();
  1884   // Print Space's stats
  1885   if (PrintFLSStatistics != 0) {
  1886     gclog_or_tty->print("After GC:\n");
  1887     reportFreeListStatistics();
  1891 // Iteration support, mostly delegated from a CMS generation
  1893 void CompactibleFreeListSpace::save_marks() {
  1894   // mark the "end" of the used space at the time of this call;
  1895   // note, however, that promoted objects from this point
  1896   // on are tracked in the _promoInfo below.
  1897   set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ?
  1898                       unallocated_block() : end());
  1899   // inform allocator that promotions should be tracked.
  1900   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1901   _promoInfo.startTrackingPromotions();
  1904 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
  1905   assert(_promoInfo.tracking(), "No preceding save_marks?");
  1906   guarantee(SharedHeap::heap()->n_par_threads() == 0,
  1907             "Shouldn't be called (yet) during parallel part of gc.");
  1908   return _promoInfo.noPromotions();
  1911 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
  1913 void CompactibleFreeListSpace::                                             \
  1914 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  1915   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
  1916          "Shouldn't be called (yet) during parallel part of gc.");          \
  1917   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
  1918   /*                                                                        \
  1919    * This also restores any displaced headers and removes the elements from \
  1920    * the iteration set as they are processed, so that we have a clean slate \
  1921    * at the end of the iteration. Note, thus, that if new objects are       \
  1922    * promoted as a result of the iteration they are iterated over as well.  \
  1923    */                                                                       \
  1924   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
  1927 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
  1929 //////////////////////////////////////////////////////////////////////////////
  1930 // We go over the list of promoted objects, removing each from the list,
  1931 // and applying the closure (this may, in turn, add more elements to
  1932 // the tail of the promoted list, and these newly added objects will
  1933 // also be processed) until the list is empty.
  1934 // To aid verification and debugging, in the non-product builds
  1935 // we actually forward _promoHead each time we process a promoted oop.
  1936 // Note that this is not necessary in general (i.e. when we don't need to
  1937 // call PromotionInfo::verify()) because oop_iterate can only add to the
  1938 // end of _promoTail, and never needs to look at _promoHead.
  1940 #define PROMOTED_OOPS_ITERATE_DEFN(OopClosureType, nv_suffix)               \
  1942 void PromotionInfo::promoted_oops_iterate##nv_suffix(OopClosureType* cl) {  \
  1943   NOT_PRODUCT(verify());                                                    \
  1944   PromotedObject *curObj, *nextObj;                                         \
  1945   for (curObj = _promoHead; curObj != NULL; curObj = nextObj) {             \
  1946     if ((nextObj = curObj->next()) == NULL) {                               \
  1947       /* protect ourselves against additions due to closure application     \
  1948          below by resetting the list.  */                                   \
  1949       assert(_promoTail == curObj, "Should have been the tail");            \
  1950       _promoHead = _promoTail = NULL;                                       \
  1951     }                                                                       \
  1952     if (curObj->hasDisplacedMark()) {                                       \
  1953       /* restore displaced header */                                        \
  1954       oop(curObj)->set_mark(nextDisplacedHeader());                         \
  1955     } else {                                                                \
  1956       /* restore prototypical header */                                     \
  1957       oop(curObj)->init_mark();                                             \
  1958     }                                                                       \
  1959     /* The "promoted_mark" should now not be set */                         \
  1960     assert(!curObj->hasPromotedMark(),                                      \
  1961            "Should have been cleared by restoring displaced mark-word");    \
  1962     NOT_PRODUCT(_promoHead = nextObj);                                      \
  1963     if (cl != NULL) oop(curObj)->oop_iterate(cl);                           \
  1964     if (nextObj == NULL) { /* start at head of list reset above */          \
  1965       nextObj = _promoHead;                                                 \
  1966     }                                                                       \
  1967   }                                                                         \
  1968   assert(noPromotions(), "post-condition violation");                       \
  1969   assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
  1970   assert(_spoolHead == _spoolTail, "emptied spooling buffers");             \
  1971   assert(_firstIndex == _nextIndex, "empty buffer");                        \
  1974 // This should have been ALL_SINCE_...() just like the others,
  1975 // but, because the body of the method above is somewhat longer,
  1976 // the MSVC compiler cannot cope; as a workaround, we split the
  1977 // macro into its 3 constituent parts below (see original macro
  1978 // definition in specializedOopClosures.hpp).
  1979 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
  1980 PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)
  1983 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
  1984   // ugghh... how would one do this efficiently for a non-contiguous space?
  1985   guarantee(false, "NYI");
  1988 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  1989   return _smallLinearAllocBlock._word_size == 0;
  1992 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
  1993   // Fix up linear allocation blocks to look like free blocks
  1994   repairLinearAllocBlock(&_smallLinearAllocBlock);
  1997 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
  1998   assert_locked();
  1999   if (blk->_ptr != NULL) {
  2000     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
  2001            "Minimum block size requirement");
  2002     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
  2003     fc->setSize(blk->_word_size);
  2004     fc->linkPrev(NULL);   // mark as free
  2005     fc->dontCoalesce();
  2006     assert(fc->isFree(), "just marked it free");
  2007     assert(fc->cantCoalesce(), "just marked it uncoalescable");
  2011 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
  2012   assert_locked();
  2013   if (_smallLinearAllocBlock._ptr == NULL) {
  2014     assert(_smallLinearAllocBlock._word_size == 0,
  2015       "Size of linAB should be zero if the ptr is NULL");
  2016     // Reset the linAB refill and allocation size limit.
  2017     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
  2019   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
  2022 void
  2023 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
  2024   assert_locked();
  2025   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
  2026          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
  2027          "blk invariant");
  2028   if (blk->_ptr == NULL) {
  2029     refillLinearAllocBlock(blk);
  2031   if (PrintMiscellaneous && Verbose) {
  2032     if (blk->_word_size == 0) {
  2033       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
  2038 void
  2039 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  2040   assert_locked();
  2041   assert(blk->_word_size == 0 && blk->_ptr == NULL,
  2042          "linear allocation block should be empty");
  2043   FreeChunk* fc;
  2044   if (blk->_refillSize < SmallForDictionary &&
  2045       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
  2046     // A linAB's strategy might be to use small sizes to reduce
  2047     // fragmentation but still get the benefits of allocation from a
  2048     // linAB.
  2049   } else {
  2050     fc = getChunkFromDictionary(blk->_refillSize);
  2052   if (fc != NULL) {
  2053     blk->_ptr  = (HeapWord*)fc;
  2054     blk->_word_size = fc->size();
  2055     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
  2059 // Support for concurrent collection policy decisions.
  2060 bool CompactibleFreeListSpace::should_concurrent_collect() const {
  2061   // In the future we might want to add in fragmentation stats --
  2062   // including erosion of the "mountain" into this decision as well.
  2063   return !adaptive_freelists() && linearAllocationWouldFail();
  2066 // Support for compaction
  2068 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  2069   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  2070   // prepare_for_compaction() uses the space between live objects
  2071   // so that a later phase can skip dead space quickly.  As a result,
  2072   // verification of the free lists doesn't work afterwards.
  2075 #define obj_size(q) adjustObjectSize(oop(q)->size())
  2076 #define adjust_obj_size(s) adjustObjectSize(s)
  2078 void CompactibleFreeListSpace::adjust_pointers() {
  2079   // In other versions of adjust_pointers(), a bail out
  2080   // based on the amount of live data in the generation
  2081   // (i.e., if 0, bail out) may be used.
  2082   // Cannot test used() == 0 here because the free lists have already
  2083   // been mangled by the compaction.
  2085   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
  2086   // See note about verification in prepare_for_compaction().
  2089 void CompactibleFreeListSpace::compact() {
  2090   SCAN_AND_COMPACT(obj_size);
  2093 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
  2094 // where fbs is free block sizes
  2095 double CompactibleFreeListSpace::flsFrag() const {
  2096   size_t itabFree = totalSizeInIndexedFreeLists();
  2097   double frag = 0.0;
  2098   size_t i;
  2100   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2101     double sz  = i;
  2102     frag      += _indexedFreeList[i].count() * (sz * sz);
  2105   double totFree = itabFree +
  2106                    _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
  2107   if (totFree > 0) {
  2108     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
  2109             (totFree * totFree));
  2110     frag = (double)1.0  - frag;
  2111   } else {
  2112     assert(frag == 0.0, "Follows from totFree == 0");
  2114   return frag;
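// Worked example of the metric above: with all free space in a single
// block of s words, frag = 1 - s*s/(s*s) = 0; with the same space spread
// over n equally sized blocks, frag = 1 - n*s*s/(n*s)^2 = 1 - 1/n, which
// approaches 1 as the free space shatters into many small pieces.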
  2117 void CompactibleFreeListSpace::beginSweepFLCensus(
  2118   float inter_sweep_current,
  2119   float inter_sweep_estimate,
  2120   float intra_sweep_estimate) {
  2121   assert_locked();
  2122   size_t i;
  2123   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2124     FreeList* fl    = &_indexedFreeList[i];
  2125     if (PrintFLSStatistics > 1) {
  2126       gclog_or_tty->print("size[%d] : ", i);
  2128     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
  2129     fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
  2130     fl->set_beforeSweep(fl->count());
  2131     fl->set_bfrSurp(fl->surplus());
  2133   _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
  2134                                     inter_sweep_current,
  2135                                     inter_sweep_estimate,
  2136                                     intra_sweep_estimate);
  2139 void CompactibleFreeListSpace::setFLSurplus() {
  2140   assert_locked();
  2141   size_t i;
  2142   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2143     FreeList *fl = &_indexedFreeList[i];
  2144     fl->set_surplus(fl->count() -
  2145                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
  2149 void CompactibleFreeListSpace::setFLHints() {
  2150   assert_locked();
  2151   size_t i;
  2152   size_t h = IndexSetSize;
  2153   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
  2154     FreeList *fl = &_indexedFreeList[i];
  2155     fl->set_hint(h);
  2156     if (fl->surplus() > 0) {
  2157       h = i;
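// Hint propagation in setFLHints() above: walking i downward from
// IndexSetSize - 1, each list's hint is set to the nearest larger size
// that was seen to have a surplus, so a failed exact-size allocation can
// jump straight to a list worth raiding (see bestFitSmall()).  Lists above
// the largest surplus size keep the sentinel value IndexSetSize.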
  2162 void CompactibleFreeListSpace::clearFLCensus() {
  2163   assert_locked();
  2164   size_t i;
  2165   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2166     FreeList *fl = &_indexedFreeList[i];
  2167     fl->set_prevSweep(fl->count());
  2168     fl->set_coalBirths(0);
  2169     fl->set_coalDeaths(0);
  2170     fl->set_splitBirths(0);
  2171     fl->set_splitDeaths(0);
  2175 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  2176   if (PrintFLSStatistics > 0) {
  2177     HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
  2178     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
  2179                            largestAddr);
  2181   setFLSurplus();
  2182   setFLHints();
  2183   if (PrintGC && PrintFLSCensus > 0) {
  2184     printFLCensus(sweep_count);
  2186   clearFLCensus();
  2187   assert_locked();
  2188   _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
  2191 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
  2192   if (size < SmallForDictionary) {
  2193     FreeList *fl = &_indexedFreeList[size];
  2194     return (fl->coalDesired() < 0) ||
  2195            ((int)fl->count() > fl->coalDesired());
  2196   } else {
  2197     return dictionary()->coalDictOverPopulated(size);
  2201 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
  2202   assert(size < SmallForDictionary, "Size too large for indexed list");
  2203   FreeList *fl = &_indexedFreeList[size];
  2204   fl->increment_coalBirths();
  2205   fl->increment_surplus();
  2208 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
  2209   assert(size < SmallForDictionary, "Size too large for indexed list");
  2210   FreeList *fl = &_indexedFreeList[size];
  2211   fl->increment_coalDeaths();
  2212   fl->decrement_surplus();
  2215 void CompactibleFreeListSpace::coalBirth(size_t size) {
  2216   if (size  < SmallForDictionary) {
  2217     smallCoalBirth(size);
  2218   } else {
  2219     dictionary()->dictCensusUpdate(size,
  2220                                    false /* split */,
  2221                                    true /* birth */);
  2225 void CompactibleFreeListSpace::coalDeath(size_t size) {
  2226   if (size < SmallForDictionary) {
  2227     smallCoalDeath(size);
  2228   } else {
  2229     dictionary()->dictCensusUpdate(size,
  2230                                    false /* split */,
  2231                                    false /* birth */);
  2235 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
  2236   assert(size < SmallForDictionary, "Size too large for indexed list");
  2237   FreeList *fl = &_indexedFreeList[size];
  2238   fl->increment_splitBirths();
  2239   fl->increment_surplus();
  2242 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
  2243   assert(size < SmallForDictionary, "Size too large for indexed list");
  2244   FreeList *fl = &_indexedFreeList[size];
  2245   fl->increment_splitDeaths();
  2246   fl->decrement_surplus();
  2249 void CompactibleFreeListSpace::splitBirth(size_t size) {
  2250   if (size  < SmallForDictionary) {
  2251     smallSplitBirth(size);
  2252   } else {
  2253     dictionary()->dictCensusUpdate(size,
  2254                                    true /* split */,
  2255                                    true /* birth */);
  2259 void CompactibleFreeListSpace::splitDeath(size_t size) {
  2260   if (size  < SmallForDictionary) {
  2261     smallSplitDeath(size);
  2262   } else {
  2263     dictionary()->dictCensusUpdate(size,
  2264                                    true /* split */,
  2265                                    false /* birth */);
  2269 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
  2270   size_t to2 = from - to1;
  2271   splitDeath(from);
  2272   splitBirth(to1);
  2273   splitBirth(to2);
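// Census example for split() above: split(100, 40) records a split-death
// at size 100 and split-births at sizes 40 and 60 (= 100 - 40), keeping
// the per-size birth/death counts consistent with
// splitChunkAndReturnRemainder().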
  2276 void CompactibleFreeListSpace::print() const {
  2277   tty->print(" CompactibleFreeListSpace");
  2278   Space::print();
  2281 void CompactibleFreeListSpace::prepare_for_verify() {
  2282   assert_locked();
  2283   repairLinearAllocationBlocks();
  2284   // Verify that the SpoolBlocks look like free blocks of
  2285   // appropriate sizes... To be done ...
  2288 class VerifyAllBlksClosure: public BlkClosure {
  2289  private:
  2290   const CompactibleFreeListSpace* _sp;
  2291   const MemRegion                 _span;
  2293  public:
  2294   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
  2295     MemRegion span) :  _sp(sp), _span(span) { }
  2297   virtual size_t do_blk(HeapWord* addr) {
  2298     size_t res;
  2299     if (_sp->block_is_obj(addr)) {
  2300       oop p = oop(addr);
  2301       guarantee(p->is_oop(), "Should be an oop");
  2302       res = _sp->adjustObjectSize(p->size());
  2303       if (_sp->obj_is_alive(addr)) {
  2304         p->verify();
  2306     } else {
  2307       FreeChunk* fc = (FreeChunk*)addr;
  2308       res = fc->size();
  2309       if (FLSVerifyLists && !fc->cantCoalesce()) {
  2310         guarantee(_sp->verifyChunkInFreeLists(fc),
  2311                   "Chunk should be on a free list");
  2314     guarantee(res != 0, "Livelock: no rank reduction!");
  2315     return res;
  2317 };
  2319 class VerifyAllOopsClosure: public OopClosure {
  2320  private:
  2321   const CMSCollector*             _collector;
  2322   const CompactibleFreeListSpace* _sp;
  2323   const MemRegion                 _span;
  2324   const bool                      _past_remark;
  2325   const CMSBitMap*                _bit_map;
  2327  protected:
  2328   void do_oop(void* p, oop obj) {
  2329     if (_span.contains(obj)) { // the interior oop points into CMS heap
  2330       if (!_span.contains(p)) { // reference from outside CMS heap
  2331         // Should be a valid object; the first disjunct below allows
  2332         // us to sidestep an assertion in block_is_obj() that insists
  2333         // that p be in _sp. Note that several generations (and spaces)
  2334         // are spanned by _span (CMS heap) above.
  2335         guarantee(!_sp->is_in_reserved(obj) ||
  2336                   _sp->block_is_obj((HeapWord*)obj),
  2337                   "Should be an object");
  2338         guarantee(obj->is_oop(), "Should be an oop");
  2339         obj->verify();
  2340         if (_past_remark) {
  2341           // Remark has been completed, the object should be marked
  2342           _bit_map->isMarked((HeapWord*)obj);
  2344       } else { // reference within CMS heap
  2345         if (_past_remark) {
  2346           // Remark has been completed -- so the referent should have
  2347           // been marked, if referring object is.
  2348           if (_bit_map->isMarked(_collector->block_start(p))) {
  2349             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
  2353     } else if (_sp->is_in_reserved(p)) {
  2354       // the reference is from FLS, and points out of FLS
  2355       guarantee(obj->is_oop(), "Should be an oop");
  2356       obj->verify();
  2360   template <class T> void do_oop_work(T* p) {
  2361     T heap_oop = oopDesc::load_heap_oop(p);
  2362     if (!oopDesc::is_null(heap_oop)) {
  2363       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2364       do_oop(p, obj);
  2368  public:
  2369   VerifyAllOopsClosure(const CMSCollector* collector,
  2370     const CompactibleFreeListSpace* sp, MemRegion span,
  2371     bool past_remark, CMSBitMap* bit_map) :
  2372     OopClosure(), _collector(collector), _sp(sp), _span(span),
  2373     _past_remark(past_remark), _bit_map(bit_map) { }
  2375   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
  2376   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
  2377 };
  2379 void CompactibleFreeListSpace::verify(bool ignored) const {
  2380   assert_lock_strong(&_freelistLock);
  2381   verify_objects_initialized();
  2382   MemRegion span = _collector->_span;
  2383   bool past_remark = (_collector->abstract_state() ==
  2384                       CMSCollector::Sweeping);
  2386   ResourceMark rm;
  2387   HandleMark  hm;
  2389   // Check integrity of CFL data structures
  2390   _promoInfo.verify();
  2391   _dictionary->verify();
  2392   if (FLSVerifyIndexTable) {
  2393     verifyIndexedFreeLists();
  2395   // Check integrity of all objects and free blocks in space
  2397     VerifyAllBlksClosure cl(this, span);
  2398     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
  2400   // Check that all references in the heap to FLS
  2401   // are to valid objects in FLS or that references in
  2402   // FLS are to valid objects elsewhere in the heap
  2403   if (FLSVerifyAllHeapReferences)
  2405     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
  2406       _collector->markBitMap());
  2407     CollectedHeap* ch = Universe::heap();
  2408     ch->oop_iterate(&cl);              // all oops in generations
  2409     ch->permanent_oop_iterate(&cl);    // all oops in perm gen
  2412   if (VerifyObjectStartArray) {
  2413     // Verify the block offset table
  2414     _bt.verify();
  2418 #ifndef PRODUCT
  2419 void CompactibleFreeListSpace::verifyFreeLists() const {
  2420   if (FLSVerifyLists) {
  2421     _dictionary->verify();
  2422     verifyIndexedFreeLists();
  2423   } else {
  2424     if (FLSVerifyDictionary) {
  2425       _dictionary->verify();
  2427     if (FLSVerifyIndexTable) {
  2428       verifyIndexedFreeLists();
  2432 #endif
  2434 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
  2435   size_t i = 0;
  2436   for (; i < MinChunkSize; i++) {
  2437     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
  2439   for (; i < IndexSetSize; i++) {
  2440     verifyIndexedFreeList(i);
  2444 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
  2445   FreeChunk* fc   =  _indexedFreeList[size].head();
  2446   FreeChunk* tail =  _indexedFreeList[size].tail();
  2447   size_t    num = _indexedFreeList[size].count();
  2448   size_t      n = 0;
  2449   guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
  2450   for (; fc != NULL; fc = fc->next(), n++) {
  2451     guarantee(fc->size() == size, "Size inconsistency");
  2452     guarantee(fc->isFree(), "!free?");
  2453     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
  2454     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
  2456   guarantee(n == num, "Incorrect count");
  2459 #ifndef PRODUCT
  2460 void CompactibleFreeListSpace::checkFreeListConsistency() const {
  2461   assert(_dictionary->minSize() <= IndexSetSize,
  2462     "Some sizes can't be allocated without recourse to"
  2463     " linear allocation buffers");
  2464   assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
  2465     "else MIN_TREE_CHUNK_SIZE is wrong");
  2466   assert((IndexSetStride == 2 && IndexSetStart == 2) ||
  2467          (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
  2468   assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
  2469       "Some for-loops may be incorrectly initialized");
  2470   assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
  2471       "For-loops that iterate over IndexSet with stride 2 may be wrong");
  2473 #endif
  2475 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  2476   assert_lock_strong(&_freelistLock);
  2477   FreeList total;
  2478   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  2479   FreeList::print_labels_on(gclog_or_tty, "size");
  2480   size_t totalFree = 0;
  2481   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2482     const FreeList *fl = &_indexedFreeList[i];
  2483     totalFree += fl->count() * fl->size();
  2484     if (i % (40*IndexSetStride) == 0) {
  2485       FreeList::print_labels_on(gclog_or_tty, "size");
  2487     fl->print_on(gclog_or_tty);
  2488     total.set_bfrSurp(    total.bfrSurp()     + fl->bfrSurp()    );
  2489     total.set_surplus(    total.surplus()     + fl->surplus()    );
  2490     total.set_desired(    total.desired()     + fl->desired()    );
  2491     total.set_prevSweep(  total.prevSweep()   + fl->prevSweep()  );
  2492     total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
  2493     total.set_count(      total.count()       + fl->count()      );
  2494     total.set_coalBirths( total.coalBirths()  + fl->coalBirths() );
  2495     total.set_coalDeaths( total.coalDeaths()  + fl->coalDeaths() );
  2496     total.set_splitBirths(total.splitBirths() + fl->splitBirths());
  2497     total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
  2499   total.print_on(gclog_or_tty, "TOTAL");
  2500   gclog_or_tty->print_cr("Total free in indexed lists "
  2501                          SIZE_FORMAT " words", totalFree);
  2502   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
  2503     (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
  2504             (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
  2505     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
  2506   _dictionary->printDictCensus();
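// In the summary printed above, "growth" is the net birth count of the
// totals row (split-births + coal-births - split-deaths - coal-deaths)
// relative to the population at the previous sweep, and "deficit" is the
// shortfall of the current count from the desired count; both are guarded
// against division by zero.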
  2509 // Return the next displaced header, incrementing the pointer and
  2510 // recycling spool area as necessary.
  2511 markOop PromotionInfo::nextDisplacedHeader() {
  2512   assert(_spoolHead != NULL, "promotionInfo inconsistency");
  2513   assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
  2514          "Empty spool space: no displaced header can be fetched");
  2515   assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
  2516   markOop hdr = _spoolHead->displacedHdr[_firstIndex];
  2517   // Spool forward
  2518   if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
  2519     // forward to next block, recycling this block into spare spool buffer
  2520     SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
  2521     assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
  2522     _spoolHead->nextSpoolBlock = _spareSpool;
  2523     _spareSpool = _spoolHead;
  2524     _spoolHead = tmp;
  2525     _firstIndex = 1;
  2526     NOT_PRODUCT(
  2527       if (_spoolHead == NULL) {  // all buffers fully consumed
  2528         assert(_spoolTail == NULL && _nextIndex == 1,
  2529                "spool buffers processing inconsistency");
  2530       }
  2531     )
  2532   }
  2533   return hdr;
  2534 }
  2536 void PromotionInfo::track(PromotedObject* trackOop) {
  2537   track(trackOop, oop(trackOop)->klass());
  2538 }
  2540 void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
  2541   // make a copy of header as it may need to be spooled
  2542   markOop mark = oop(trackOop)->mark();
  2543   trackOop->clearNext();
  2544   if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
  2545     // save non-prototypical header, and mark oop
  2546     saveDisplacedHeader(mark);
  2547     trackOop->setDisplacedMark();
  2548   } else {
  2549     // we'd like to assert something like the following:
  2550     // assert(mark == markOopDesc::prototype(), "consistency check");
  2551     // ... but the above won't work because the age bits have not (yet) been
  2552     // cleared. The remainder of the check would be identical to the
  2553     // condition checked in must_be_preserved() above, so we don't really
  2554   // have anything useful to check here!
  2555   }
  2556   if (_promoTail != NULL) {
  2557     assert(_promoHead != NULL, "List consistency");
  2558     _promoTail->setNext(trackOop);
  2559     _promoTail = trackOop;
  2560   } else {
  2561     assert(_promoHead == NULL, "List consistency");
  2562     _promoHead = _promoTail = trackOop;
  2563   }
  2564   // Mark as newly promoted, so we can skip over such objects
  2565   // when scanning dirty cards
  2566   assert(!trackOop->hasPromotedMark(), "Should not have been marked");
  2567   trackOop->setPromotedMark();
  2568 }
  2570 // Save the given displaced header, incrementing the pointer and
  2571 // obtaining more spool area as necessary.
  2572 void PromotionInfo::saveDisplacedHeader(markOop hdr) {
  2573   assert(_spoolHead != NULL && _spoolTail != NULL,
  2574          "promotionInfo inconsistency");
  2575   assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
  2576   _spoolTail->displacedHdr[_nextIndex] = hdr;
  2577   // Spool forward
  2578   if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block
  2579     // get a new spooling block
  2580     assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list");
  2581     _splice_point = _spoolTail;                   // save for splicing
  2582     _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail
  2583     _spoolTail = _spoolTail->nextSpoolBlock;      // might become NULL ...
  2584     // ... but will attempt filling before next promotion attempt
  2585     _nextIndex = 1;
  2586   }
  2587 }
  2589 // Ensure that spooling space exists. Return false if spooling space
  2590 // could not be obtained.
  2591 bool PromotionInfo::ensure_spooling_space_work() {
  2592   assert(!has_spooling_space(), "Only call when there is no spooling space");
  2593   // Try and obtain more spooling space
  2594   SpoolBlock* newSpool = getSpoolBlock();
  2595   assert(newSpool == NULL ||
  2596          (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL),
  2597         "getSpoolBlock() sanity check");
  2598   if (newSpool == NULL) {
  2599     return false;
  2600   }
  2601   _nextIndex = 1;
  2602   if (_spoolTail == NULL) {
  2603     _spoolTail = newSpool;
  2604     if (_spoolHead == NULL) {
  2605       _spoolHead = newSpool;
  2606       _firstIndex = 1;
  2607     } else {
  2608       assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL,
  2609              "Splice point invariant");
  2610       // Extra check that _splice_point is connected to list
  2611       #ifdef ASSERT
  2612       {
  2613         SpoolBlock* blk = _spoolHead;
  2614         for (; blk->nextSpoolBlock != NULL;
  2615              blk = blk->nextSpoolBlock);
  2616         assert(blk != NULL && blk == _splice_point,
  2617                "Splice point incorrect");
  2618       }
  2619       #endif // ASSERT
  2620       _splice_point->nextSpoolBlock = newSpool;
  2621     }
  2622   } else {
  2623     assert(_spoolHead != NULL, "spool list consistency");
  2624     _spoolTail->nextSpoolBlock = newSpool;
  2625     _spoolTail = newSpool;
  2626   }
  2627   return true;
  2628 }
  2630 // Get a free spool buffer from the free pool, getting a new block
  2631 // from the heap if necessary.
  2632 SpoolBlock* PromotionInfo::getSpoolBlock() {
  2633   SpoolBlock* res;
  2634   if ((res = _spareSpool) != NULL) {
  2635     _spareSpool = _spareSpool->nextSpoolBlock;
  2636     res->nextSpoolBlock = NULL;
  2637   } else {  // spare spool exhausted, get some from heap
  2638     res = (SpoolBlock*)(space()->allocateScratch(refillSize()));
  2639     if (res != NULL) {
  2640       res->init();
  2641     }
  2642   }
  2643   assert(res == NULL || res->nextSpoolBlock == NULL, "postcondition");
  2644   return res;
  2645 }
  2647 void PromotionInfo::startTrackingPromotions() {
  2648   assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
  2649          "spooling inconsistency?");
  2650   _firstIndex = _nextIndex = 1;
  2651   _tracking = true;
  2652 }
  2654 #define CMSPrintPromoBlockInfo 1
  2656 void PromotionInfo::stopTrackingPromotions(uint worker_id) {
  2657   assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
  2658          "spooling inconsistency?");
  2659   _firstIndex = _nextIndex = 1;
  2660   _tracking = false;
  2661   if (CMSPrintPromoBlockInfo > 1) {
  2662     print_statistics(worker_id);
  2663   }
  2664 }
  2666 void PromotionInfo::print_statistics(uint worker_id) const {
  2667   assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
  2668          "Else will undercount");
  2669   assert(CMSPrintPromoBlockInfo > 0, "Else unnecessary call");
  2670   // Count the number of blocks and slots in the free pool
  2671   size_t slots  = 0;
  2672   size_t blocks = 0;
  2673   for (SpoolBlock* cur_spool = _spareSpool;
  2674        cur_spool != NULL;
  2675        cur_spool = cur_spool->nextSpoolBlock) {
  2676     // the first entry is just a self-pointer; indices 1 through
  2677     // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
  2678     guarantee((void*)cur_spool->displacedHdr == (void*)&cur_spool->displacedHdr,
  2679               "first entry of displacedHdr should be self-referential");
  2680     slots += cur_spool->bufferSize - 1;
  2681     blocks++;
  2682   }
  2683   if (_spoolHead != NULL) {
  2684     slots += _spoolHead->bufferSize - 1;
  2685     blocks++;
  2686   }
  2687   gclog_or_tty->print_cr(" [worker %d] promo_blocks = %d, promo_slots = %d ",
  2688                          worker_id, blocks, slots);
  2689 }
  2691 // When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
  2692 // points to the next slot available for filling.
  2693 // The set of slots holding displaced headers are then all those in the
  2694 // right-open interval denoted by:
  2695 //
  2696 //    [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
  2697 //
  2698 // When _spoolTail is NULL, then the set of slots with displaced headers
  2699 // is all those starting at the slot <_spoolHead, _firstIndex> and
  2700 // going up to the last slot of last block in the linked list.
  2701 // In this latter case, _splice_point points to the tail block of
  2702 // this linked list of blocks holding displaced headers.
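       // For example, with _spoolHead == _spoolTail, _firstIndex == 3 and
       // _nextIndex == 5, exactly slots 3 and 4 of that block hold displaced
       // headers, i.e. _nextIndex - _firstIndex == 2 of them.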
  2703 void PromotionInfo::verify() const {
  2704   // Verify the following:
  2705   // 1. the number of displaced headers matches the number of promoted
  2706   //    objects that have displaced headers
  2707   // 2. each promoted object lies in this space
  2708   debug_only(
  2709     PromotedObject* junk = NULL;
  2710     assert(junk->next_addr() == (void*)(oop(junk)->mark_addr()),
  2711            "Offset of PromotedObject::_next is expected to align with "
  2712            "  the OopDesc::_mark within OopDesc");
  2713   )
  2714   // FIXME: guarantee????
  2715   guarantee(_spoolHead == NULL || _spoolTail != NULL ||
  2716             _splice_point != NULL, "list consistency");
  2717   guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency");
  2718   // count the number of objects with displaced headers
  2719   size_t numObjsWithDisplacedHdrs = 0;
  2720   for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) {
  2721     guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment");
  2722     // the last promoted object may fail the mark() != NULL test of is_oop().
  2723     guarantee(curObj->next() == NULL || oop(curObj)->is_oop(), "must be an oop");
  2724     if (curObj->hasDisplacedMark()) {
  2725       numObjsWithDisplacedHdrs++;
  2726     }
  2727   }
  2728   // Count the number of displaced headers
  2729   size_t numDisplacedHdrs = 0;
  2730   for (SpoolBlock* curSpool = _spoolHead;
  2731        curSpool != _spoolTail && curSpool != NULL;
  2732        curSpool = curSpool->nextSpoolBlock) {
  2733     // the first entry is just a self-pointer; indices 1 through
  2734     // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
  2735     guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr,
  2736               "first entry of displacedHdr should be self-referential");
  2737     numDisplacedHdrs += curSpool->bufferSize - 1;
  2738   }
  2739   guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0),
  2740             "internal consistency");
  2741   guarantee(_spoolTail != NULL || _nextIndex == 1,
  2742             "Inconsistency between _spoolTail and _nextIndex");
  2743   // We overcounted (_firstIndex-1) worth of slots in block
  2744   // _spoolHead and we undercounted (_nextIndex-1) worth of
  2745   // slots in block _spoolTail. We make an appropriate
  2746   // adjustment by subtracting the first and adding the
  2747   // second:  - (_firstIndex - 1) + (_nextIndex - 1)
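         // (For instance, with _spoolHead != _spoolTail, _firstIndex == 2 and
         // _nextIndex == 4: the loop above counted one already-consumed slot
         // in _spoolHead and missed the three occupied slots of _spoolTail,
         // so the net correction is +2 == _nextIndex - _firstIndex.)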
  2748   numDisplacedHdrs += (_nextIndex - _firstIndex);
  2749   guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
  2750 }
  2752 void PromotionInfo::print_on(outputStream* st) const {
  2753   SpoolBlock* curSpool = NULL;
  2754   size_t i = 0;
  2755   st->print_cr("start & end indices: [" SIZE_FORMAT ", " SIZE_FORMAT ")",
  2756                _firstIndex, _nextIndex);
  2757   for (curSpool = _spoolHead; curSpool != _spoolTail && curSpool != NULL;
  2758        curSpool = curSpool->nextSpoolBlock) {
  2759     curSpool->print_on(st);
  2760     st->print_cr(" active ");
  2761     i++;
  2762   }
  2763   for (curSpool = _spoolTail; curSpool != NULL;
  2764        curSpool = curSpool->nextSpoolBlock) {
  2765     curSpool->print_on(st);
  2766     st->print_cr(" inactive ");
  2767     i++;
  2768   }
  2769   for (curSpool = _spareSpool; curSpool != NULL;
  2770        curSpool = curSpool->nextSpoolBlock) {
  2771     curSpool->print_on(st);
  2772     st->print_cr(" free ");
  2773     i++;
  2774   }
  2775   st->print_cr(SIZE_FORMAT " header spooling blocks", i);
  2776 }
  2778 void SpoolBlock::print_on(outputStream* st) const {
  2779   st->print("[" PTR_FORMAT "," PTR_FORMAT "), " SIZE_FORMAT " HeapWords -> " PTR_FORMAT,
  2780             this, (HeapWord*)displacedHdr + bufferSize,
  2781             bufferSize, nextSpoolBlock);
  2782 }
  2784 ///////////////////////////////////////////////////////////////////////////
  2785 // CFLS_LAB
  2786 ///////////////////////////////////////////////////////////////////////////
  2788 #define VECTOR_257(x)                                                                                  \
  2789   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
  2790   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2791      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2792      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2793      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2794      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2795      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2796      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2797      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2798      x }
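       // VECTOR_257(x) expands to a braced initializer containing 257 copies
       // of x, one entry per indexed free list size; see the
       // "IndexSetSize == 257" assert in the CFLS_LAB constructor below.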
  2800 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
  2801 // OldPLABSize, whose static default is different; if overridden at the
  2802 // command-line, this will get reinitialized via a call to
  2803 // modify_initialization() below.
  2804 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
  2805   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
  2806 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
  2807 int    CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
  2809 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
  2810   _cfls(cfls)
  2811 {
  2812   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
  2813   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2814        i < CompactibleFreeListSpace::IndexSetSize;
  2815        i += CompactibleFreeListSpace::IndexSetStride) {
  2816     _indexedFreeList[i].set_size(i);
  2817     _num_blocks[i] = 0;
  2818   }
  2819 }
  2821 static bool _CFLS_LAB_modified = false;
  2823 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
  2824   assert(!_CFLS_LAB_modified, "Call only once");
  2825   _CFLS_LAB_modified = true;
  2826   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2827        i < CompactibleFreeListSpace::IndexSetSize;
  2828        i += CompactibleFreeListSpace::IndexSetStride) {
  2829     _blocks_to_claim[i].modify(n, wt, true /* force */);
  2830   }
  2831 }
  2833 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  2834   FreeChunk* res;
  2835   word_sz = _cfls->adjustObjectSize(word_sz);
  2836   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
  2837     // This locking manages sync with other large object allocations.
  2838     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
  2839                     Mutex::_no_safepoint_check_flag);
  2840     res = _cfls->getChunkFromDictionaryExact(word_sz);
  2841     if (res == NULL) return NULL;
  2842   } else {
  2843     FreeList* fl = &_indexedFreeList[word_sz];
  2844     if (fl->count() == 0) {
  2845       // Attempt to refill this local free list.
  2846       get_from_global_pool(word_sz, fl);
  2847       // If it didn't work, give up.
  2848       if (fl->count() == 0) return NULL;
  2849     }
  2850     res = fl->getChunkAtHead();
  2851     assert(res != NULL, "Why was count non-zero?");
  2852   }
  2853   res->markNotFree();
  2854   assert(!res->isFree(), "shouldn't be marked free");
  2855   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  2856   // mangle a just allocated object with a distinct pattern.
  2857   debug_only(res->mangleAllocated(word_sz));
  2858   return (HeapWord*)res;
  2859 }
  2861 // Get a chunk of blocks of the right size and update related
  2862 // book-keeping stats
  2863 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
  2864   // Get the #blocks we want to claim
  2865   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
  2866   assert(n_blks > 0, "Error");
  2867   assert(ResizePLAB || n_blks == OldPLABSize, "Error");
  2868   // In some cases, when the application has a phase change,
  2869   // there may be a sudden and sharp shift in the object survival
  2870   // profile, and updating the counts at the end of a scavenge
  2871   // may not be quick enough, giving rise to large scavenge pauses
  2872   // during these phase changes. It is beneficial to detect such
  2873   // changes on-the-fly during a scavenge and avoid such a phase-change
  2874   // pothole. The following code is a heuristic attempt to do that.
  2875   // It is protected by a product flag until we have gained
  2876   // enough experience with this heuristic and fine-tuned its behaviour.
  2877   // WARNING: This might increase fragmentation if we overreact to
  2878   // small spikes, so some kind of historical smoothing based on
  2879   // previous experience with the greater reactivity might be useful.
  2880   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
  2881   // default.
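         // Roughly: if the blocks of this size already handed out during this
         // scavenge exceed CMSOldPLABToleranceFactor * CMSOldPLABNumRefills
         // refills of the current claim size, "multiple" below becomes >= 1
         // and the claim is scaled up by CMSOldPLABReactivityFactor per unit
         // of excess, capped at CMSOldPLABMax.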
  2882   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
  2883     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
  2884     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
  2885     n_blks = MIN2(n_blks, CMSOldPLABMax);
  2886   }
  2887   assert(n_blks > 0, "Error");
  2888   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
  2889   // Update stats table entry for this block size
  2890   _num_blocks[word_sz] += fl->count();
  2891 }
  2893 void CFLS_LAB::compute_desired_plab_size() {
  2894   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2895        i < CompactibleFreeListSpace::IndexSetSize;
  2896        i += CompactibleFreeListSpace::IndexSetStride) {
  2897     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
  2898            "Counter inconsistency");
  2899     if (_global_num_workers[i] > 0) {
  2900       // Need to smooth wrt historical average
  2901       if (ResizeOldPLAB) {
  2902         _blocks_to_claim[i].sample(
  2903           MAX2((size_t)CMSOldPLABMin,
  2904           MIN2((size_t)CMSOldPLABMax,
  2905                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
  2906       }
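             // The value sampled above is this scavenge's observed demand:
             // blocks of this size consumed per worker per refill, clamped to
             // [CMSOldPLABMin, CMSOldPLABMax].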
  2907       // Reset counters for next round
  2908       _global_num_workers[i] = 0;
  2909       _global_num_blocks[i] = 0;
  2910       if (PrintOldPLAB) {
  2911         gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average());
  2912       }
  2913     }
  2914   }
  2915 }
  2917 void CFLS_LAB::retire(int tid) {
  2918   // We run this single threaded with the world stopped;
  2919   // so no need for locks and such.
  2920 #define CFLS_LAB_PARALLEL_ACCESS 0
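         // With CFLS_LAB_PARALLEL_ACCESS == 0 the per-size locking below is
         // compiled out: retirement runs in the VM thread with the world
         // stopped (see the asserts that follow), so the global indexed free
         // lists can be updated without synchronization.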
  2921   NOT_PRODUCT(Thread* t = Thread::current();)
  2922   assert(Thread::current()->is_VM_thread(), "Error");
  2923   assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
  2924          "Will access an uninitialized slot below");
  2925 #if CFLS_LAB_PARALLEL_ACCESS
  2926   for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
  2927        i > 0;
  2928        i -= CompactibleFreeListSpace::IndexSetStride) {
  2929 #else // CFLS_LAB_PARALLEL_ACCESS
  2930   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2931        i < CompactibleFreeListSpace::IndexSetSize;
  2932        i += CompactibleFreeListSpace::IndexSetStride) {
  2933 #endif // !CFLS_LAB_PARALLEL_ACCESS
  2934     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
  2935            "Can't retire more than what we obtained");
  2936     if (_num_blocks[i] > 0) {
  2937       size_t num_retire =  _indexedFreeList[i].count();
  2938       assert(_num_blocks[i] > num_retire, "Should have used at least one");
  2939       {
  2940 #if CFLS_LAB_PARALLEL_ACCESS
  2941         MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
  2942                         Mutex::_no_safepoint_check_flag);
  2943 #endif // CFLS_LAB_PARALLEL_ACCESS
  2944         // Update globals stats for num_blocks used
  2945         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
  2946         _global_num_workers[i]++;
  2947         assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
  2948         if (num_retire > 0) {
  2949           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
  2950           // Reset this list.
  2951           _indexedFreeList[i] = FreeList();
  2952           _indexedFreeList[i].set_size(i);
  2953         }
  2954       }
  2955       if (PrintOldPLAB) {
  2956         gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
  2957                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
  2959       // Reset stats for next round
  2960       _num_blocks[i]         = 0;
  2961     }
  2962   }
  2963 }
  2965 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
  2966   assert(fl->count() == 0, "Precondition.");
  2967   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
  2968          "Precondition");
  2970   // We'll try all multiples of word_sz in the indexed set, starting with
  2971   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
  2972   // then try getting a big chunk and splitting it.
  2973   {
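           // For example, for word_sz == 8 with CMSSplitIndexedFreeListBlocks
           // enabled, the loop below tries the 8-word list first, then the
           // 16-word list (splitting each chunk 2 ways), then 24 words
           // (3 ways), and so on while cur_sz remains below IndexSetSize.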
  2974     bool found;
  2975     int  k;
  2976     size_t cur_sz;
  2977     for (k = 1, cur_sz = k * word_sz, found = false;
  2978          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
  2979          (CMSSplitIndexedFreeListBlocks || k <= 1);
  2980          k++, cur_sz = k * word_sz) {
  2981       FreeList* gfl = &_indexedFreeList[cur_sz];
  2982       FreeList fl_for_cur_sz;  // Empty.
  2983       fl_for_cur_sz.set_size(cur_sz);
  2984       {
  2985         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
  2986                         Mutex::_no_safepoint_check_flag);
  2987         if (gfl->count() != 0) {
  2988           // nn is the number of chunks of size cur_sz that
  2989           // we'd need to split k-ways each, in order to create
  2990           // "n" chunks of size word_sz each.
  2991           const size_t nn = MAX2(n/k, (size_t)1);
  2992           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
  2993           found = true;
  2994           if (k > 1) {
  2995             // Update split death stats for the cur_sz-size blocks list:
  2996             // we increment the split death count by the number of blocks
  2997             // we just took from the cur_sz-size blocks list and which
  2998             // we will be splitting below.
  2999             ssize_t deaths = _indexedFreeList[cur_sz].splitDeaths() +
  3000                              fl_for_cur_sz.count();
  3001             _indexedFreeList[cur_sz].set_splitDeaths(deaths);
  3002           }
  3003         }
  3004       }
  3005       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
  3006       if (found) {
  3007         if (k == 1) {
  3008           fl->prepend(&fl_for_cur_sz);
  3009         } else {
  3010           // Divide each block on fl_for_cur_sz up k ways.
  3011           FreeChunk* fc;
  3012           while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
  3013             // Must do this in reverse order, so that anybody attempting to
  3014             // access the main chunk sees it as a single free block until we
  3015             // change it.
  3016             size_t fc_size = fc->size();
  3017             for (int i = k-1; i >= 0; i--) {
  3018               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  3019               ffc->setSize(word_sz);
  3020               ffc->linkNext(NULL);
  3021               ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  3022               // Above must occur before BOT is updated below.
  3023               // splitting from the right, fc_size == (k - i + 1) * wordsize
  3024               _bt.mark_block((HeapWord*)ffc, word_sz);
  3025               fc_size -= word_sz;
  3026               _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
  3027               _bt.verify_single_block((HeapWord*)fc, fc_size);
  3028               _bt.verify_single_block((HeapWord*)ffc, ffc->size());
  3029               // Push this on "fl".
  3030               fl->returnChunkAtHead(ffc);
  3031             }
  3032             // TRAP
  3033             assert(fl->tail()->next() == NULL, "List invariant.");
  3034           }
  3035         }
  3036         // Update birth stats for this block size.
  3037         size_t num = fl->count();
  3038         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  3039                         Mutex::_no_safepoint_check_flag);
  3040         ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
  3041         _indexedFreeList[word_sz].set_splitBirths(births);
  3042         return;
  3043       }
  3044     }
  3045   }
  3046   // Otherwise, we'll split a block from the dictionary.
  3047   FreeChunk* fc = NULL;
  3048   FreeChunk* rem_fc = NULL;
  3049   size_t rem;
  3050   {
  3051     MutexLockerEx x(parDictionaryAllocLock(),
  3052                     Mutex::_no_safepoint_check_flag);
  3053     while (n > 0) {
  3054       fc = dictionary()->getChunk(MAX2(n * word_sz,
  3055                                   _dictionary->minSize()),
  3056                                   FreeBlockDictionary::atLeast);
  3057       if (fc != NULL) {
  3058         _bt.allocated((HeapWord*)fc, fc->size());  // update _unallocated_blk
  3059         dictionary()->dictCensusUpdate(fc->size(),
  3060                                        true /*split*/,
  3061                                        false /*birth*/);
  3062         break;
  3063       } else {
  3064         n--;
  3065       }
  3066     }
  3067     if (fc == NULL) return;
  3068     assert((ssize_t)n >= 1, "Control point invariant");
  3069     // Otherwise, split up that block.
  3070     const size_t nn = fc->size() / word_sz;
  3071     n = MIN2(nn, n);
  3072     assert((ssize_t)n >= 1, "Control point invariant");
  3073     rem = fc->size() - n * word_sz;
  3074     // If there is a remainder, and it's too small, allocate one fewer.
  3075     if (rem > 0 && rem < MinChunkSize) {
  3076       n--; rem += word_sz;
  3077     }
  3078     // Note that at this point we may have n == 0.
  3079     assert((ssize_t)n >= 0, "Control point invariant");
  3081     // If n is 0, the chunk fc that was found is not large
  3082     // enough to leave a viable remainder.  We are unable to
  3083     // allocate even one block.  Return fc to the
  3084     // dictionary and return, leaving "fl" empty.
  3085     if (n == 0) {
  3086       returnChunkToDictionary(fc);
  3087       return;
  3088     }
  3090     // First return the remainder, if any.
  3091     // Note that we hold the lock until we decide if we're going to give
  3092     // back the remainder to the dictionary, since a concurrent allocation
  3093     // may otherwise see the heap as empty.  (We're willing to take that
  3094     // hit if the block is a small block.)
  3095     if (rem > 0) {
  3096       size_t prefix_size = n * word_sz;
  3097       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
  3098       rem_fc->setSize(rem);
  3099       rem_fc->linkNext(NULL);
  3100       rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  3101       // Above must occur before BOT is updated below.
  3102       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
  3103       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
  3104       if (rem >= IndexSetSize) {
  3105         returnChunkToDictionary(rem_fc);
  3106         dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
  3107         rem_fc = NULL;
  3108       }
  3109       // Otherwise, return it to the small list below.
  3110     }
  3111   }
  3112   if (rem_fc != NULL) {
  3113     MutexLockerEx x(_indexedFreeListParLocks[rem],
  3114                     Mutex::_no_safepoint_check_flag);
  3115     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
  3116     _indexedFreeList[rem].returnChunkAtHead(rem_fc);
  3117     smallSplitBirth(rem);
  3118   }
  3119   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  3120   // Now do the splitting up.
  3121   // Must do this in reverse order, so that anybody attempting to
  3122   // access the main chunk sees it as a single free block until we
  3123   // change it.
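         // Carving sub-chunks off the high end first means fc itself is not
         // resized until after the loop (below), so a parallel thread walking
         // the space still sees the unsplit prefix as a single free block.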
  3124   size_t fc_size = n * word_sz;
  3125   // All but first chunk in this loop
  3126   for (ssize_t i = n-1; i > 0; i--) {
  3127     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  3128     ffc->setSize(word_sz);
  3129     ffc->linkNext(NULL);
  3130     ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  3131     // Above must occur before BOT is updated below.
  3132     // splitting from the right, fc_size == (n - i + 1) * wordsize
  3133     _bt.mark_block((HeapWord*)ffc, word_sz);
  3134     fc_size -= word_sz;
  3135     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
  3136     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
  3137     _bt.verify_single_block((HeapWord*)fc, fc_size);
  3138     // Push this on "fl".
  3139     fl->returnChunkAtHead(ffc);
  3140   }
  3141   // First chunk
  3142   fc->setSize(word_sz);
  3143   fc->linkNext(NULL);
  3144   fc->linkPrev(NULL);
  3145   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  3146   _bt.verify_single_block((HeapWord*)fc, fc->size());
  3147   fl->returnChunkAtHead(fc);
  3149   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  3150   {
  3151     // Update the stats for this block size.
  3152     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  3153                     Mutex::_no_safepoint_check_flag);
  3154     const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
  3155     _indexedFreeList[word_sz].set_splitBirths(births);
  3156     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
  3157     // _indexedFreeList[word_sz].set_surplus(new_surplus);
  3158   }
  3160   // TRAP
  3161   assert(fl->tail()->next() == NULL, "List invariant.");
  3162 }
  3164 // Set up the space's par_seq_tasks structure for work claiming
  3165 // for parallel rescan. See CMSParRemarkTask where this is currently used.
  3166 // XXX Need to suitably abstract and generalize this and the next
  3167 // method into one.
  3168 void
  3169 CompactibleFreeListSpace::
  3170 initialize_sequential_subtasks_for_rescan(int n_threads) {
  3171   // The "size" of each task is fixed according to rescan_task_size.
  3172   assert(n_threads > 0, "Unexpected n_threads argument");
  3173   const size_t task_size = rescan_task_size();
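         // Number of tasks is the ceiling of used_region().word_size() divided
         // by task_size, so the fixed-size tasks cover the entire used region
         // (the last task may extend past its end).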
  3174   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
  3175   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  3176   assert(n_tasks == 0 ||
  3177          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
  3178           (used_region().start() + n_tasks*task_size >= used_region().end())),
  3179          "n_tasks calculation incorrect");
  3180   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  3181   assert(!pst->valid(), "Clobbering existing data?");
  3182   pst->set_par_threads(n_threads);
  3183   pst->set_n_tasks((int)n_tasks);
  3184 }
  3186 // Set up the space's par_seq_tasks structure for work claiming
  3187 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
  3188 void
  3189 CompactibleFreeListSpace::
  3190 initialize_sequential_subtasks_for_marking(int n_threads,
  3191                                            HeapWord* low) {
  3192   // The "size" of each task is fixed according to rescan_task_size.
  3193   assert(n_threads > 0, "Unexpected n_threads argument");
  3194   const size_t task_size = marking_task_size();
  3195   assert(task_size > CardTableModRefBS::card_size_in_words &&
  3196          (task_size %  CardTableModRefBS::card_size_in_words == 0),
  3197          "Otherwise arithmetic below would be incorrect");
  3198   MemRegion span = _gen->reserved();
  3199   if (low != NULL) {
  3200     if (span.contains(low)) {
  3201       // Align low down to  a card boundary so that
  3202       // we can use block_offset_careful() on span boundaries.
  3203       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
  3204                                  CardTableModRefBS::card_size);
  3205       // Clip span prefix at aligned_low
  3206       span = span.intersection(MemRegion(aligned_low, span.end()));
  3207     } else if (low > span.end()) {
  3208       span = MemRegion(low, low);  // Null region
  3209     } // else use entire span
  3210   }
  3211   assert(span.is_empty() ||
  3212          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
  3213         "span should start at a card boundary");
  3214   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  3215   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  3216   assert(n_tasks == 0 ||
  3217          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
  3218           (span.start() + n_tasks*task_size >= span.end())),
  3219          "n_tasks calculation incorrect");
  3220   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  3221   assert(!pst->valid(), "Clobbering existing data?");
  3222   pst->set_par_threads(n_threads);
  3223   pst->set_n_tasks((int)n_tasks);
  3224 }
