src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author       ysr
date         Tue, 09 Nov 2010 10:47:20 -0800
changeset    2294 4df7f8cba524
parent       2293 899bbbdcb6ea
child        2301 9eecf81a02fb
permissions  -rw-r--r--

6996613: CompactibleFreeListSpace::print should call CompactibleFreeListSpace::print_on, not Space::print_on
Reviewed-by: tonyp
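
A minimal sketch of what such an override looks like (illustrative only; it assumes the usual HotSpot convention that a class's print() forwards to print_on(tty), and is not necessarily the exact hunk applied by this changeset):

    void CompactibleFreeListSpace::print() const {
      // Delegate to this class's own print_on() so the CMS-specific details
      // (promotion info, linear allocation block, fit strategy) appear in the
      // output, instead of the generic Space::print_on() summary.
      print_on(tty);
    }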

     1 /*
     2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_compactibleFreeListSpace.cpp.incl"
    28 /////////////////////////////////////////////////////////////////////////
    29 //// CompactibleFreeListSpace
    30 /////////////////////////////////////////////////////////////////////////
    32 // highest ranked  free list lock rank
    33 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
    35 // Defaults are 0 so things will break badly if incorrectly initialized.
    36 int CompactibleFreeListSpace::IndexSetStart  = 0;
    37 int CompactibleFreeListSpace::IndexSetStride = 0;
    39 size_t MinChunkSize = 0;
    41 void CompactibleFreeListSpace::set_cms_values() {
    42   // Set CMS global values
    43   assert(MinChunkSize == 0, "already set");
    44   #define numQuanta(x,y) ((x+y-1)/y)
    45   MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
    47   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
    48   IndexSetStart  = MinObjAlignment;
    49   IndexSetStride = MinObjAlignment;
    50 }
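        // Illustrative worked example (assumed values, not part of the original
        // source): on a 64-bit VM with an assumed sizeof(FreeChunk) of 24 bytes,
        // MinObjAlignmentInBytes == 8 and HeapWordSize == 8 (so MinObjAlignment
        // is 1 heap word), the computation above yields
        //   numQuanta(24, 8) == (24 + 8 - 1) / 8 == 3
        //   MinChunkSize     == 3 * 1           == 3 heap words (24 bytes)
        // i.e. the smallest chunk is just big enough to hold a FreeChunk header,
        // rounded up to the minimum object alignment.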
    52 // Constructor
    53 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
    54   MemRegion mr, bool use_adaptive_freelists,
    55   FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
    56   _dictionaryChoice(dictionaryChoice),
    57   _adaptive_freelists(use_adaptive_freelists),
    58   _bt(bs, mr),
    59   // free list locks are in the range of values taken by _lockRank
    60   // This range currently is [_leaf+2, _leaf+3]
    61   // Note: this requires that CFLspace c'tors
    62   // are called serially in the order in which the locks are
     63   // acquired in the program text. This is true today.
    64   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
    65   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
    66                           "CompactibleFreeListSpace._dict_par_lock", true),
    67   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    68                     CMSRescanMultiple),
    69   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    70                     CMSConcMarkMultiple),
    71   _collector(NULL)
    72 {
    73   _bt.set_space(this);
    74   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    75   // We have all of "mr", all of which we place in the dictionary
    76   // as one big chunk. We'll need to decide here which of several
    77   // possible alternative dictionary implementations to use. For
    78   // now the choice is easy, since we have only one working
    79   // implementation, namely, the simple binary tree (splaying
    80   // temporarily disabled).
    81   switch (dictionaryChoice) {
    82     case FreeBlockDictionary::dictionarySplayTree:
    83     case FreeBlockDictionary::dictionarySkipList:
    84     default:
    85       warning("dictionaryChoice: selected option not understood; using"
    86               " default BinaryTreeDictionary implementation instead.");
    87     case FreeBlockDictionary::dictionaryBinaryTree:
    88       _dictionary = new BinaryTreeDictionary(mr);
    89       break;
    90   }
    91   assert(_dictionary != NULL, "CMS dictionary initialization");
    92   // The indexed free lists are initially all empty and are lazily
    93   // filled in on demand. Initialize the array elements to NULL.
    94   initializeIndexedFreeListArray();
    96   // Not using adaptive free lists assumes that allocation is first
    97   // from the linAB's.  Also a cms perm gen which can be compacted
    98   // has to have the klass's klassKlass allocated at a lower
    99   // address in the heap than the klass so that the klassKlass is
   100   // moved to its new location before the klass is moved.
   101   // Set the _refillSize for the linear allocation blocks
   102   if (!use_adaptive_freelists) {
   103     FreeChunk* fc = _dictionary->getChunk(mr.word_size());
   104     // The small linAB initially has all the space and will allocate
   105     // a chunk of any size.
   106     HeapWord* addr = (HeapWord*) fc;
   107     _smallLinearAllocBlock.set(addr, fc->size() ,
   108       1024*SmallForLinearAlloc, fc->size());
   109     // Note that _unallocated_block is not updated here.
   110     // Allocations from the linear allocation block should
   111     // update it.
   112   } else {
   113     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
   114                                SmallForLinearAlloc);
   115   }
   116   // CMSIndexedFreeListReplenish should be at least 1
   117   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
   118   _promoInfo.setSpace(this);
   119   if (UseCMSBestFit) {
   120     _fitStrategy = FreeBlockBestFitFirst;
   121   } else {
   122     _fitStrategy = FreeBlockStrategyNone;
   123   }
   124   checkFreeListConsistency();
   126   // Initialize locks for parallel case.
   128   if (CollectedHeap::use_parallel_gc_threads()) {
   129     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   130       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
   131                                               "a freelist par lock",
   132                                               true);
   133       if (_indexedFreeListParLocks[i] == NULL)
   134         vm_exit_during_initialization("Could not allocate a par lock");
   135       DEBUG_ONLY(
   136         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
   137       )
   138     }
   139     _dictionary->set_par_lock(&_parDictionaryAllocLock);
   140   }
   141 }
   143 // Like CompactibleSpace forward() but always calls cross_threshold() to
   144 // update the block offset table.  Removed initialize_threshold call because
   145 // CFLS does not use a block offset array for contiguous spaces.
   146 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
   147                                     CompactPoint* cp, HeapWord* compact_top) {
   148   // q is alive
   149   // First check if we should switch compaction space
   150   assert(this == cp->space, "'this' should be current compaction space.");
   151   size_t compaction_max_size = pointer_delta(end(), compact_top);
   152   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
   153     "virtual adjustObjectSize_v() method is not correct");
   154   size_t adjusted_size = adjustObjectSize(size);
   155   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
   156          "no small fragments allowed");
   157   assert(minimum_free_block_size() == MinChunkSize,
   158          "for de-virtualized reference below");
    159   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
   160   if (adjusted_size + MinChunkSize > compaction_max_size &&
   161       adjusted_size != compaction_max_size) {
   162     do {
   163       // switch to next compaction space
   164       cp->space->set_compaction_top(compact_top);
   165       cp->space = cp->space->next_compaction_space();
   166       if (cp->space == NULL) {
   167         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
   168         assert(cp->gen != NULL, "compaction must succeed");
   169         cp->space = cp->gen->first_compaction_space();
   170         assert(cp->space != NULL, "generation must have a first compaction space");
   171       }
   172       compact_top = cp->space->bottom();
   173       cp->space->set_compaction_top(compact_top);
   174       // The correct adjusted_size may not be the same as that for this method
    175       // (i.e., cp->space may no longer be "this"), so adjust the size again.
    176       // Use the virtual method here; the non-virtual call was used above to
    177       // save the virtual dispatch.
   178       adjusted_size = cp->space->adjust_object_size_v(size);
   179       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
   180       assert(cp->space->minimum_free_block_size() == 0, "just checking");
   181     } while (adjusted_size > compaction_max_size);
   182   }
   184   // store the forwarding pointer into the mark word
   185   if ((HeapWord*)q != compact_top) {
   186     q->forward_to(oop(compact_top));
   187     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
   188   } else {
   189     // if the object isn't moving we can just set the mark to the default
   190     // mark and handle it specially later on.
   191     q->init_mark();
   192     assert(q->forwardee() == NULL, "should be forwarded to NULL");
   193   }
   195   VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
   196   compact_top += adjusted_size;
   198   // we need to update the offset table so that the beginnings of objects can be
   199   // found during scavenge.  Note that we are updating the offset table based on
   200   // where the object will be once the compaction phase finishes.
    202   // Always call cross_threshold().  A contiguous space can call it only when
    203   // compaction_top exceeds the current threshold, but that restriction does not
    204   // apply to a non-contiguous space.
   205   cp->threshold =
   206     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
   207   return compact_top;
   208 }
   210 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
   211 // and use of single_block instead of alloc_block.  The name here is not really
   212 // appropriate - maybe a more general name could be invented for both the
   213 // contiguous and noncontiguous spaces.
   215 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
   216   _bt.single_block(start, the_end);
   217   return end();
   218 }
    220 // Initialize the indexed free lists to NULL (i.e., empty).
   221 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
   222   for (size_t i = 0; i < IndexSetSize; i++) {
   223     // Note that on platforms where objects are double word aligned,
   224     // the odd array elements are not used.  It is convenient, however,
   225     // to map directly from the object size to the array element.
   226     _indexedFreeList[i].reset(IndexSetSize);
   227     _indexedFreeList[i].set_size(i);
   228     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   229     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   230     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   231     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   232   }
   233 }
   235 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
   236   for (int i = 1; i < IndexSetSize; i++) {
   237     assert(_indexedFreeList[i].size() == (size_t) i,
   238       "Indexed free list sizes are incorrect");
   239     _indexedFreeList[i].reset(IndexSetSize);
   240     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   241     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   242     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   243     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   244   }
   245 }
   247 void CompactibleFreeListSpace::reset(MemRegion mr) {
   248   resetIndexedFreeListArray();
   249   dictionary()->reset();
   250   if (BlockOffsetArrayUseUnallocatedBlock) {
   251     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
   252     // Everything's allocated until proven otherwise.
   253     _bt.set_unallocated_block(end());
   254   }
   255   if (!mr.is_empty()) {
   256     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
   257     _bt.single_block(mr.start(), mr.word_size());
   258     FreeChunk* fc = (FreeChunk*) mr.start();
   259     fc->setSize(mr.word_size());
   260     if (mr.word_size() >= IndexSetSize ) {
   261       returnChunkToDictionary(fc);
   262     } else {
   263       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
   264       _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
   265     }
   266   }
   267   _promoInfo.reset();
   268   _smallLinearAllocBlock._ptr = NULL;
   269   _smallLinearAllocBlock._word_size = 0;
   270 }
   272 void CompactibleFreeListSpace::reset_after_compaction() {
   273   // Reset the space to the new reality - one free chunk.
   274   MemRegion mr(compaction_top(), end());
   275   reset(mr);
   276   // Now refill the linear allocation block(s) if possible.
   277   if (_adaptive_freelists) {
   278     refillLinearAllocBlocksIfNeeded();
   279   } else {
   280     // Place as much of mr in the linAB as we can get,
   281     // provided it was big enough to go into the dictionary.
   282     FreeChunk* fc = dictionary()->findLargestDict();
   283     if (fc != NULL) {
   284       assert(fc->size() == mr.word_size(),
   285              "Why was the chunk broken up?");
   286       removeChunkFromDictionary(fc);
   287       HeapWord* addr = (HeapWord*) fc;
   288       _smallLinearAllocBlock.set(addr, fc->size() ,
   289         1024*SmallForLinearAlloc, fc->size());
   290       // Note that _unallocated_block is not updated here.
   291     }
   292   }
   293 }
   295 // Walks the entire dictionary, returning a coterminal
   296 // chunk, if it exists. Use with caution since it involves
   297 // a potentially complete walk of a potentially large tree.
   298 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
   300   assert_lock_strong(&_freelistLock);
   302   return dictionary()->find_chunk_ends_at(end());
   303 }
   306 #ifndef PRODUCT
   307 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
   308   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   309     _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
   310   }
   311 }
   313 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
   314   size_t sum = 0;
   315   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   316     sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
   317   }
   318   return sum;
   319 }
   321 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
   322   size_t count = 0;
   323   for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
   324     debug_only(
   325       ssize_t total_list_count = 0;
   326       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   327          fc = fc->next()) {
   328         total_list_count++;
   329       }
   330       assert(total_list_count ==  _indexedFreeList[i].count(),
   331         "Count in list is incorrect");
   332     )
   333     count += _indexedFreeList[i].count();
   334   }
   335   return count;
   336 }
   338 size_t CompactibleFreeListSpace::totalCount() {
   339   size_t num = totalCountInIndexedFreeLists();
   340   num +=  dictionary()->totalCount();
   341   if (_smallLinearAllocBlock._word_size != 0) {
   342     num++;
   343   }
   344   return num;
   345 }
   346 #endif
   348 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
   349   FreeChunk* fc = (FreeChunk*) p;
   350   return fc->isFree();
   351 }
   353 size_t CompactibleFreeListSpace::used() const {
   354   return capacity() - free();
   355 }
   357 size_t CompactibleFreeListSpace::free() const {
   358   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   359   // if you do this while the structures are in flux you
   360   // may get an approximate answer only; for instance
   361   // because there is concurrent allocation either
   362   // directly by mutators or for promotion during a GC.
   363   // It's "MT-safe", however, in the sense that you are guaranteed
   364   // not to crash and burn, for instance, because of walking
   365   // pointers that could disappear as you were walking them.
   366   // The approximation is because the various components
    367   // that are read below are not read atomically (and,
    368   // further, the computation of totalSizeInIndexedFreeLists()
    369   // is itself non-atomic). The normal use of
   370   // this is during a resize operation at the end of GC
   371   // and at that time you are guaranteed to get the
   372   // correct actual value. However, for instance, this is
   373   // also read completely asynchronously by the "perf-sampler"
   374   // that supports jvmstat, and you are apt to see the values
   375   // flicker in such cases.
   376   assert(_dictionary != NULL, "No _dictionary?");
   377   return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
   378           totalSizeInIndexedFreeLists() +
   379           _smallLinearAllocBlock._word_size) * HeapWordSize;
   380 }
   382 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
   383   assert(_dictionary != NULL, "No _dictionary?");
   384   assert_locked();
   385   size_t res = _dictionary->maxChunkSize();
   386   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
   387                        (size_t) SmallForLinearAlloc - 1));
   388   // XXX the following could potentially be pretty slow;
    389   // should one, pessimally for the rare cases when res
    390   // calculated above is less than IndexSetSize,
   391   // just return res calculated above? My reasoning was that
   392   // those cases will be so rare that the extra time spent doesn't
   393   // really matter....
   394   // Note: do not change the loop test i >= res + IndexSetStride
   395   // to i > res below, because i is unsigned and res may be zero.
   396   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
   397        i -= IndexSetStride) {
   398     if (_indexedFreeList[i].head() != NULL) {
   399       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   400       return i;
   401     }
   402   }
   403   return res;
   404 }
   406 void LinearAllocBlock::print_on(outputStream* st) const {
   407   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
   408             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
   409             _ptr, _word_size, _refillSize, _allocation_size_limit);
   410 }
   412 void CompactibleFreeListSpace::print_on(outputStream* st) const {
   413   st->print_cr("COMPACTIBLE FREELIST SPACE");
   414   st->print_cr(" Space:");
   415   Space::print_on(st);
   417   st->print_cr("promoInfo:");
   418   _promoInfo.print_on(st);
   420   st->print_cr("_smallLinearAllocBlock");
   421   _smallLinearAllocBlock.print_on(st);
   423   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
   425   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
   426                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
   427 }
   429 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
   430 const {
   431   reportIndexedFreeListStatistics();
   432   gclog_or_tty->print_cr("Layout of Indexed Freelists");
   433   gclog_or_tty->print_cr("---------------------------");
   434   FreeList::print_labels_on(st, "size");
   435   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   436     _indexedFreeList[i].print_on(gclog_or_tty);
   437     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   438          fc = fc->next()) {
   439       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
   440                           fc, (HeapWord*)fc + i,
   441                           fc->cantCoalesce() ? "\t CC" : "");
   442     }
   443   }
   444 }
   446 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
   447 const {
   448   _promoInfo.print_on(st);
   449 }
   451 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
   452 const {
   453   _dictionary->reportStatistics();
   454   st->print_cr("Layout of Freelists in Tree");
   455   st->print_cr("---------------------------");
   456   _dictionary->print_free_lists(st);
   457 }
   459 class BlkPrintingClosure: public BlkClosure {
   460   const CMSCollector*             _collector;
   461   const CompactibleFreeListSpace* _sp;
   462   const CMSBitMap*                _live_bit_map;
   463   const bool                      _post_remark;
   464   outputStream*                   _st;
   465 public:
   466   BlkPrintingClosure(const CMSCollector* collector,
   467                      const CompactibleFreeListSpace* sp,
   468                      const CMSBitMap* live_bit_map,
   469                      outputStream* st):
   470     _collector(collector),
   471     _sp(sp),
   472     _live_bit_map(live_bit_map),
   473     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
   474     _st(st) { }
   475   size_t do_blk(HeapWord* addr);
   476 };
   478 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
   479   size_t sz = _sp->block_size_no_stall(addr, _collector);
   480   assert(sz != 0, "Should always be able to compute a size");
   481   if (_sp->block_is_obj(addr)) {
   482     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
   483     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
   484       addr,
   485       dead ? "dead" : "live",
   486       sz,
   487       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
   488     if (CMSPrintObjectsInDump && !dead) {
   489       oop(addr)->print_on(_st);
   490       _st->print_cr("--------------------------------------");
   491     }
   492   } else { // free block
   493     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
   494       addr, sz, CMSPrintChunksInDump ? ":" : ".");
   495     if (CMSPrintChunksInDump) {
   496       ((FreeChunk*)addr)->print_on(_st);
   497       _st->print_cr("--------------------------------------");
   498     }
   499   }
   500   return sz;
   501 }
   503 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
   504   outputStream* st) {
   505   st->print_cr("\n=========================");
   506   st->print_cr("Block layout in CMS Heap:");
   507   st->print_cr("=========================");
   508   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
   509   blk_iterate(&bpcl);
   511   st->print_cr("\n=======================================");
   512   st->print_cr("Order & Layout of Promotion Info Blocks");
   513   st->print_cr("=======================================");
   514   print_promo_info_blocks(st);
   516   st->print_cr("\n===========================");
   517   st->print_cr("Order of Indexed Free Lists");
    518   st->print_cr("===========================");
   519   print_indexed_free_lists(st);
   521   st->print_cr("\n=================================");
   522   st->print_cr("Order of Free Lists in Dictionary");
   523   st->print_cr("=================================");
   524   print_dictionary_free_lists(st);
   525 }
   528 void CompactibleFreeListSpace::reportFreeListStatistics() const {
   529   assert_lock_strong(&_freelistLock);
   530   assert(PrintFLSStatistics != 0, "Reporting error");
   531   _dictionary->reportStatistics();
   532   if (PrintFLSStatistics > 1) {
   533     reportIndexedFreeListStatistics();
   534     size_t totalSize = totalSizeInIndexedFreeLists() +
   535                        _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
   536     gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
   537   }
   538 }
   540 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
   541   assert_lock_strong(&_freelistLock);
   542   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
   543                       "--------------------------------\n");
   544   size_t totalSize = totalSizeInIndexedFreeLists();
   545   size_t   freeBlocks = numFreeBlocksInIndexedFreeLists();
   546   gclog_or_tty->print("Total Free Space: %d\n", totalSize);
   547   gclog_or_tty->print("Max   Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
   548   gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
   549   if (freeBlocks != 0) {
   550     gclog_or_tty->print("Av.  Block  Size: %d\n", totalSize/freeBlocks);
   551   }
   552 }
   554 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
   555   size_t res = 0;
   556   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   557     debug_only(
   558       ssize_t recount = 0;
   559       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   560          fc = fc->next()) {
   561         recount += 1;
   562       }
   563       assert(recount == _indexedFreeList[i].count(),
   564         "Incorrect count in list");
   565     )
   566     res += _indexedFreeList[i].count();
   567   }
   568   return res;
   569 }
   571 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
   572   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
   573     if (_indexedFreeList[i].head() != NULL) {
   574       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   575       return (size_t)i;
   576     }
   577   }
   578   return 0;
   579 }
   581 void CompactibleFreeListSpace::set_end(HeapWord* value) {
   582   HeapWord* prevEnd = end();
   583   assert(prevEnd != value, "unnecessary set_end call");
   584   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   585         "New end is below unallocated block");
   586   _end = value;
   587   if (prevEnd != NULL) {
   588     // Resize the underlying block offset table.
   589     _bt.resize(pointer_delta(value, bottom()));
   590     if (value <= prevEnd) {
   591       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   592              "New end is below unallocated block");
   593     } else {
   594       // Now, take this new chunk and add it to the free blocks.
   595       // Note that the BOT has not yet been updated for this block.
   596       size_t newFcSize = pointer_delta(value, prevEnd);
   597       // XXX This is REALLY UGLY and should be fixed up. XXX
   598       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
   599         // Mark the boundary of the new block in BOT
   600         _bt.mark_block(prevEnd, value);
   601         // put it all in the linAB
   602         if (ParallelGCThreads == 0) {
   603           _smallLinearAllocBlock._ptr = prevEnd;
   604           _smallLinearAllocBlock._word_size = newFcSize;
   605           repairLinearAllocBlock(&_smallLinearAllocBlock);
   606         } else { // ParallelGCThreads > 0
   607           MutexLockerEx x(parDictionaryAllocLock(),
   608                           Mutex::_no_safepoint_check_flag);
   609           _smallLinearAllocBlock._ptr = prevEnd;
   610           _smallLinearAllocBlock._word_size = newFcSize;
   611           repairLinearAllocBlock(&_smallLinearAllocBlock);
   612         }
   613         // Births of chunks put into a LinAB are not recorded.  Births
   614         // of chunks as they are allocated out of a LinAB are.
   615       } else {
   616         // Add the block to the free lists, if possible coalescing it
   617         // with the last free block, and update the BOT and census data.
   618         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
   619       }
   620     }
   621   }
   622 }
   624 class FreeListSpace_DCTOC : public Filtering_DCTOC {
   625   CompactibleFreeListSpace* _cfls;
   626   CMSCollector* _collector;
   627 protected:
   628   // Override.
   629 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
   630   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
   631                                        HeapWord* bottom, HeapWord* top, \
   632                                        ClosureType* cl);                \
   633       void walk_mem_region_with_cl_par(MemRegion mr,                    \
   634                                        HeapWord* bottom, HeapWord* top, \
   635                                        ClosureType* cl);                \
   636     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
   637                                        HeapWord* bottom, HeapWord* top, \
   638                                        ClosureType* cl)
   639   walk_mem_region_with_cl_DECL(OopClosure);
   640   walk_mem_region_with_cl_DECL(FilteringClosure);
   642 public:
   643   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
   644                       CMSCollector* collector,
   645                       OopClosure* cl,
   646                       CardTableModRefBS::PrecisionStyle precision,
   647                       HeapWord* boundary) :
   648     Filtering_DCTOC(sp, cl, precision, boundary),
   649     _cfls(sp), _collector(collector) {}
   650 };
   652 // We de-virtualize the block-related calls below, since we know that our
   653 // space is a CompactibleFreeListSpace.
   654 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
   655 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
   656                                                  HeapWord* bottom,              \
   657                                                  HeapWord* top,                 \
   658                                                  ClosureType* cl) {             \
   659    if (SharedHeap::heap()->n_par_threads() > 0) {                               \
   660      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   661    } else {                                                                     \
   662      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   663    }                                                                            \
   664 }                                                                               \
   665 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
   666                                                       HeapWord* bottom,         \
   667                                                       HeapWord* top,            \
   668                                                       ClosureType* cl) {        \
   669   /* Skip parts that are before "mr", in case "block_start" sent us             \
   670      back too far. */                                                           \
   671   HeapWord* mr_start = mr.start();                                              \
   672   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
   673   HeapWord* next = bottom + bot_size;                                           \
   674   while (next < mr_start) {                                                     \
   675     bottom = next;                                                              \
   676     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
   677     next = bottom + bot_size;                                                   \
   678   }                                                                             \
   679                                                                                 \
   680   while (bottom < top) {                                                        \
   681     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
   682         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   683                     oop(bottom)) &&                                             \
   684         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   685       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   686       bottom += _cfls->adjustObjectSize(word_sz);                               \
   687     } else {                                                                    \
   688       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
   689     }                                                                           \
   690   }                                                                             \
   691 }                                                                               \
   692 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
   693                                                         HeapWord* bottom,       \
   694                                                         HeapWord* top,          \
   695                                                         ClosureType* cl) {      \
   696   /* Skip parts that are before "mr", in case "block_start" sent us             \
   697      back too far. */                                                           \
   698   HeapWord* mr_start = mr.start();                                              \
   699   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
   700   HeapWord* next = bottom + bot_size;                                           \
   701   while (next < mr_start) {                                                     \
   702     bottom = next;                                                              \
   703     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
   704     next = bottom + bot_size;                                                   \
   705   }                                                                             \
   706                                                                                 \
   707   while (bottom < top) {                                                        \
   708     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
   709         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   710                     oop(bottom)) &&                                             \
   711         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   712       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   713       bottom += _cfls->adjustObjectSize(word_sz);                               \
   714     } else {                                                                    \
   715       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
   716     }                                                                           \
   717   }                                                                             \
   718 }
   720 // (There are only two of these, rather than N, because the split is due
   721 // only to the introduction of the FilteringClosure, a local part of the
   722 // impl of this abstraction.)
   723 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
   724 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
   726 DirtyCardToOopClosure*
   727 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
   728                                       CardTableModRefBS::PrecisionStyle precision,
   729                                       HeapWord* boundary) {
   730   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
   731 }
   734 // Note on locking for the space iteration functions:
   735 // since the collector's iteration activities are concurrent with
   736 // allocation activities by mutators, absent a suitable mutual exclusion
    737   // mechanism the iterators may go awry. For instance, a block being iterated
    738   // over may suddenly be allocated, or divided up with part of it allocated,
    739   // and so on.
   741 // Apply the given closure to each block in the space.
   742 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
   743   assert_lock_strong(freelistLock());
   744   HeapWord *cur, *limit;
   745   for (cur = bottom(), limit = end(); cur < limit;
   746        cur += cl->do_blk_careful(cur));
   747 }
   749 // Apply the given closure to each block in the space.
   750 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
   751   assert_lock_strong(freelistLock());
   752   HeapWord *cur, *limit;
   753   for (cur = bottom(), limit = end(); cur < limit;
   754        cur += cl->do_blk(cur));
   755 }
   757 // Apply the given closure to each oop in the space.
   758 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
   759   assert_lock_strong(freelistLock());
   760   HeapWord *cur, *limit;
   761   size_t curSize;
   762   for (cur = bottom(), limit = end(); cur < limit;
   763        cur += curSize) {
   764     curSize = block_size(cur);
   765     if (block_is_obj(cur)) {
   766       oop(cur)->oop_iterate(cl);
   767     }
   768   }
   769 }
    771 // Apply the given closure to each oop in the intersection of the space and the memory region.
   772 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
   773   assert_lock_strong(freelistLock());
   774   if (is_empty()) {
   775     return;
   776   }
   777   MemRegion cur = MemRegion(bottom(), end());
   778   mr = mr.intersection(cur);
   779   if (mr.is_empty()) {
   780     return;
   781   }
   782   if (mr.equals(cur)) {
   783     oop_iterate(cl);
   784     return;
   785   }
   786   assert(mr.end() <= end(), "just took an intersection above");
   787   HeapWord* obj_addr = block_start(mr.start());
   788   HeapWord* t = mr.end();
   790   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
   791   if (block_is_obj(obj_addr)) {
   792     // Handle first object specially.
   793     oop obj = oop(obj_addr);
   794     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
   795   } else {
   796     FreeChunk* fc = (FreeChunk*)obj_addr;
   797     obj_addr += fc->size();
   798   }
   799   while (obj_addr < t) {
   800     HeapWord* obj = obj_addr;
   801     obj_addr += block_size(obj_addr);
   802     // If "obj_addr" is not greater than top, then the
   803     // entire object "obj" is within the region.
   804     if (obj_addr <= t) {
   805       if (block_is_obj(obj)) {
   806         oop(obj)->oop_iterate(cl);
   807       }
   808     } else {
   809       // "obj" extends beyond end of region
   810       if (block_is_obj(obj)) {
   811         oop(obj)->oop_iterate(&smr_blk);
   812       }
   813       break;
   814     }
   815   }
   816 }
   818 // NOTE: In the following methods, in order to safely be able to
   819 // apply the closure to an object, we need to be sure that the
   820 // object has been initialized. We are guaranteed that an object
   821 // is initialized if we are holding the Heap_lock with the
   822 // world stopped.
   823 void CompactibleFreeListSpace::verify_objects_initialized() const {
   824   if (is_init_completed()) {
   825     assert_locked_or_safepoint(Heap_lock);
   826     if (Universe::is_fully_initialized()) {
   827       guarantee(SafepointSynchronize::is_at_safepoint(),
   828                 "Required for objects to be initialized");
   829     }
   830   } // else make a concession at vm start-up
   831 }
   833 // Apply the given closure to each object in the space
   834 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
   835   assert_lock_strong(freelistLock());
   836   NOT_PRODUCT(verify_objects_initialized());
   837   HeapWord *cur, *limit;
   838   size_t curSize;
   839   for (cur = bottom(), limit = end(); cur < limit;
   840        cur += curSize) {
   841     curSize = block_size(cur);
   842     if (block_is_obj(cur)) {
   843       blk->do_object(oop(cur));
   844     }
   845   }
   846 }
   848 // Apply the given closure to each live object in the space
    849 //   The use of CompactibleFreeListSpace
    850 // by ConcurrentMarkSweepGeneration for concurrent GCs allows
    851 // objects in the space to hold references to objects that are no longer
    852 // valid.  For example, an object may reference another object
    853 // that has already been swept up (collected).  This method uses
   854 // obj_is_alive() to determine whether it is safe to apply the closure to
   855 // an object.  See obj_is_alive() for details on how liveness of an
   856 // object is decided.
   858 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
   859   assert_lock_strong(freelistLock());
   860   NOT_PRODUCT(verify_objects_initialized());
   861   HeapWord *cur, *limit;
   862   size_t curSize;
   863   for (cur = bottom(), limit = end(); cur < limit;
   864        cur += curSize) {
   865     curSize = block_size(cur);
   866     if (block_is_obj(cur) && obj_is_alive(cur)) {
   867       blk->do_object(oop(cur));
   868     }
   869   }
   870 }
   872 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
   873                                                   UpwardsObjectClosure* cl) {
   874   assert_locked(freelistLock());
   875   NOT_PRODUCT(verify_objects_initialized());
   876   Space::object_iterate_mem(mr, cl);
   877 }
   879 // Callers of this iterator beware: The closure application should
   880 // be robust in the face of uninitialized objects and should (always)
   881 // return a correct size so that the next addr + size below gives us a
   882 // valid block boundary. [See for instance,
   883 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   884 // in ConcurrentMarkSweepGeneration.cpp.]
   885 HeapWord*
   886 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
   887   assert_lock_strong(freelistLock());
   888   HeapWord *addr, *last;
   889   size_t size;
   890   for (addr = bottom(), last  = end();
   891        addr < last; addr += size) {
   892     FreeChunk* fc = (FreeChunk*)addr;
   893     if (fc->isFree()) {
   894       // Since we hold the free list lock, which protects direct
   895       // allocation in this generation by mutators, a free object
   896       // will remain free throughout this iteration code.
   897       size = fc->size();
   898     } else {
   899       // Note that the object need not necessarily be initialized,
   900       // because (for instance) the free list lock does NOT protect
   901       // object initialization. The closure application below must
   902       // therefore be correct in the face of uninitialized objects.
   903       size = cl->do_object_careful(oop(addr));
   904       if (size == 0) {
   905         // An unparsable object found. Signal early termination.
   906         return addr;
   907       }
   908     }
   909   }
   910   return NULL;
   911 }
   913 // Callers of this iterator beware: The closure application should
   914 // be robust in the face of uninitialized objects and should (always)
   915 // return a correct size so that the next addr + size below gives us a
   916 // valid block boundary. [See for instance,
   917 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   918 // in ConcurrentMarkSweepGeneration.cpp.]
   919 HeapWord*
   920 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
   921   ObjectClosureCareful* cl) {
   922   assert_lock_strong(freelistLock());
   923   // Can't use used_region() below because it may not necessarily
   924   // be the same as [bottom(),end()); although we could
   925   // use [used_region().start(),round_to(used_region().end(),CardSize)),
   926   // that appears too cumbersome, so we just do the simpler check
   927   // in the assertion below.
   928   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
   929          "mr should be non-empty and within used space");
   930   HeapWord *addr, *end;
   931   size_t size;
   932   for (addr = block_start_careful(mr.start()), end  = mr.end();
   933        addr < end; addr += size) {
   934     FreeChunk* fc = (FreeChunk*)addr;
   935     if (fc->isFree()) {
   936       // Since we hold the free list lock, which protects direct
   937       // allocation in this generation by mutators, a free object
   938       // will remain free throughout this iteration code.
   939       size = fc->size();
   940     } else {
   941       // Note that the object need not necessarily be initialized,
   942       // because (for instance) the free list lock does NOT protect
   943       // object initialization. The closure application below must
   944       // therefore be correct in the face of uninitialized objects.
   945       size = cl->do_object_careful_m(oop(addr), mr);
   946       if (size == 0) {
   947         // An unparsable object found. Signal early termination.
   948         return addr;
   949       }
   950     }
   951   }
   952   return NULL;
   953 }
   956 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
   957   NOT_PRODUCT(verify_objects_initialized());
   958   return _bt.block_start(p);
   959 }
   961 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
   962   return _bt.block_start_careful(p);
   963 }
   965 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   966   NOT_PRODUCT(verify_objects_initialized());
   967   // This must be volatile, or else there is a danger that the compiler
   968   // will compile the code below into a sometimes-infinite loop, by keeping
   969   // the value read the first time in a register.
   970   while (true) {
   971     // We must do this until we get a consistent view of the object.
   972     if (FreeChunk::indicatesFreeChunk(p)) {
   973       volatile FreeChunk* fc = (volatile FreeChunk*)p;
   974       size_t res = fc->size();
   975       // If the object is still a free chunk, return the size, else it
   976       // has been allocated so try again.
   977       if (FreeChunk::indicatesFreeChunk(p)) {
   978         assert(res != 0, "Block size should not be 0");
   979         return res;
   980       }
   981     } else {
   982       // must read from what 'p' points to in each loop.
   983       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
   984       if (k != NULL) {
   985         assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
   986         oop o = (oop)p;
   987         assert(o->is_parsable(), "Should be parsable");
   988         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
   989         size_t res = o->size_given_klass(k->klass_part());
   990         res = adjustObjectSize(res);
   991         assert(res != 0, "Block size should not be 0");
   992         return res;
   993       }
   994     }
   995   }
   996 }
   998 // A variant of the above that uses the Printezis bits for
   999 // unparsable but allocated objects. This avoids any possible
  1000 // stalls waiting for mutators to initialize objects, and is
  1001 // thus potentially faster than the variant above. However,
  1002 // this variant may return a zero size for a block that is
  1003 // under mutation and for which a consistent size cannot be
  1004 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
  1005 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
  1006                                                      const CMSCollector* c)
  1007 const {
  1008   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1009   // This must be volatile, or else there is a danger that the compiler
  1010   // will compile the code below into a sometimes-infinite loop, by keeping
  1011   // the value read the first time in a register.
  1012   DEBUG_ONLY(uint loops = 0;)
  1013   while (true) {
  1014     // We must do this until we get a consistent view of the object.
  1015     if (FreeChunk::indicatesFreeChunk(p)) {
  1016       volatile FreeChunk* fc = (volatile FreeChunk*)p;
  1017       size_t res = fc->size();
  1018       if (FreeChunk::indicatesFreeChunk(p)) {
  1019         assert(res != 0, "Block size should not be 0");
  1020         assert(loops == 0, "Should be 0");
   1021         return res;
   1022       }
  1023     } else {
  1024       // must read from what 'p' points to in each loop.
  1025       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
  1026       if (k != NULL &&
  1027           ((oopDesc*)p)->is_parsable() &&
  1028           ((oopDesc*)p)->is_conc_safe()) {
  1029         assert(k->is_oop(), "Should really be klass oop.");
  1030         oop o = (oop)p;
  1031         assert(o->is_oop(), "Should be an oop");
  1032         size_t res = o->size_given_klass(k->klass_part());
  1033         res = adjustObjectSize(res);
  1034         assert(res != 0, "Block size should not be 0");
  1035         return res;
  1036       } else {
   1037         return c->block_size_if_printezis_bits(p);
   1038       }
   1039     }
   1040     assert(loops == 0, "Can loop at most once");
   1041     DEBUG_ONLY(loops++;)
   1042   }
   1043 }
  1045 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
  1046   NOT_PRODUCT(verify_objects_initialized());
  1047   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1048   FreeChunk* fc = (FreeChunk*)p;
  1049   if (fc->isFree()) {
  1050     return fc->size();
  1051   } else {
  1052     // Ignore mark word because this may be a recently promoted
  1053     // object whose mark word is used to chain together grey
  1054     // objects (the last one would have a null value).
  1055     assert(oop(p)->is_oop(true), "Should be an oop");
   1056     return adjustObjectSize(oop(p)->size());
   1057   }
   1058 }
  1060 // This implementation assumes that the property of "being an object" is
  1061 // stable.  But being a free chunk may not be (because of parallel
  1062 // promotion.)
  1063 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
  1064   FreeChunk* fc = (FreeChunk*)p;
  1065   assert(is_in_reserved(p), "Should be in space");
  1066   // When doing a mark-sweep-compact of the CMS generation, this
  1067   // assertion may fail because prepare_for_compaction() uses
  1068   // space that is garbage to maintain information on ranges of
  1069   // live objects so that these live ranges can be moved as a whole.
  1070   // Comment out this assertion until that problem can be solved
   1071   // (i.e., the block start calculation may look at objects
   1072   // at addresses below "p" in finding the object that contains "p",
   1073   // and those objects (if garbage) may have been modified to hold
   1074   // live range information).
  1075   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
  1076   //        "Should be a block boundary");
  1077   if (FreeChunk::indicatesFreeChunk(p)) return false;
  1078   klassOop k = oop(p)->klass_or_null();
  1079   if (k != NULL) {
  1080     // Ignore mark word because it may have been used to
  1081     // chain together promoted objects (the last one
  1082     // would have a null value).
  1083     assert(oop(p)->is_oop(true), "Should be an oop");
  1084     return true;
  1085   } else {
   1086     return false;  // Was not an object at the start of collection.
   1087   }
   1088 }
  1090 // Check if the object is alive. This fact is checked either by consulting
  1091 // the main marking bitmap in the sweeping phase or, if it's a permanent
  1092 // generation and we're not in the sweeping phase, by checking the
  1093 // perm_gen_verify_bit_map where we store the "deadness" information if
  1094 // we did not sweep the perm gen in the most recent previous GC cycle.
  1095 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
  1096   assert(block_is_obj(p), "The address should point to an object");
  1097   assert(SafepointSynchronize::is_at_safepoint(), "Else races are possible");
  1099   // If we're sweeping, we use object liveness information from the main bit map
  1100   // for both perm gen and old gen.
  1101   // We don't need to lock the bitmap (live_map or dead_map below), because
  1102   // EITHER we are in the middle of the sweeping phase, and the
  1103   // main marking bit map (live_map below) is locked,
  1104   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
  1105   // is stable, because it's mutated only in the sweeping phase.
  1106   // NOTE: This method is also used by jmap where, if class unloading is
  1107   // off, the results can return "false" for legitimate perm objects,
  1108   // when we are not in the midst of a sweeping phase, which can result
  1109   // in jmap not reporting certain perm gen objects. This will be moot
  1110   // if/when the perm gen goes away in the future.
  1111   if (_collector->abstract_state() == CMSCollector::Sweeping) {
  1112     CMSBitMap* live_map = _collector->markBitMap();
  1113     return live_map->par_isMarked((HeapWord*) p);
  1114   } else {
  1115     // If we're not currently sweeping and we haven't swept the perm gen in
  1116     // the previous concurrent cycle then we may have dead but unswept objects
  1117     // in the perm gen. In this case, we use the "deadness" information
  1118     // that we had saved in perm_gen_verify_bit_map at the last sweep.
  1119     if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
  1120       if (_collector->verifying()) {
  1121         CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
  1122         // Object is marked in the dead_map bitmap at the previous sweep
  1123         // when we know that it's dead; if the bitmap is not allocated then
  1124         // the object is alive.
   1125         return (dead_map->sizeInBits() == 0) // bit_map has not been allocated
  1126                || !dead_map->par_isMarked((HeapWord*) p);
  1127       } else {
   1128         return false; // We can't say for sure if it's live, so we say that it's dead.
   1129       }
   1130     }
   1131   }
   1132   return true;
   1133 }
  1135 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
  1136   FreeChunk* fc = (FreeChunk*)p;
  1137   assert(is_in_reserved(p), "Should be in space");
  1138   assert(_bt.block_start(p) == p, "Should be a block boundary");
  1139   if (!fc->isFree()) {
  1140     // Ignore mark word because it may have been used to
  1141     // chain together promoted objects (the last one
  1142     // would have a null value).
  1143     assert(oop(p)->is_oop(true), "Should be an oop");
   1144     return true;
   1145   }
   1146   return false;
   1147 }
  1149 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
   1150 // approximate answer if you don't hold the free list lock when you call this.
  1151 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
  1152   size_t size = 0;
  1153   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  1154     debug_only(
  1155       // We may be calling here without the lock in which case we
  1156       // won't do this modest sanity check.
  1157       if (freelistLock()->owned_by_self()) {
  1158         size_t total_list_size = 0;
  1159         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
  1160           fc = fc->next()) {
  1161           total_list_size += i;
  1163         assert(total_list_size == i * _indexedFreeList[i].count(),
  1164                "Count in list is incorrect");
  1167     size += i * _indexedFreeList[i].count();
  1169   return size;
  1172 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  1173   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  1174   return allocate(size);
  1177 HeapWord*
  1178 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
  1179   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
  1182 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  1183   assert_lock_strong(freelistLock());
  1184   HeapWord* res = NULL;
  1185   assert(size == adjustObjectSize(size),
  1186          "use adjustObjectSize() before calling into allocate()");
  1188   if (_adaptive_freelists) {
  1189     res = allocate_adaptive_freelists(size);
  1190   } else {  // non-adaptive free lists
  1191     res = allocate_non_adaptive_freelists(size);
  1194   if (res != NULL) {
  1195     // check that res does lie in this space!
  1196     assert(is_in_reserved(res), "Not in this space!");
  1197     assert(is_aligned((void*)res), "alignment check");
  1199     FreeChunk* fc = (FreeChunk*)res;
  1200     fc->markNotFree();
  1201     assert(!fc->isFree(), "shouldn't be marked free");
  1202     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
  1203     // Verify that the block offset table shows this to
  1204     // be a single block, but not one which is unallocated.
  1205     _bt.verify_single_block(res, size);
  1206     _bt.verify_not_unallocated(res, size);
  1207     // mangle a just allocated object with a distinct pattern.
  1208     debug_only(fc->mangleAllocated(size));
  1211   return res;
  1214 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
  1215   HeapWord* res = NULL;
  1216   // try and use linear allocation for smaller blocks
  1217   if (size < _smallLinearAllocBlock._allocation_size_limit) {
  1218     // if successful, the following also adjusts block offset table
  1219     res = getChunkFromSmallLinearAllocBlock(size);
  1221   // Else triage to indexed lists for smaller sizes
  1222   if (res == NULL) {
  1223     if (size < SmallForDictionary) {
  1224       res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1225     } else {
  1226       // else get it from the big dictionary; if even this doesn't
  1227       // work we are out of luck.
  1228       res = (HeapWord*)getChunkFromDictionaryExact(size);
  1232   return res;
  1235 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
  1236   assert_lock_strong(freelistLock());
  1237   HeapWord* res = NULL;
  1238   assert(size == adjustObjectSize(size),
  1239          "use adjustObjectSize() before calling into allocate()");
  1241   // Strategy
  1242   //   if small
  1243   //     exact size from small object indexed list
  1244   //     small or large linear allocation block (linAB) as appropriate
  1245   //     take from lists of greater sized chunks
  1246   //   else
  1247   //     dictionary
  1248   //     small or large linear allocation block if it has the space
  1249   // Try allocating exact size from indexTable first
  1250   if (size < IndexSetSize) {
  1251     res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1252     if (res != NULL) {
  1253       assert(res != (HeapWord*)_indexedFreeList[size].head(),
  1254         "Not removed from free list");
  1255       // no block offset table adjustment is necessary on blocks in
  1256       // the indexed lists.
  1258     // Try allocating from the small LinAB
  1259     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
  1260         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
  1261         // if successful, the above also adjusts block offset table
  1262         // Note that this call will refill the LinAB to
  1263         // satisfy the request.  This is different than
  1264         // evm.
  1265         // Don't record chunk off a LinAB?  smallSplitBirth(size);
  1266     } else {
  1267       // Raid the exact free lists larger than size, even if they are not
  1268       // overpopulated.
  1269       res = (HeapWord*) getChunkFromGreater(size);
  1271   } else {
  1272     // Big objects get allocated directly from the dictionary.
  1273     res = (HeapWord*) getChunkFromDictionaryExact(size);
  1274     if (res == NULL) {
  1275       // Try hard not to fail since an allocation failure will likely
  1276       // trigger a synchronous GC.  Try to get the space from the
  1277       // allocation blocks.
  1278       res = getChunkFromSmallLinearAllocBlockRemainder(size);
  1282   return res;
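// --- Editorial illustration (not part of the original source) ---------------
// The allocation triage above, restated as a minimal sketch over plain values.
// The parameters (index_set_size, small_linab_limit) and the chunk sources are
// stand-ins for the real CMS structures; only the ordering of the attempts is
// taken from the code above.
#include <cstddef>

enum SketchSource { kIndexedList, kSmallLinAB, kGreaterLists,
                    kDictionary, kLinABRemainder };

static SketchSource choose_source(size_t size,
                                  size_t index_set_size,      // 257 words here
                                  size_t small_linab_limit,
                                  bool indexed_hit, bool linab_hit,
                                  bool dict_hit) {
  if (size < index_set_size) {
    if (indexed_hit)                           return kIndexedList;
    if (size < small_linab_limit && linab_hit) return kSmallLinAB;
    return kGreaterLists;      // raid exact lists of larger sizes
  }
  if (dict_hit)                                return kDictionary;
  return kLinABRemainder;      // last-ditch attempt before giving up
}
// ----------------------------------------------------------------------------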
  1285 // A worst-case estimate of the space required (in HeapWords) to expand the heap
  1286 // when promoting obj.
  1287 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  1288   // Depending on the object size, expansion may require refilling either a
  1289   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
  1290   // is added because the dictionary may over-allocate to avoid fragmentation.
  1291   size_t space = obj_size;
  1292   if (!_adaptive_freelists) {
  1293     space = MAX2(space, _smallLinearAllocBlock._refillSize);
  1295   space += _promoInfo.refillSize() + 2 * MinChunkSize;
  1296   return space;
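// --- Editorial illustration (not part of the original source) ---------------
// A standalone restatement of the worst-case arithmetic above; the inputs are
// parameters rather than the real CMS fields, so the numbers in the example
// below are made up and only show how the terms combine.
#include <algorithm>
#include <cstddef>

static size_t expansion_space_sketch(size_t obj_size,
                                     bool adaptive_freelists,
                                     size_t small_linab_refill,
                                     size_t promo_info_refill,
                                     size_t min_chunk_size) {
  size_t space = obj_size;
  if (!adaptive_freelists) {
    space = std::max(space, small_linab_refill);  // may need to refill the linAB
  }
  // the dictionary may over-allocate to avoid fragmentation, hence the padding
  return space + promo_info_refill + 2 * min_chunk_size;
}
// e.g. expansion_space_sketch(100, false, 4096, 128, 2) == 4228 words
// ----------------------------------------------------------------------------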
  1299 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  1300   FreeChunk* ret;
  1302   assert(numWords >= MinChunkSize, "Size is less than minimum");
  1303   assert(linearAllocationWouldFail() || bestFitFirst(),
  1304     "Should not be here");
  1306   size_t i;
  1307   size_t currSize = numWords + MinChunkSize;
  1308   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  1309   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
  1310     FreeList* fl = &_indexedFreeList[i];
  1311     if (fl->head()) {
  1312       ret = getFromListGreater(fl, numWords);
  1313       assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
  1314       return ret;
  1318   currSize = MAX2((size_t)SmallForDictionary,
  1319                   (size_t)(numWords + MinChunkSize));
  1321   /* Try to get a chunk that satisfies request, while avoiding
  1322      fragmentation that can't be handled. */
  1324     ret =  dictionary()->getChunk(currSize);
  1325     if (ret != NULL) {
  1326       assert(ret->size() - numWords >= MinChunkSize,
  1327              "Chunk is too small");
  1328       _bt.allocated((HeapWord*)ret, ret->size());
  1329       /* Carve returned chunk. */
  1330       (void) splitChunkAndReturnRemainder(ret, numWords);
  1331       /* Label this as no longer a free chunk. */
  1332       assert(ret->isFree(), "This chunk should be free");
  1333       ret->linkPrev(NULL);
  1335     assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
  1336     return ret;
  1338   ShouldNotReachHere();
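// --- Editorial illustration (not part of the original source) ---------------
// Why the search above starts at numWords + MinChunkSize: any chunk we carve
// numWords out of must leave a remainder of at least MinChunkSize, or the
// leftover could not live on a free list. A minimal sketch of that check,
// with plain size_t values standing in for the real chunks:
#include <cstddef>

static bool can_carve(size_t chunk_words, size_t request_words,
                      size_t min_chunk_words) {
  // exact fits are handled elsewhere; here we only consider splitting
  return chunk_words >= request_words + min_chunk_words;
}
// e.g. with MinChunkSize == 2: a 10-word chunk can serve an 8-word request
// (remainder 2), but a 9-word chunk cannot (the 1-word remainder is too small).
// ----------------------------------------------------------------------------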
  1341 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
  1342   const {
  1343   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  1344   return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
  1347 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
  1348   if (fc->size() >= IndexSetSize) {
  1349     return dictionary()->verifyChunkInFreeLists(fc);
  1350   } else {
  1351     return verifyChunkInIndexedFreeLists(fc);
  1355 #ifndef PRODUCT
  1356 void CompactibleFreeListSpace::assert_locked() const {
  1357   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
  1360 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
  1361   CMSLockVerifier::assert_locked(lock);
  1363 #endif
  1365 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
  1366   // In the parallel case, the main thread holds the free list lock
  1367   // on behalf of the parallel threads.
  1368   FreeChunk* fc;
  1370     // If GC is parallel, this might be called by several threads.
  1371     // This should be rare enough that the locking overhead won't affect
  1372     // the sequential code.
  1373     MutexLockerEx x(parDictionaryAllocLock(),
  1374                     Mutex::_no_safepoint_check_flag);
  1375     fc = getChunkFromDictionary(size);
  1377   if (fc != NULL) {
  1378     fc->dontCoalesce();
  1379     assert(fc->isFree(), "Should be free, but not coalescable");
  1380     // Verify that the block offset table shows this to
  1381     // be a single block, but not one which is unallocated.
  1382     _bt.verify_single_block((HeapWord*)fc, fc->size());
  1383     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  1385   return fc;
  1388 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
  1389   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  1390   assert_locked();
  1392   // if we are tracking promotions, then first ensure space for
  1393   // promotion (including spooling space for saving header if necessary).
  1394   // then allocate and copy, then track promoted info if needed.
  1395   // When tracking (see PromotionInfo::track()), the mark word may
  1396   // be displaced and in this case restoration of the mark word
  1397   // occurs in the (oop_since_save_marks_)iterate phase.
  1398   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
  1399     return NULL;
  1401   // Call the allocate(size_t, bool) form directly to avoid the
  1402   // additional call through the allocate(size_t) form.  Having
  1403   // the compiler inline the call is problematic because allocate(size_t)
  1404   // is a virtual method.
  1405   HeapWord* res = allocate(adjustObjectSize(obj_size));
  1406   if (res != NULL) {
  1407     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
  1408     // if we should be tracking promotions, do so.
  1409     if (_promoInfo.tracking()) {
  1410         _promoInfo.track((PromotedObject*)res);
  1413   return oop(res);
  1416 HeapWord*
  1417 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
  1418   assert_locked();
  1419   assert(size >= MinChunkSize, "minimum chunk size");
  1420   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
  1421     "maximum from smallLinearAllocBlock");
  1422   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
  1425 HeapWord*
  1426 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
  1427                                                        size_t size) {
  1428   assert_locked();
  1429   assert(size >= MinChunkSize, "too small");
  1430   HeapWord* res = NULL;
  1431   // Try to do linear allocation from blk, making sure that the block is not empty.
  1432   if (blk->_word_size == 0) {
  1433     // We have probably been unable to fill this either in the prologue or
  1434     // when it was exhausted at the last linear allocation. Bail out until
  1435     // next time.
  1436     assert(blk->_ptr == NULL, "consistency check");
  1437     return NULL;
  1439   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
  1440   res = getChunkFromLinearAllocBlockRemainder(blk, size);
  1441   if (res != NULL) return res;
  1443   // about to exhaust this linear allocation block
  1444   if (blk->_word_size == size) { // exactly satisfied
  1445     res = blk->_ptr;
  1446     _bt.allocated(res, blk->_word_size);
  1447   } else if (size + MinChunkSize <= blk->_refillSize) {
  1448     size_t sz = blk->_word_size;
  1449     // Update _unallocated_block if the size is such that chunk would be
  1450     // returned to the indexed free list.  All other chunks in the indexed
  1451     // free lists are allocated from the dictionary so that _unallocated_block
  1452     // has already been adjusted for them.  Do it here so that the cost
  1453     // is incurred uniformly for all chunks added back to the indexed free lists.
  1454     if (sz < SmallForDictionary) {
  1455       _bt.allocated(blk->_ptr, sz);
  1457     // Return the chunk that isn't big enough, and then refill below.
  1458     addChunkToFreeLists(blk->_ptr, sz);
  1459     splitBirth(sz);
  1460     // Don't keep statistics on adding back chunk from a LinAB.
  1461   } else {
  1462     // A refilled block would not satisfy the request.
  1463     return NULL;
  1466   blk->_ptr = NULL; blk->_word_size = 0;
  1467   refillLinearAllocBlock(blk);
  1468   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
  1469          "block was replenished");
  1470   if (res != NULL) {
  1471     splitBirth(size);
  1472     repairLinearAllocBlock(blk);
  1473   } else if (blk->_ptr != NULL) {
  1474     res = blk->_ptr;
  1475     size_t blk_size = blk->_word_size;
  1476     blk->_word_size -= size;
  1477     blk->_ptr  += size;
  1478     splitBirth(size);
  1479     repairLinearAllocBlock(blk);
  1480     // Update BOT last so that other (parallel) GC threads see a consistent
  1481     // view of the BOT and free blocks.
  1482     // Above must occur before BOT is updated below.
  1483     OrderAccess::storestore();
  1484     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1486   return res;
  1489 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
  1490                                         LinearAllocBlock* blk,
  1491                                         size_t size) {
  1492   assert_locked();
  1493   assert(size >= MinChunkSize, "too small");
  1495   HeapWord* res = NULL;
  1496   // This is the common case.  Keep it simple.
  1497   if (blk->_word_size >= size + MinChunkSize) {
  1498     assert(blk->_ptr != NULL, "consistency check");
  1499     res = blk->_ptr;
  1500     // Note that the BOT is up-to-date for the linAB before allocation.  It
  1501     // indicates the start of the linAB.  The split_block() updates the
  1502     // BOT for the linAB after the allocation (indicates the start of the
  1503     // next chunk to be allocated).
  1504     size_t blk_size = blk->_word_size;
  1505     blk->_word_size -= size;
  1506     blk->_ptr  += size;
  1507     splitBirth(size);
  1508     repairLinearAllocBlock(blk);
  1509     // Update BOT last so that other (parallel) GC threads see a consistent
  1510     // view of the BOT and free blocks.
  1511     // Above must occur before BOT is updated below.
  1512     OrderAccess::storestore();
  1513     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1514     _bt.allocated(res, size);
  1516   return res;
  1519 FreeChunk*
  1520 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
  1521   assert_locked();
  1522   assert(size < SmallForDictionary, "just checking");
  1523   FreeChunk* res;
  1524   res = _indexedFreeList[size].getChunkAtHead();
  1525   if (res == NULL) {
  1526     res = getChunkFromIndexedFreeListHelper(size);
  1528   _bt.verify_not_unallocated((HeapWord*) res, size);
  1529   assert(res == NULL || res->size() == size, "Incorrect block size");
  1530   return res;
  1533 FreeChunk*
  1534 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
  1535   bool replenish) {
  1536   assert_locked();
  1537   FreeChunk* fc = NULL;
  1538   if (size < SmallForDictionary) {
  1539     assert(_indexedFreeList[size].head() == NULL ||
  1540       _indexedFreeList[size].surplus() <= 0,
  1541       "List for this size should be empty or under populated");
  1542     // Try best fit in exact lists before replenishing the list
  1543     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
  1544       // Replenish list.
  1545       //
  1546       // Things tried that failed.
  1547       //   Tried allocating out of the two LinAB's first before
  1548       // replenishing lists.
  1549       //   Tried small linAB of size 256 (size in indexed list)
  1550       // and replenishing indexed lists from the small linAB.
  1551       //
  1552       FreeChunk* newFc = NULL;
  1553       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
  1554       if (replenish_size < SmallForDictionary) {
  1555         // Do not replenish from an underpopulated size.
  1556         if (_indexedFreeList[replenish_size].surplus() > 0 &&
  1557             _indexedFreeList[replenish_size].head() != NULL) {
  1558           newFc = _indexedFreeList[replenish_size].getChunkAtHead();
  1559         } else if (bestFitFirst()) {
  1560           newFc = bestFitSmall(replenish_size);
  1563       if (newFc == NULL && replenish_size > size) {
  1564         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
  1565         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
  1567       // Note: The stats update re split-death of the block obtained above
  1568       // will be recorded below precisely when we know we are going to
  1569       // be actually splitting it into more than one piece.
  1570       if (newFc != NULL) {
  1571         if  (replenish || CMSReplenishIntermediate) {
  1572           // Replenish this list and return one block to caller.
  1573           size_t i;
  1574           FreeChunk *curFc, *nextFc;
  1575           size_t num_blk = newFc->size() / size;
  1576           assert(num_blk >= 1, "Smaller than requested?");
  1577           assert(newFc->size() % size == 0, "Should be integral multiple of request");
  1578           if (num_blk > 1) {
  1579             // we are sure we will be splitting the block just obtained
  1580             // into multiple pieces; record the split-death of the original
  1581             splitDeath(replenish_size);
  1583           // carve up and link blocks 0, ..., num_blk - 2
  1584           // The last chunk is not added to the lists but is returned as the
  1585           // free chunk.
  1586           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
  1587                i = 0;
  1588                i < (num_blk - 1);
  1589                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
  1590                i++) {
  1591             curFc->setSize(size);
  1592             // Don't record this as a return in order to try and
  1593             // determine the "returns" from a GC.
  1594             _bt.verify_not_unallocated((HeapWord*) curFc, size);
  1595             _indexedFreeList[size].returnChunkAtTail(curFc, false);
  1596             _bt.mark_block((HeapWord*)curFc, size);
  1597             splitBirth(size);
  1598             // Don't record the initial population of the indexed list
  1599             // as a split birth.
  1602           // check that the arithmetic was OK above
  1603           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
  1604             "inconsistency in carving newFc");
  1605           curFc->setSize(size);
  1606           _bt.mark_block((HeapWord*)curFc, size);
  1607           splitBirth(size);
  1608           fc = curFc;
  1609         } else {
  1610           // Return entire block to caller
  1611           fc = newFc;
  1615   } else {
  1616     // Get a free chunk from the free chunk dictionary to be returned to
  1617     // replenish the indexed free list.
  1618     fc = getChunkFromDictionaryExact(size);
  1620   // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
  1621   return fc;
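// --- Editorial illustration (not part of the original source) ---------------
// The replenish-and-carve arithmetic above, as a standalone sketch: a block of
// replenish_size words is cut into equal chunks of the requested size, the
// first num_blk - 1 go onto the indexed list and the last one is handed back
// to the caller. The vector stands in for the real FreeList.
#include <cstddef>
#include <vector>

static size_t carve_replenish_block(size_t block_words, size_t request_words,
                                    std::vector<size_t>* indexed_list) {
  size_t num_blk = block_words / request_words;   // how many whole chunks fit
  for (size_t i = 0; i + 1 < num_blk; i++) {
    indexed_list->push_back(request_words);       // chunks 0 .. num_blk - 2
  }
  return request_words;                           // the last chunk is returned
}
// e.g. with CMSIndexedFreeListReplenish == 4 and a 16-word request, a 64-word
// block yields three 16-word chunks on the list plus one returned to the caller.
// ----------------------------------------------------------------------------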
  1624 FreeChunk*
  1625 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
  1626   assert_locked();
  1627   FreeChunk* fc = _dictionary->getChunk(size);
  1628   if (fc == NULL) {
  1629     return NULL;
  1631   _bt.allocated((HeapWord*)fc, fc->size());
  1632   if (fc->size() >= size + MinChunkSize) {
  1633     fc = splitChunkAndReturnRemainder(fc, size);
  1635   assert(fc->size() >= size, "chunk too small");
  1636   assert(fc->size() < size + MinChunkSize, "chunk too big");
  1637   _bt.verify_single_block((HeapWord*)fc, fc->size());
  1638   return fc;
  1641 FreeChunk*
  1642 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
  1643   assert_locked();
  1644   FreeChunk* fc = _dictionary->getChunk(size);
  1645   if (fc == NULL) {
  1646     return fc;
  1648   _bt.allocated((HeapWord*)fc, fc->size());
  1649   if (fc->size() == size) {
  1650     _bt.verify_single_block((HeapWord*)fc, size);
  1651     return fc;
  1653   assert(fc->size() > size, "getChunk() guarantee");
  1654   if (fc->size() < size + MinChunkSize) {
  1655     // Return the chunk to the dictionary and go get a bigger one.
  1656     returnChunkToDictionary(fc);
  1657     fc = _dictionary->getChunk(size + MinChunkSize);
  1658     if (fc == NULL) {
  1659       return NULL;
  1661     _bt.allocated((HeapWord*)fc, fc->size());
  1663   assert(fc->size() >= size + MinChunkSize, "tautology");
  1664   fc = splitChunkAndReturnRemainder(fc, size);
  1665   assert(fc->size() == size, "chunk is wrong size");
  1666   _bt.verify_single_block((HeapWord*)fc, size);
  1667   return fc;
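// --- Editorial illustration (not part of the original source) ---------------
// The retry above in one picture: the dictionary may hand back a chunk that is
// bigger than the request but too small to split (the remainder would be under
// MinChunkSize). Since the caller needs an exact size, such a chunk is put
// back and a chunk of at least size + MinChunkSize is requested instead, which
// is always safe to split. A sketch of the decision, with plain values:
#include <cstddef>

enum ExactFitAction { kUseAsIs, kSplit, kPutBackAndRetry };

static ExactFitAction classify_dictionary_chunk(size_t chunk_words,
                                                size_t request_words,
                                                size_t min_chunk_words) {
  if (chunk_words == request_words)                    return kUseAsIs;
  if (chunk_words >= request_words + min_chunk_words)  return kSplit;
  return kPutBackAndRetry;   // oversize but unsplittable
}
// ----------------------------------------------------------------------------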
  1670 void
  1671 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
  1672   assert_locked();
  1674   size_t size = chunk->size();
  1675   _bt.verify_single_block((HeapWord*)chunk, size);
  1676   // adjust _unallocated_block downward, as necessary
  1677   _bt.freed((HeapWord*)chunk, size);
  1678   _dictionary->returnChunk(chunk);
  1679 #ifndef PRODUCT
  1680   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1681     TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
  1683 #endif // PRODUCT
  1686 void
  1687 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
  1688   assert_locked();
  1689   size_t size = fc->size();
  1690   _bt.verify_single_block((HeapWord*) fc, size);
  1691   _bt.verify_not_unallocated((HeapWord*) fc, size);
  1692   if (_adaptive_freelists) {
  1693     _indexedFreeList[size].returnChunkAtTail(fc);
  1694   } else {
  1695     _indexedFreeList[size].returnChunkAtHead(fc);
  1697 #ifndef PRODUCT
  1698   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1699      _indexedFreeList[size].verify_stats();
  1701 #endif // PRODUCT
  1704 // Add chunk to end of last block -- if it's the largest
  1705 // block -- and update BOT and census data. We would
  1706 // of course have preferred to coalesce it with the
  1707 // last block, but it's currently less expensive to find the
  1708 // largest block than it is to find the last.
  1709 void
  1710 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
  1711   HeapWord* chunk, size_t     size) {
  1712   // check that the chunk does lie in this space!
  1713   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1714   // One of the parallel gc task threads may be here
  1715   // whilst others are allocating.
  1716   Mutex* lock = NULL;
  1717   if (ParallelGCThreads != 0) {
  1718     lock = &_parDictionaryAllocLock;
  1720   FreeChunk* ec;
  1722     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1723     ec = dictionary()->findLargestDict();  // get largest block
  1724     if (ec != NULL && ec->end() == chunk) {
  1725       // It's a coterminal block - we can coalesce.
  1726       size_t old_size = ec->size();
  1727       coalDeath(old_size);
  1728       removeChunkFromDictionary(ec);
  1729       size += old_size;
  1730     } else {
  1731       ec = (FreeChunk*)chunk;
  1734   ec->setSize(size);
  1735   debug_only(ec->mangleFreed(size));
  1736   if (size < SmallForDictionary) {
  1737     lock = _indexedFreeListParLocks[size];
  1739   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1740   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  1741   // record the birth under the lock since the recording involves
  1742   // manipulation of the list on which the chunk lives and
  1743   // if the chunk is allocated and is the last on the list,
  1744   // the list can go away.
  1745   coalBirth(size);
  1748 void
  1749 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
  1750                                               size_t     size) {
  1751   // check that the chunk does lie in this space!
  1752   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1753   assert_locked();
  1754   _bt.verify_single_block(chunk, size);
  1756   FreeChunk* fc = (FreeChunk*) chunk;
  1757   fc->setSize(size);
  1758   debug_only(fc->mangleFreed(size));
  1759   if (size < SmallForDictionary) {
  1760     returnChunkToFreeList(fc);
  1761   } else {
  1762     returnChunkToDictionary(fc);
  1766 void
  1767 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
  1768   size_t size, bool coalesced) {
  1769   assert_locked();
  1770   assert(chunk != NULL, "null chunk");
  1771   if (coalesced) {
  1772     // repair BOT
  1773     _bt.single_block(chunk, size);
  1775   addChunkToFreeLists(chunk, size);
  1778 // We _must_ find the purported chunk on our free lists;
  1779 // we assert if we don't.
  1780 void
  1781 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
  1782   size_t size = fc->size();
  1783   assert_locked();
  1784   debug_only(verifyFreeLists());
  1785   if (size < SmallForDictionary) {
  1786     removeChunkFromIndexedFreeList(fc);
  1787   } else {
  1788     removeChunkFromDictionary(fc);
  1790   _bt.verify_single_block((HeapWord*)fc, size);
  1791   debug_only(verifyFreeLists());
  1794 void
  1795 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
  1796   size_t size = fc->size();
  1797   assert_locked();
  1798   assert(fc != NULL, "null chunk");
  1799   _bt.verify_single_block((HeapWord*)fc, size);
  1800   _dictionary->removeChunk(fc);
  1801   // adjust _unallocated_block upward, as necessary
  1802   _bt.allocated((HeapWord*)fc, size);
  1805 void
  1806 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
  1807   assert_locked();
  1808   size_t size = fc->size();
  1809   _bt.verify_single_block((HeapWord*)fc, size);
  1810   NOT_PRODUCT(
  1811     if (FLSVerifyIndexTable) {
  1812       verifyIndexedFreeList(size);
  1815   _indexedFreeList[size].removeChunk(fc);
  1816   debug_only(fc->clearNext());
  1817   debug_only(fc->clearPrev());
  1818   NOT_PRODUCT(
  1819     if (FLSVerifyIndexTable) {
  1820       verifyIndexedFreeList(size);
  1825 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
  1826   /* A hint is the next larger size that has a surplus.
  1827      Start search at a size large enough to guarantee that
  1828      the excess is >= MIN_CHUNK. */
  1829   size_t start = align_object_size(numWords + MinChunkSize);
  1830   if (start < IndexSetSize) {
  1831     FreeList* it   = _indexedFreeList;
  1832     size_t    hint = _indexedFreeList[start].hint();
  1833     while (hint < IndexSetSize) {
  1834       assert(hint % MinObjAlignment == 0, "hint should be aligned");
  1835       FreeList *fl = &_indexedFreeList[hint];
  1836       if (fl->surplus() > 0 && fl->head() != NULL) {
  1837         // Found a list with surplus, reset original hint
  1838         // and split out a free chunk which is returned.
  1839         _indexedFreeList[start].set_hint(hint);
  1840         FreeChunk* res = getFromListGreater(fl, numWords);
  1841         assert(res == NULL || res->isFree(),
  1842           "Should be returning a free chunk");
  1843         return res;
  1845       hint = fl->hint(); /* keep looking */
  1847     /* None found. */
  1848     it[start].set_hint(IndexSetSize);
  1850   return NULL;
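// --- Editorial illustration (not part of the original source) ---------------
// The hint chase above, restated over plain arrays: hint[s] names the next
// larger size believed to have a surplus; we follow hints until we find a list
// with both a surplus and a head, remember where we found it, and otherwise
// park the hint at the sentinel (the table size). The names here are ours.
#include <cstddef>

static size_t find_surplus_list(size_t start, size_t table_size,
                                size_t* hint, const long* surplus,
                                const bool* has_head) {
  size_t h = hint[start];
  while (h < table_size) {
    if (surplus[h] > 0 && has_head[h]) {
      hint[start] = h;          // remember the successful hint
      return h;                 // caller splits a chunk off this list
    }
    h = hint[h];                // keep looking up the chain
  }
  hint[start] = table_size;     // none found; sentinel means "no surplus above"
  return table_size;
}
// ----------------------------------------------------------------------------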
  1853 /* Requires fl->size >= numWords + MinChunkSize */
  1854 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
  1855   size_t numWords) {
  1856   FreeChunk *curr = fl->head();
  1857   assert(curr != NULL, "List is empty");
  1858   size_t oldNumWords = curr->size();
  1859   assert(numWords >= MinChunkSize, "Word size is too small");
  1860   assert(oldNumWords >= numWords + MinChunkSize,
  1861         "Size of chunks in the list is too small");
  1863   fl->removeChunk(curr);
  1864   // recorded indirectly by splitChunkAndReturnRemainder -
  1865   // smallSplit(oldNumWords, numWords);
  1866   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
  1867   // Does anything have to be done for the remainder in terms of
  1868   // fixing the card table?
  1869   assert(new_chunk == NULL || new_chunk->isFree(),
  1870     "Should be returning a free chunk");
  1871   return new_chunk;
  1874 FreeChunk*
  1875 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
  1876   size_t new_size) {
  1877   assert_locked();
  1878   size_t size = chunk->size();
  1879   assert(size > new_size, "Split from a smaller block?");
  1880   assert(is_aligned(chunk), "alignment problem");
  1881   assert(size == adjustObjectSize(size), "alignment problem");
  1882   size_t rem_size = size - new_size;
  1883   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
  1884   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
  1885   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
  1886   assert(is_aligned(ffc), "alignment problem");
  1887   ffc->setSize(rem_size);
  1888   ffc->linkNext(NULL);
  1889   ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  1890   // Above must occur before BOT is updated below.
  1891   // adjust block offset table
  1892   OrderAccess::storestore();
  1893   assert(chunk->isFree() && ffc->isFree(), "Error");
  1894   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
  1895   if (rem_size < SmallForDictionary) {
  1896     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  1897     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
  1898     returnChunkToFreeList(ffc);
  1899     split(size, rem_size);
  1900     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
  1901   } else {
  1902     returnChunkToDictionary(ffc);
  1903     split(size, rem_size);
  1905   chunk->setSize(new_size);
  1906   return chunk;
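// --- Editorial illustration (not part of the original source) ---------------
// The split arithmetic above as a standalone sketch: the remainder chunk
// starts new_size words into the original chunk and keeps the leftover words.
// The publication-order concern (mark the remainder free before updating the
// BOT) is only noted in a comment, since this sketch has no concurrent readers.
#include <cassert>
#include <cstddef>

struct SplitResult { size_t remainder_offset_words; size_t remainder_words; };

static SplitResult split_chunk_sketch(size_t chunk_words, size_t new_words,
                                      size_t min_chunk_words) {
  assert(chunk_words > new_words);
  SplitResult r = { new_words, chunk_words - new_words };
  // In the real code the remainder must itself be a legal free chunk:
  assert(r.remainder_words >= min_chunk_words);
  // ...then: set the remainder size, null its links (publish it as free),
  // OrderAccess::storestore(), update the BOT, and return the front part.
  return r;
}
// e.g. splitting a 100-word chunk for a 40-word request leaves a 60-word
// remainder starting 40 words in; the census records a split-death of 100 and
// split-births of 60 and 40 (see split() further below).
// ----------------------------------------------------------------------------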
  1909 void
  1910 CompactibleFreeListSpace::sweep_completed() {
  1911   // Now that space is probably plentiful, refill linear
  1912   // allocation blocks as needed.
  1913   refillLinearAllocBlocksIfNeeded();
  1916 void
  1917 CompactibleFreeListSpace::gc_prologue() {
  1918   assert_locked();
  1919   if (PrintFLSStatistics != 0) {
  1920     gclog_or_tty->print("Before GC:\n");
  1921     reportFreeListStatistics();
  1923   refillLinearAllocBlocksIfNeeded();
  1926 void
  1927 CompactibleFreeListSpace::gc_epilogue() {
  1928   assert_locked();
  1929   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
  1930     if (_smallLinearAllocBlock._word_size == 0)
  1931       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
  1933   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1934   _promoInfo.stopTrackingPromotions();
  1935   repairLinearAllocationBlocks();
  1936   // Print Space's stats
  1937   if (PrintFLSStatistics != 0) {
  1938     gclog_or_tty->print("After GC:\n");
  1939     reportFreeListStatistics();
  1943 // Iteration support, mostly delegated from a CMS generation
  1945 void CompactibleFreeListSpace::save_marks() {
  1946   // mark the "end" of the used space at the time of this call;
  1947   // note, however, that promoted objects from this point
  1948   // on are tracked in the _promoInfo below.
  1949   set_saved_mark_word(unallocated_block());
  1950   // inform allocator that promotions should be tracked.
  1951   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1952   _promoInfo.startTrackingPromotions();
  1955 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
  1956   assert(_promoInfo.tracking(), "No preceding save_marks?");
  1957   assert(SharedHeap::heap()->n_par_threads() == 0,
  1958          "Shouldn't be called if using parallel gc.");
  1959   return _promoInfo.noPromotions();
  1962 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
  1964 void CompactibleFreeListSpace::                                             \
  1965 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  1966   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
  1967          "Shouldn't be called (yet) during parallel part of gc.");          \
  1968   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
  1969   /*                                                                        \
  1970    * This also restores any displaced headers and removes the elements from \
  1971    * the iteration set as they are processed, so that we have a clean slate \
  1972    * at the end of the iteration. Note, thus, that if new objects are       \
  1973    * promoted as a result of the iteration they are iterated over as well.  \
  1974    */                                                                       \
  1975   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
  1978 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
  1981 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
  1982   // ugghh... how would one do this efficiently for a non-contiguous space?
  1983   guarantee(false, "NYI");
  1986 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  1987   return _smallLinearAllocBlock._word_size == 0;
  1990 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
  1991   // Fix up linear allocation blocks to look like free blocks
  1992   repairLinearAllocBlock(&_smallLinearAllocBlock);
  1995 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
  1996   assert_locked();
  1997   if (blk->_ptr != NULL) {
  1998     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
  1999            "Minimum block size requirement");
  2000     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
  2001     fc->setSize(blk->_word_size);
  2002     fc->linkPrev(NULL);   // mark as free
  2003     fc->dontCoalesce();
  2004     assert(fc->isFree(), "just marked it free");
  2005     assert(fc->cantCoalesce(), "just marked it uncoalescable");
  2009 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
  2010   assert_locked();
  2011   if (_smallLinearAllocBlock._ptr == NULL) {
  2012     assert(_smallLinearAllocBlock._word_size == 0,
  2013       "Size of linAB should be zero if the ptr is NULL");
  2014     // Reset the linAB refill and allocation size limit.
  2015     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
  2017   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
  2020 void
  2021 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
  2022   assert_locked();
  2023   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
  2024          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
  2025          "blk invariant");
  2026   if (blk->_ptr == NULL) {
  2027     refillLinearAllocBlock(blk);
  2029   if (PrintMiscellaneous && Verbose) {
  2030     if (blk->_word_size == 0) {
  2031       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
  2036 void
  2037 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  2038   assert_locked();
  2039   assert(blk->_word_size == 0 && blk->_ptr == NULL,
  2040          "linear allocation block should be empty");
  2041   FreeChunk* fc;
  2042   if (blk->_refillSize < SmallForDictionary &&
  2043       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
  2044     // A linAB's strategy might be to use small sizes to reduce
  2045     // fragmentation but still get the benefits of allocation from a
  2046     // linAB.
  2047   } else {
  2048     fc = getChunkFromDictionary(blk->_refillSize);
  2050   if (fc != NULL) {
  2051     blk->_ptr  = (HeapWord*)fc;
  2052     blk->_word_size = fc->size();
  2053     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
  2057 // Support for concurrent collection policy decisions.
  2058 bool CompactibleFreeListSpace::should_concurrent_collect() const {
  2059   // In the future we might want to add in fragmentation stats --
  2060   // including erosion of the "mountain" into this decision as well.
  2061   return !adaptive_freelists() && linearAllocationWouldFail();
  2064 // Support for compaction
  2066 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  2067   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  2068   // prepare_for_compaction() uses the space between live objects
  2069   // so that later phase can skip dead space quickly.  So verification
  2070   // of the free lists doesn't work after.
  2073 #define obj_size(q) adjustObjectSize(oop(q)->size())
  2074 #define adjust_obj_size(s) adjustObjectSize(s)
  2076 void CompactibleFreeListSpace::adjust_pointers() {
  2077   // In other versions of adjust_pointers(), a bail out
  2078   // based on the amount of live data in the generation
  2079   // (i.e., if 0, bail out) may be used.
  2080   // Cannot test used() == 0 here because the free lists have already
  2081   // been mangled by the compaction.
  2083   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
  2084   // See note about verification in prepare_for_compaction().
  2087 void CompactibleFreeListSpace::compact() {
  2088   SCAN_AND_COMPACT(obj_size);
  2091 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
  2092 // where fbs is free block sizes
  2093 double CompactibleFreeListSpace::flsFrag() const {
  2094   size_t itabFree = totalSizeInIndexedFreeLists();
  2095   double frag = 0.0;
  2096   size_t i;
  2098   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2099     double sz  = i;
  2100     frag      += _indexedFreeList[i].count() * (sz * sz);
  2103   double totFree = itabFree +
  2104                    _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
  2105   if (totFree > 0) {
  2106     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
  2107             (totFree * totFree));
  2108     frag = (double)1.0  - frag;
  2109   } else {
  2110     assert(frag == 0.0, "Follows from totFree == 0");
  2112   return frag;
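// --- Editorial illustration (not part of the original source) ---------------
// The fragmentation metric above on its own: frag = 1 - sum(sz_i^2)/(sum sz_i)^2
// over all free block sizes sz_i. One big block gives 0 (no fragmentation);
// many small blocks push the value toward 1. A standalone sketch and example:
#include <cstddef>

static double fls_frag_sketch(const size_t* block_sizes, size_t n) {
  double sum = 0.0, sum_sq = 0.0;
  for (size_t i = 0; i < n; i++) {
    double sz = (double)block_sizes[i];
    sum    += sz;
    sum_sq += sz * sz;
  }
  return (sum > 0.0) ? 1.0 - sum_sq / (sum * sum) : 0.0;
}
// e.g. a single 100-word free block gives 1 - 10000/10000 = 0.0, while ten
// 10-word blocks give 1 - 1000/10000 = 0.9.
// ----------------------------------------------------------------------------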
  2115 void CompactibleFreeListSpace::beginSweepFLCensus(
  2116   float inter_sweep_current,
  2117   float inter_sweep_estimate,
  2118   float intra_sweep_estimate) {
  2119   assert_locked();
  2120   size_t i;
  2121   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2122     FreeList* fl    = &_indexedFreeList[i];
  2123     if (PrintFLSStatistics > 1) {
  2124       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
  2126     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
  2127     fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
  2128     fl->set_beforeSweep(fl->count());
  2129     fl->set_bfrSurp(fl->surplus());
  2131   _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
  2132                                     inter_sweep_current,
  2133                                     inter_sweep_estimate,
  2134                                     intra_sweep_estimate);
  2137 void CompactibleFreeListSpace::setFLSurplus() {
  2138   assert_locked();
  2139   size_t i;
  2140   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2141     FreeList *fl = &_indexedFreeList[i];
  2142     fl->set_surplus(fl->count() -
  2143                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
  2147 void CompactibleFreeListSpace::setFLHints() {
  2148   assert_locked();
  2149   size_t i;
  2150   size_t h = IndexSetSize;
  2151   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
  2152     FreeList *fl = &_indexedFreeList[i];
  2153     fl->set_hint(h);
  2154     if (fl->surplus() > 0) {
  2155       h = i;
  2160 void CompactibleFreeListSpace::clearFLCensus() {
  2161   assert_locked();
  2162   int i;
  2163   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2164     FreeList *fl = &_indexedFreeList[i];
  2165     fl->set_prevSweep(fl->count());
  2166     fl->set_coalBirths(0);
  2167     fl->set_coalDeaths(0);
  2168     fl->set_splitBirths(0);
  2169     fl->set_splitDeaths(0);
  2173 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  2174   if (PrintFLSStatistics > 0) {
  2175     HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
  2176     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
  2177                            largestAddr);
  2179   setFLSurplus();
  2180   setFLHints();
  2181   if (PrintGC && PrintFLSCensus > 0) {
  2182     printFLCensus(sweep_count);
  2184   clearFLCensus();
  2185   assert_locked();
  2186   _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
  2189 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
  2190   if (size < SmallForDictionary) {
  2191     FreeList *fl = &_indexedFreeList[size];
  2192     return (fl->coalDesired() < 0) ||
  2193            ((int)fl->count() > fl->coalDesired());
  2194   } else {
  2195     return dictionary()->coalDictOverPopulated(size);
  2199 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
  2200   assert(size < SmallForDictionary, "Size too large for indexed list");
  2201   FreeList *fl = &_indexedFreeList[size];
  2202   fl->increment_coalBirths();
  2203   fl->increment_surplus();
  2206 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
  2207   assert(size < SmallForDictionary, "Size too large for indexed list");
  2208   FreeList *fl = &_indexedFreeList[size];
  2209   fl->increment_coalDeaths();
  2210   fl->decrement_surplus();
  2213 void CompactibleFreeListSpace::coalBirth(size_t size) {
  2214   if (size  < SmallForDictionary) {
  2215     smallCoalBirth(size);
  2216   } else {
  2217     dictionary()->dictCensusUpdate(size,
  2218                                    false /* split */,
  2219                                    true /* birth */);
  2223 void CompactibleFreeListSpace::coalDeath(size_t size) {
  2224   if (size < SmallForDictionary) {
  2225     smallCoalDeath(size);
  2226   } else {
  2227     dictionary()->dictCensusUpdate(size,
  2228                                    false /* split */,
  2229                                    false /* birth */);
  2233 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
  2234   assert(size < SmallForDictionary, "Size too large for indexed list");
  2235   FreeList *fl = &_indexedFreeList[size];
  2236   fl->increment_splitBirths();
  2237   fl->increment_surplus();
  2240 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
  2241   assert(size < SmallForDictionary, "Size too large for indexed list");
  2242   FreeList *fl = &_indexedFreeList[size];
  2243   fl->increment_splitDeaths();
  2244   fl->decrement_surplus();
  2247 void CompactibleFreeListSpace::splitBirth(size_t size) {
  2248   if (size  < SmallForDictionary) {
  2249     smallSplitBirth(size);
  2250   } else {
  2251     dictionary()->dictCensusUpdate(size,
  2252                                    true /* split */,
  2253                                    true /* birth */);
  2257 void CompactibleFreeListSpace::splitDeath(size_t size) {
  2258   if (size  < SmallForDictionary) {
  2259     smallSplitDeath(size);
  2260   } else {
  2261     dictionary()->dictCensusUpdate(size,
  2262                                    true /* split */,
  2263                                    false /* birth */);
  2267 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
  2268   size_t to2 = from - to1;
  2269   splitDeath(from);
  2270   splitBirth(to1);
  2271   splitBirth(to2);
  2274 void CompactibleFreeListSpace::print() const {
  2275   print_on(tty);
  2278 void CompactibleFreeListSpace::prepare_for_verify() {
  2279   assert_locked();
  2280   repairLinearAllocationBlocks();
  2281   // Verify that the SpoolBlocks look like free blocks of
  2282   // appropriate sizes... To be done ...
  2285 class VerifyAllBlksClosure: public BlkClosure {
  2286  private:
  2287   const CompactibleFreeListSpace* _sp;
  2288   const MemRegion                 _span;
  2289   HeapWord*                       _last_addr;
  2290   size_t                          _last_size;
  2291   bool                            _last_was_obj;
  2292   bool                            _last_was_live;
  2294  public:
  2295   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
  2296     MemRegion span) :  _sp(sp), _span(span),
  2297                        _last_addr(NULL), _last_size(0),
  2298                        _last_was_obj(false), _last_was_live(false) { }
  2300   virtual size_t do_blk(HeapWord* addr) {
  2301     size_t res;
  2302     bool   was_obj  = false;
  2303     bool   was_live = false;
  2304     if (_sp->block_is_obj(addr)) {
  2305       was_obj = true;
  2306       oop p = oop(addr);
  2307       guarantee(p->is_oop(), "Should be an oop");
  2308       res = _sp->adjustObjectSize(p->size());
  2309       if (_sp->obj_is_alive(addr)) {
  2310         was_live = true;
  2311         p->verify();
  2313     } else {
  2314       FreeChunk* fc = (FreeChunk*)addr;
  2315       res = fc->size();
  2316       if (FLSVerifyLists && !fc->cantCoalesce()) {
  2317         guarantee(_sp->verifyChunkInFreeLists(fc),
  2318                   "Chunk should be on a free list");
  2321     if (res == 0) {
  2322       gclog_or_tty->print_cr("Livelock: no rank reduction!");
  2323       gclog_or_tty->print_cr(
  2324         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
  2325         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
  2326         addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
  2327         _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
  2328       _sp->print_on(gclog_or_tty);
  2329       guarantee(false, "Seppuku!");
  2331     _last_addr = addr;
  2332     _last_size = res;
  2333     _last_was_obj  = was_obj;
  2334     _last_was_live = was_live;
  2335     return res;
  2337 };
  2339 class VerifyAllOopsClosure: public OopClosure {
  2340  private:
  2341   const CMSCollector*             _collector;
  2342   const CompactibleFreeListSpace* _sp;
  2343   const MemRegion                 _span;
  2344   const bool                      _past_remark;
  2345   const CMSBitMap*                _bit_map;
  2347  protected:
  2348   void do_oop(void* p, oop obj) {
  2349     if (_span.contains(obj)) { // the interior oop points into CMS heap
  2350       if (!_span.contains(p)) { // reference from outside CMS heap
  2351         // Should be a valid object; the first disjunct below allows
  2352         // us to sidestep an assertion in block_is_obj() that insists
  2353         // that p be in _sp. Note that several generations (and spaces)
  2354         // are spanned by _span (CMS heap) above.
  2355         guarantee(!_sp->is_in_reserved(obj) ||
  2356                   _sp->block_is_obj((HeapWord*)obj),
  2357                   "Should be an object");
  2358         guarantee(obj->is_oop(), "Should be an oop");
  2359         obj->verify();
  2360         if (_past_remark) {
  2361           // Remark has been completed, the object should be marked
  2362           guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
  2364       } else { // reference within CMS heap
  2365         if (_past_remark) {
  2366           // Remark has been completed -- so the referent should have
  2367           // been marked, if referring object is.
  2368           if (_bit_map->isMarked(_collector->block_start(p))) {
  2369             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
  2373     } else if (_sp->is_in_reserved(p)) {
  2374       // the reference is from FLS, and points out of FLS
  2375       guarantee(obj->is_oop(), "Should be an oop");
  2376       obj->verify();
  2380   template <class T> void do_oop_work(T* p) {
  2381     T heap_oop = oopDesc::load_heap_oop(p);
  2382     if (!oopDesc::is_null(heap_oop)) {
  2383       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2384       do_oop(p, obj);
  2388  public:
  2389   VerifyAllOopsClosure(const CMSCollector* collector,
  2390     const CompactibleFreeListSpace* sp, MemRegion span,
  2391     bool past_remark, CMSBitMap* bit_map) :
  2392     OopClosure(), _collector(collector), _sp(sp), _span(span),
  2393     _past_remark(past_remark), _bit_map(bit_map) { }
  2395   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
  2396   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
  2397 };
  2399 void CompactibleFreeListSpace::verify(bool ignored) const {
  2400   assert_lock_strong(&_freelistLock);
  2401   verify_objects_initialized();
  2402   MemRegion span = _collector->_span;
  2403   bool past_remark = (_collector->abstract_state() ==
  2404                       CMSCollector::Sweeping);
  2406   ResourceMark rm;
  2407   HandleMark  hm;
  2409   // Check integrity of CFL data structures
  2410   _promoInfo.verify();
  2411   _dictionary->verify();
  2412   if (FLSVerifyIndexTable) {
  2413     verifyIndexedFreeLists();
  2415   // Check integrity of all objects and free blocks in space
  2417     VerifyAllBlksClosure cl(this, span);
  2418     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
  2420   // Check that all references in the heap to FLS
  2421   // are to valid objects in FLS or that references in
  2422   // FLS are to valid objects elsewhere in the heap
  2423   if (FLSVerifyAllHeapReferences)
  2425     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
  2426       _collector->markBitMap());
  2427     CollectedHeap* ch = Universe::heap();
  2428     ch->oop_iterate(&cl);              // all oops in generations
  2429     ch->permanent_oop_iterate(&cl);    // all oops in perm gen
  2432   if (VerifyObjectStartArray) {
  2433     // Verify the block offset table
  2434     _bt.verify();
  2438 #ifndef PRODUCT
  2439 void CompactibleFreeListSpace::verifyFreeLists() const {
  2440   if (FLSVerifyLists) {
  2441     _dictionary->verify();
  2442     verifyIndexedFreeLists();
  2443   } else {
  2444     if (FLSVerifyDictionary) {
  2445       _dictionary->verify();
  2447     if (FLSVerifyIndexTable) {
  2448       verifyIndexedFreeLists();
  2452 #endif
  2454 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
  2455   size_t i = 0;
  2456   for (; i < MinChunkSize; i++) {
  2457     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
  2459   for (; i < IndexSetSize; i++) {
  2460     verifyIndexedFreeList(i);
  2464 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
  2465   FreeChunk* fc   =  _indexedFreeList[size].head();
  2466   FreeChunk* tail =  _indexedFreeList[size].tail();
  2467   size_t    num = _indexedFreeList[size].count();
  2468   size_t      n = 0;
  2469   guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
  2470   for (; fc != NULL; fc = fc->next(), n++) {
  2471     guarantee(fc->size() == size, "Size inconsistency");
  2472     guarantee(fc->isFree(), "!free?");
  2473     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
  2474     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
  2476   guarantee(n == num, "Incorrect count");
  2479 #ifndef PRODUCT
  2480 void CompactibleFreeListSpace::checkFreeListConsistency() const {
  2481   assert(_dictionary->minSize() <= IndexSetSize,
  2482     "Some sizes can't be allocated without recourse to"
  2483     " linear allocation buffers");
  2484   assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
  2485     "else MIN_TREE_CHUNK_SIZE is wrong");
  2486   assert((IndexSetStride == 2 && IndexSetStart == 2) ||
  2487          (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
  2488   assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
  2489       "Some for-loops may be incorrectly initialized");
  2490   assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
  2491       "For-loops that iterate over IndexSet with stride 2 may be wrong");
  2493 #endif
  2495 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  2496   assert_lock_strong(&_freelistLock);
  2497   FreeList total;
  2498   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  2499   FreeList::print_labels_on(gclog_or_tty, "size");
  2500   size_t totalFree = 0;
  2501   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2502     const FreeList *fl = &_indexedFreeList[i];
  2503     totalFree += fl->count() * fl->size();
  2504     if (i % (40*IndexSetStride) == 0) {
  2505       FreeList::print_labels_on(gclog_or_tty, "size");
  2507     fl->print_on(gclog_or_tty);
  2508     total.set_bfrSurp(    total.bfrSurp()     + fl->bfrSurp()    );
  2509     total.set_surplus(    total.surplus()     + fl->surplus()    );
  2510     total.set_desired(    total.desired()     + fl->desired()    );
  2511     total.set_prevSweep(  total.prevSweep()   + fl->prevSweep()  );
  2512     total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
  2513     total.set_count(      total.count()       + fl->count()      );
  2514     total.set_coalBirths( total.coalBirths()  + fl->coalBirths() );
  2515     total.set_coalDeaths( total.coalDeaths()  + fl->coalDeaths() );
  2516     total.set_splitBirths(total.splitBirths() + fl->splitBirths());
  2517     total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
  2519   total.print_on(gclog_or_tty, "TOTAL");
  2520   gclog_or_tty->print_cr("Total free in indexed lists "
  2521                          SIZE_FORMAT " words", totalFree);
  2522   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
  2523     (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
  2524             (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
  2525     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
  2526   _dictionary->printDictCensus();
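// --- Editorial illustration (not part of the original source) ---------------
// The two ratios printed above, pulled out into standalone helpers so the
// formulas are easy to read; the example numbers below are made up.
#include <cstddef>

static double census_growth(double births, double deaths, double prev_sweep) {
  // (splitBirths + coalBirths - splitDeaths - coalDeaths) / prevSweep
  return (births - deaths) / (prev_sweep != 0.0 ? prev_sweep : 1.0);
}

static double census_deficit(double desired, double count) {
  // (desired - count) / desired
  return (desired - count) / (desired != 0.0 ? desired : 1.0);
}
// e.g. 120 births, 80 deaths and 400 blocks at the previous sweep give
// census_growth(120, 80, 400) == 0.10; a desired population of 500 with 450
// blocks on hand gives census_deficit(500, 450) == 0.10.
// ----------------------------------------------------------------------------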
  2529 ///////////////////////////////////////////////////////////////////////////
  2530 // CFLS_LAB
  2531 ///////////////////////////////////////////////////////////////////////////
  2533 #define VECTOR_257(x)                                                                                  \
  2534   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
  2535   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2536      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2537      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2538      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2539      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2540      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2541      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2542      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2543      x }
  2545 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
  2546 // OldPLABSize, whose static default is different; if overridden at the
  2547 // command-line, this will get reinitialized via a call to
  2548 // modify_initialization() below.
  2549 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
  2550   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
  2551 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
  2552 int    CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
  2554 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
  2555   _cfls(cfls)
  2556 {
  2557   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
  2558   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2559        i < CompactibleFreeListSpace::IndexSetSize;
  2560        i += CompactibleFreeListSpace::IndexSetStride) {
  2561     _indexedFreeList[i].set_size(i);
  2562     _num_blocks[i] = 0;
  2563   }
  2564 }
  2566 static bool _CFLS_LAB_modified = false;
  2568 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
  2569   assert(!_CFLS_LAB_modified, "Call only once");
  2570   _CFLS_LAB_modified = true;
  2571   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2572        i < CompactibleFreeListSpace::IndexSetSize;
  2573        i += CompactibleFreeListSpace::IndexSetStride) {
  2574     _blocks_to_claim[i].modify(n, wt, true /* force */);
  2575   }
  2576 }
  2578 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  2579   FreeChunk* res;
  2580   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
  2581   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
  2582     // This locking manages sync with other large object allocations.
  2583     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
  2584                     Mutex::_no_safepoint_check_flag);
  2585     res = _cfls->getChunkFromDictionaryExact(word_sz);
  2586     if (res == NULL) return NULL;
  2587   } else {
  2588     FreeList* fl = &_indexedFreeList[word_sz];
  2589     if (fl->count() == 0) {
  2590       // Attempt to refill this local free list.
  2591       get_from_global_pool(word_sz, fl);
  2592       // If it didn't work, give up.
  2593       if (fl->count() == 0) return NULL;
  2594     }
  2595     res = fl->getChunkAtHead();
  2596     assert(res != NULL, "Why was count non-zero?");
  2597   }
  2598   res->markNotFree();
  2599   assert(!res->isFree(), "shouldn't be marked free");
  2600   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  2601   // mangle a just allocated object with a distinct pattern.
  2602   debug_only(res->mangleAllocated(word_sz));
  2603   return (HeapWord*)res;
  2604 }
  2606 // Get a chunk of blocks of the right size and update related
  2607 // book-keeping stats
  2608 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
  2609   // Get the #blocks we want to claim
  2610   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
  2611   assert(n_blks > 0, "Error");
  2612   assert(ResizePLAB || n_blks == OldPLABSize, "Error");
  2613   // In some cases, when the application has a phase change,
  2614   // there may be a sudden and sharp shift in the object survival
  2615   // profile, and updating the counts at the end of a scavenge
  2616   // may not be quick enough, giving rise to large scavenge pauses
  2617   // during these phase changes. It is beneficial to detect such
  2618   // changes on-the-fly during a scavenge and avoid such a phase-change
  2619   // pothole. The following code is a heuristic attempt to do that.
  2620   // It is protected by a product flag until we have gained
  2621   // enough experience with this heuristic and fine-tuned its behaviour.
  2622   // WARNING: This might increase fragmentation if we overreact to
  2623   // small spikes, so some kind of historical smoothing based on
  2624   // previous experience with the greater reactivity might be useful.
  2625   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
  2626   // default.
  2627   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
  2628     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
  2629     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
  2630     n_blks = MIN2(n_blks, CMSOldPLABMax);
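           // Illustrative example (the flag values here are assumptions, not
           // necessarily the product defaults): with CMSOldPLABToleranceFactor == 4,
           // CMSOldPLABNumRefills == 4, CMSOldPLABReactivityFactor == 2,
           // n_blks == 16 and _num_blocks[word_sz] == 1024, multiple is
           // 1024 / (4 * 4 * 16) == 4, so n_blks grows by 2 * 4 * 16 == 128
           // to 144 before being capped at CMSOldPLABMax.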
  2631   }
  2632   assert(n_blks > 0, "Error");
  2633   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
  2634   // Update stats table entry for this block size
  2635   _num_blocks[word_sz] += fl->count();
  2636 }
  2638 void CFLS_LAB::compute_desired_plab_size() {
  2639   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2640        i < CompactibleFreeListSpace::IndexSetSize;
  2641        i += CompactibleFreeListSpace::IndexSetStride) {
  2642     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
  2643            "Counter inconsistency");
  2644     if (_global_num_workers[i] > 0) {
  2645       // Need to smooth wrt historical average
  2646       if (ResizeOldPLAB) {
  2647         _blocks_to_claim[i].sample(
  2648           MAX2((size_t)CMSOldPLABMin,
  2649           MIN2((size_t)CMSOldPLABMax,
  2650                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
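               // Illustrative example (assumed numbers): with
               // _global_num_blocks[i] == 1200, _global_num_workers[i] == 4 and
               // CMSOldPLABNumRefills == 4, the sampled value is
               // 1200 / (4 * 4) == 75 blocks, clamped to
               // [CMSOldPLABMin, CMSOldPLABMax] before being folded into the
               // weighted average consulted by get_from_global_pool().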
  2651       }
  2652       // Reset counters for next round
  2653       _global_num_workers[i] = 0;
  2654       _global_num_blocks[i] = 0;
  2655       if (PrintOldPLAB) {
  2656         gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average());
  2657       }
  2658     }
  2659   }
  2660 }
  2662 void CFLS_LAB::retire(int tid) {
  2663   // We run this single-threaded, with the world stopped,
  2664   // so there is no need for locks and such.
  2665 #define CFLS_LAB_PARALLEL_ACCESS 0
  2666   NOT_PRODUCT(Thread* t = Thread::current();)
  2667   assert(Thread::current()->is_VM_thread(), "Error");
  2668   assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
  2669          "Will access uninitialized slot below");
  2670 #if CFLS_LAB_PARALLEL_ACCESS
  2671   for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
  2672        i > 0;
  2673        i -= CompactibleFreeListSpace::IndexSetStride) {
  2674 #else // CFLS_LAB_PARALLEL_ACCESS
  2675   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2676        i < CompactibleFreeListSpace::IndexSetSize;
  2677        i += CompactibleFreeListSpace::IndexSetStride) {
  2678 #endif // !CFLS_LAB_PARALLEL_ACCESS
  2679     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
  2680            "Can't retire more than what we obtained");
  2681     if (_num_blocks[i] > 0) {
  2682       size_t num_retire =  _indexedFreeList[i].count();
  2683       assert(_num_blocks[i] > num_retire, "Should have used at least one");
  2684       {
  2685 #if CFLS_LAB_PARALLEL_ACCESS
  2686         MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
  2687                         Mutex::_no_safepoint_check_flag);
  2688 #endif // CFLS_LAB_PARALLEL_ACCESS
  2689         // Update globals stats for num_blocks used
  2690         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
  2691         _global_num_workers[i]++;
  2692         assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
  2693         if (num_retire > 0) {
  2694           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
  2695           // Reset this list.
  2696           _indexedFreeList[i] = FreeList();
  2697           _indexedFreeList[i].set_size(i);
  2698         }
  2699       }
  2700       if (PrintOldPLAB) {
  2701         gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
  2702                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
  2703       }
  2704       // Reset stats for next round
  2705       _num_blocks[i]         = 0;
  2706     }
  2707   }
  2708 }
  2710 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
  2711   assert(fl->count() == 0, "Precondition.");
  2712   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
  2713          "Precondition");
  2715   // We'll try all multiples of word_sz in the indexed set, starting with
  2716   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, larger multiples;
  2717   // failing that, we try getting a big chunk from the dictionary and splitting it.
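         // Illustrative walk-through (assumed numbers): for word_sz == 4 and
         // n == 100 we first look at the size-4 indexed list; if it is empty and
         // CMSSplitIndexedFreeListBlocks is set, we look at the size-8 list (each
         // chunk split 2 ways), then size-12 (split 3 ways), and so on while
         // k * word_sz stays below IndexSetSize.  Only if all of those fail do we
         // fall through to carving one large dictionary chunk into ~n blocks of
         // word_sz words below.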
  2718   {
  2719     bool found;
  2720     int  k;
  2721     size_t cur_sz;
  2722     for (k = 1, cur_sz = k * word_sz, found = false;
  2723          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
  2724          (CMSSplitIndexedFreeListBlocks || k <= 1);
  2725          k++, cur_sz = k * word_sz) {
  2726       FreeList fl_for_cur_sz;  // Empty.
  2727       fl_for_cur_sz.set_size(cur_sz);
  2728       {
  2729         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
  2730                         Mutex::_no_safepoint_check_flag);
  2731         FreeList* gfl = &_indexedFreeList[cur_sz];
  2732         if (gfl->count() != 0) {
  2733           // nn is the number of chunks of size cur_sz that
  2734           // we'd need to split k-ways each, in order to create
  2735           // "n" chunks of size word_sz each.
  2736           const size_t nn = MAX2(n/k, (size_t)1);
  2737           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
  2738           found = true;
  2739           if (k > 1) {
  2740             // Update split death stats for the cur_sz-size blocks list:
  2741             // we increment the split death count by the number of blocks
  2742             // we just took from the cur_sz-size blocks list and which
  2743             // we will be splitting below.
  2744             ssize_t deaths = gfl->splitDeaths() +
  2745                              fl_for_cur_sz.count();
  2746             gfl->set_splitDeaths(deaths);
  2747           }
  2748         }
  2749       }
  2750       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
  2751       if (found) {
  2752         if (k == 1) {
  2753           fl->prepend(&fl_for_cur_sz);
  2754         } else {
  2755           // Divide each block on fl_for_cur_sz up k ways.
  2756           FreeChunk* fc;
  2757           while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
  2758             // Must do this in reverse order, so that anybody attempting to
  2759             // access the main chunk sees it as a single free block until we
  2760             // change it.
  2761             size_t fc_size = fc->size();
  2762             assert(fc->isFree(), "Error");
  2763             for (int i = k-1; i >= 0; i--) {
  2764               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2765               assert((i != 0) ||
  2766                         ((fc == ffc) && ffc->isFree() &&
  2767                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
  2768                         "Counting error");
  2769               ffc->setSize(word_sz);
  2770               ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2771               ffc->linkNext(NULL);
  2772               // Above must occur before BOT is updated below.
  2773               OrderAccess::storestore();
  2774               // Splitting from the right: after the decrement below, fc_size == i * word_sz.
  2775               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
  2776               fc_size -= word_sz;
  2777               assert(fc_size == i*word_sz, "Error");
  2778               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
  2779               _bt.verify_single_block((HeapWord*)fc, fc_size);
  2780               _bt.verify_single_block((HeapWord*)ffc, word_sz);
  2781               // Push this on "fl".
  2782               fl->returnChunkAtHead(ffc);
  2783             }
  2784             // TRAP
  2785             assert(fl->tail()->next() == NULL, "List invariant.");
  2786           }
  2787         }
  2788         // Update birth stats for this block size.
  2789         size_t num = fl->count();
  2790         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  2791                         Mutex::_no_safepoint_check_flag);
  2792         ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
  2793         _indexedFreeList[word_sz].set_splitBirths(births);
  2794         return;
  2795       }
  2796     }
  2797   }
  2798   // Otherwise, we'll split a block from the dictionary.
  2799   FreeChunk* fc = NULL;
  2800   FreeChunk* rem_fc = NULL;
  2801   size_t rem;
  2802   {
  2803     MutexLockerEx x(parDictionaryAllocLock(),
  2804                     Mutex::_no_safepoint_check_flag);
  2805     while (n > 0) {
  2806       fc = dictionary()->getChunk(MAX2(n * word_sz,
  2807                                   _dictionary->minSize()),
  2808                                   FreeBlockDictionary::atLeast);
  2809       if (fc != NULL) {
  2810         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
  2811         dictionary()->dictCensusUpdate(fc->size(),
  2812                                        true /*split*/,
  2813                                        false /*birth*/);
  2814         break;
  2815       } else {
  2816         n--;
  2817       }
  2818     }
  2819     if (fc == NULL) return;
  2820     // Otherwise, split up that block.
  2821     assert((ssize_t)n >= 1, "Control point invariant");
  2822     assert(fc->isFree(), "Error: should be a free block");
  2823     _bt.verify_single_block((HeapWord*)fc, fc->size());
  2824     const size_t nn = fc->size() / word_sz;
  2825     n = MIN2(nn, n);
  2826     assert((ssize_t)n >= 1, "Control point invariant");
  2827     rem = fc->size() - n * word_sz;
  2828     // If there is a remainder, and it's too small, allocate one fewer.
  2829     if (rem > 0 && rem < MinChunkSize) {
  2830       n--; rem += word_sz;
  2831     }
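           // Worked example (assumed numbers): if fc->size() == 103, word_sz == 10
           // and the caller asked for n == 12, then nn == 10, n becomes 10 and
           // rem == 3.  If MinChunkSize were, say, 7 words, that remainder could
           // not stand alone as a free block, so n drops to 9 and rem grows to 13.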
  2832     // Note that at this point we may have n == 0.
  2833     assert((ssize_t)n >= 0, "Control point invariant");
  2835     // If n is 0, the chunk fc that was found is not large
  2836     // enough to leave a viable remainder.  We are unable to
  2837     // allocate even one block.  Return fc to the
  2838     // dictionary and return, leaving "fl" empty.
  2839     if (n == 0) {
  2840       returnChunkToDictionary(fc);
  2841       assert(fl->count() == 0, "We never allocated any blocks");
  2842       return;
  2843     }
  2845     // First return the remainder, if any.
  2846     // Note that we hold the lock until we decide if we're going to give
  2847     // back the remainder to the dictionary, since a concurrent allocation
  2848     // may otherwise see the heap as empty.  (We're willing to take that
  2849     // hit if the block is a small block.)
  2850     if (rem > 0) {
  2851       size_t prefix_size = n * word_sz;
  2852       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
  2853       rem_fc->setSize(rem);
  2854       rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2855       rem_fc->linkNext(NULL);
  2856       // Above must occur before BOT is updated below.
  2857       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
  2858       OrderAccess::storestore();
  2859       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
  2860       assert(fc->isFree(), "Error");
  2861       fc->setSize(prefix_size);
  2862       if (rem >= IndexSetSize) {
  2863         returnChunkToDictionary(rem_fc);
  2864         dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
  2865         rem_fc = NULL;
  2866       }
  2867       // Otherwise, return it to the small list below.
  2868     }
  2869   }
  2870   if (rem_fc != NULL) {
  2871     MutexLockerEx x(_indexedFreeListParLocks[rem],
  2872                     Mutex::_no_safepoint_check_flag);
  2873     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
  2874     _indexedFreeList[rem].returnChunkAtHead(rem_fc);
  2875     smallSplitBirth(rem);
  2876   }
  2877   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  2878   // Now do the splitting up.
  2879   // Must do this in reverse order, so that anybody attempting to
  2880   // access the main chunk sees it as a single free block until we
  2881   // change it.
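         // For instance (illustrative): with n == 3, the loop below carves off the
         // chunks at fc + 2*word_sz and fc + word_sz first, while fc itself still
         // appears to concurrent readers as one free block of 3*word_sz words;
         // only after the loop is fc itself shrunk to word_sz.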
  2882   size_t fc_size = n * word_sz;
  2883   // All but first chunk in this loop
  2884   for (ssize_t i = n-1; i > 0; i--) {
  2885     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2886     ffc->setSize(word_sz);
  2887     ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2888     ffc->linkNext(NULL);
  2889     // Above must occur before BOT is updated below.
  2890     OrderAccess::storestore();
  2891     // Splitting from the right: after the decrement below, fc_size == i * word_sz.
  2892     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
  2893     fc_size -= word_sz;
  2894     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
  2895     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
  2896     _bt.verify_single_block((HeapWord*)fc, fc_size);
  2897     // Push this on "fl".
  2898     fl->returnChunkAtHead(ffc);
  2899   }
  2900   // First chunk
  2901   assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
  2902   // The blocks above should show their new sizes before the first block below
  2903   fc->setSize(word_sz);
  2904   fc->linkPrev(NULL);    // idempotent wrt free-ness, see assert above
  2905   fc->linkNext(NULL);
  2906   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  2907   _bt.verify_single_block((HeapWord*)fc, fc->size());
  2908   fl->returnChunkAtHead(fc);
  2910   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  2911   {
  2912     // Update the stats for this block size.
  2913     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  2914                     Mutex::_no_safepoint_check_flag);
  2915     const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
  2916     _indexedFreeList[word_sz].set_splitBirths(births);
  2917     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
  2918     // _indexedFreeList[word_sz].set_surplus(new_surplus);
  2919   }
  2921   // TRAP
  2922   assert(fl->tail()->next() == NULL, "List invariant.");
  2923 }
  2925 // Set up the space's par_seq_tasks structure for work claiming
  2926 // for parallel rescan. See CMSParRemarkTask where this is currently used.
  2927 // XXX Need to suitably abstract and generalize this and the next
  2928 // method into one.
  2929 void
  2930 CompactibleFreeListSpace::
  2931 initialize_sequential_subtasks_for_rescan(int n_threads) {
  2932   // The "size" of each task is fixed according to rescan_task_size.
  2933   assert(n_threads > 0, "Unexpected n_threads argument");
  2934   const size_t task_size = rescan_task_size();
  2935   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
  2936   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  2937   assert(n_tasks == 0 ||
  2938          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
  2939           (used_region().start() + n_tasks*task_size >= used_region().end())),
  2940          "n_tasks calculation incorrect");
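         // Worked example (assumed numbers): with used_region().word_size() == 10000
         // and rescan_task_size() == 4096, n_tasks == (10000 + 4095) / 4096 == 3;
         // the last task simply covers a shorter tail, which is what the assert
         // above verifies.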
  2941   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  2942   assert(!pst->valid(), "Clobbering existing data?");
  2943   // Sets the condition for completion of the subtask (how many threads
  2944   // need to finish in order to be done).
  2945   pst->set_n_threads(n_threads);
  2946   pst->set_n_tasks((int)n_tasks);
  2947 }
  2949 // Set up the space's par_seq_tasks structure for work claiming
  2950 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
  2951 void
  2952 CompactibleFreeListSpace::
  2953 initialize_sequential_subtasks_for_marking(int n_threads,
  2954                                            HeapWord* low) {
  2955   // The "size" of each task is fixed according to marking_task_size.
  2956   assert(n_threads > 0, "Unexpected n_threads argument");
  2957   const size_t task_size = marking_task_size();
  2958   assert(task_size > CardTableModRefBS::card_size_in_words &&
  2959          (task_size %  CardTableModRefBS::card_size_in_words == 0),
  2960          "Otherwise arithmetic below would be incorrect");
  2961   MemRegion span = _gen->reserved();
  2962   if (low != NULL) {
  2963     if (span.contains(low)) {
  2964       // Align low down to a card boundary so that
  2965       // we can use block_offset_careful() on span boundaries.
  2966       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
  2967                                  CardTableModRefBS::card_size);
  2968       // Clip span prefix at aligned_low
  2969       span = span.intersection(MemRegion(aligned_low, span.end()));
  2970     } else if (low > span.end()) {
  2971       span = MemRegion(low, low);  // Null region
  2972     } // else use entire span
  2973   }
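         // Illustrative example (assuming the usual 512-byte card size): a low of
         // heap_base + 0x1234 is rounded down by align_size_down() to
         // heap_base + 0x1200, so the clipped span still begins on a card
         // boundary, as the assert below requires.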
  2974   assert(span.is_empty() ||
  2975          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
  2976         "span should start at a card boundary");
  2977   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  2978   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  2979   assert(n_tasks == 0 ||
  2980          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
  2981           (span.start() + n_tasks*task_size >= span.end())),
  2982          "n_tasks calculation incorrect");
  2983   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  2984   assert(!pst->valid(), "Clobbering existing data?");
  2985   // Sets the condition for completion of the subtask (how many threads
  2986   // need to finish in order to be done).
  2987   pst->set_n_threads(n_threads);
  2988   pst->set_n_tasks((int)n_tasks);
  2989 }
