src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author       ysr
date         Tue, 16 Nov 2010 13:58:48 -0800
changeset    2301:9eecf81a02fb
parent       2294:4df7f8cba524
child        2314:f95d63e2154a
permissions  -rw-r--r--

7000578: CMS: assert(SafepointSynchronize::is_at_safepoint()) failed: Else races are possible
Summary: Weakened the assert in obj_is_alive() to allow its use at initialization time, when is_at_safepoint() normally reports false; added some related asserts to check the ordering of is_init_completed() after Universe::is_fully_initialized().
Reviewed-by: jcoomes
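
The shape of the change is visible in obj_is_alive() and verify_objects_initialized() in the listing below. As a minimal sketch (surrounding code elided, only the assertion logic shown), the safepoint assert is weakened to tolerate the VM start-up window, and the related checks demand the stronger guarantee only once initialization has completed:

    // obj_is_alive(): concede the start-up window, when is_init_completed()
    // is still false and is_at_safepoint() normally reports false.
    assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
           "Else races are possible");

    // verify_objects_initialized(): once init has completed and the Universe
    // is fully initialized, insist on being at a safepoint.
    if (is_init_completed()) {
      assert_locked_or_safepoint(Heap_lock);
      if (Universe::is_fully_initialized()) {
        guarantee(SafepointSynchronize::is_at_safepoint(),
                  "Required for objects to be initialized");
      }
    } // else make a concession at vm start-up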

     1 /*
     2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 # include "incls/_precompiled.incl"
    26 # include "incls/_compactibleFreeListSpace.cpp.incl"
    28 /////////////////////////////////////////////////////////////////////////
    29 //// CompactibleFreeListSpace
    30 /////////////////////////////////////////////////////////////////////////
    32 // highest ranked  free list lock rank
    33 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
    35 // Defaults are 0 so things will break badly if incorrectly initialized.
    36 int CompactibleFreeListSpace::IndexSetStart  = 0;
    37 int CompactibleFreeListSpace::IndexSetStride = 0;
    39 size_t MinChunkSize = 0;
    41 void CompactibleFreeListSpace::set_cms_values() {
    42   // Set CMS global values
    43   assert(MinChunkSize == 0, "already set");
    44   #define numQuanta(x,y) ((x+y-1)/y)
    45   MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
    47   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
    48   IndexSetStart  = MinObjAlignment;
    49   IndexSetStride = MinObjAlignment;
    50 }
    52 // Constructor
    53 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
    54   MemRegion mr, bool use_adaptive_freelists,
    55   FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
    56   _dictionaryChoice(dictionaryChoice),
    57   _adaptive_freelists(use_adaptive_freelists),
    58   _bt(bs, mr),
    59   // free list locks are in the range of values taken by _lockRank
    60   // This range currently is [_leaf+2, _leaf+3]
    61   // Note: this requires that CFLspace c'tors
    62   // are called serially in the order in which the locks
    63   // are acquired in the program text. This is true today.
    64   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
    65   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
    66                           "CompactibleFreeListSpace._dict_par_lock", true),
    67   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    68                     CMSRescanMultiple),
    69   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    70                     CMSConcMarkMultiple),
    71   _collector(NULL)
    72 {
    73   _bt.set_space(this);
    74   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    75   // We have all of "mr", all of which we place in the dictionary
    76   // as one big chunk. We'll need to decide here which of several
    77   // possible alternative dictionary implementations to use. For
    78   // now the choice is easy, since we have only one working
    79   // implementation, namely, the simple binary tree (splaying
    80   // temporarily disabled).
    81   switch (dictionaryChoice) {
    82     case FreeBlockDictionary::dictionarySplayTree:
    83     case FreeBlockDictionary::dictionarySkipList:
    84     default:
    85       warning("dictionaryChoice: selected option not understood; using"
    86               " default BinaryTreeDictionary implementation instead.");
    87     case FreeBlockDictionary::dictionaryBinaryTree:
    88       _dictionary = new BinaryTreeDictionary(mr);
    89       break;
    90   }
    91   assert(_dictionary != NULL, "CMS dictionary initialization");
    92   // The indexed free lists are initially all empty and are lazily
    93   // filled in on demand. Initialize the array elements to NULL.
    94   initializeIndexedFreeListArray();
    96   // Not using adaptive free lists assumes that allocation is first
    97   // from the linAB's.  Also a cms perm gen which can be compacted
    98   // has to have the klass's klassKlass allocated at a lower
    99   // address in the heap than the klass so that the klassKlass is
   100   // moved to its new location before the klass is moved.
   101   // Set the _refillSize for the linear allocation blocks
   102   if (!use_adaptive_freelists) {
   103     FreeChunk* fc = _dictionary->getChunk(mr.word_size());
   104     // The small linAB initially has all the space and will allocate
   105     // a chunk of any size.
   106     HeapWord* addr = (HeapWord*) fc;
   107     _smallLinearAllocBlock.set(addr, fc->size() ,
   108       1024*SmallForLinearAlloc, fc->size());
   109     // Note that _unallocated_block is not updated here.
   110     // Allocations from the linear allocation block should
   111     // update it.
   112   } else {
   113     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
   114                                SmallForLinearAlloc);
   115   }
   116   // CMSIndexedFreeListReplenish should be at least 1
   117   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
   118   _promoInfo.setSpace(this);
   119   if (UseCMSBestFit) {
   120     _fitStrategy = FreeBlockBestFitFirst;
   121   } else {
   122     _fitStrategy = FreeBlockStrategyNone;
   123   }
   124   checkFreeListConsistency();
   126   // Initialize locks for parallel case.
   128   if (CollectedHeap::use_parallel_gc_threads()) {
   129     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   130       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
   131                                               "a freelist par lock",
   132                                               true);
   133       if (_indexedFreeListParLocks[i] == NULL)
   134         vm_exit_during_initialization("Could not allocate a par lock");
   135       DEBUG_ONLY(
   136         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
   137       )
   138     }
   139     _dictionary->set_par_lock(&_parDictionaryAllocLock);
   140   }
   141 }
   143 // Like CompactibleSpace forward() but always calls cross_threshold() to
   144 // update the block offset table.  Removed initialize_threshold call because
   145 // CFLS does not use a block offset array for contiguous spaces.
   146 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
   147                                     CompactPoint* cp, HeapWord* compact_top) {
   148   // q is alive
   149   // First check if we should switch compaction space
   150   assert(this == cp->space, "'this' should be current compaction space.");
   151   size_t compaction_max_size = pointer_delta(end(), compact_top);
   152   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
   153     "virtual adjustObjectSize_v() method is not correct");
   154   size_t adjusted_size = adjustObjectSize(size);
   155   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
   156          "no small fragments allowed");
   157   assert(minimum_free_block_size() == MinChunkSize,
   158          "for de-virtualized reference below");
   159   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
   160   if (adjusted_size + MinChunkSize > compaction_max_size &&
   161       adjusted_size != compaction_max_size) {
   162     do {
   163       // switch to next compaction space
   164       cp->space->set_compaction_top(compact_top);
   165       cp->space = cp->space->next_compaction_space();
   166       if (cp->space == NULL) {
   167         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
   168         assert(cp->gen != NULL, "compaction must succeed");
   169         cp->space = cp->gen->first_compaction_space();
   170         assert(cp->space != NULL, "generation must have a first compaction space");
   171       }
   172       compact_top = cp->space->bottom();
   173       cp->space->set_compaction_top(compact_top);
   174       // The correct adjusted_size may not be the same as that for this method
   175       // (i.e., cp->space may no longer be "this"), so adjust the size again.
   176       // Use the virtual method, which was not used above in order to save
   177       // the virtual dispatch.
   178       adjusted_size = cp->space->adjust_object_size_v(size);
   179       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
   180       assert(cp->space->minimum_free_block_size() == 0, "just checking");
   181     } while (adjusted_size > compaction_max_size);
   182   }
   184   // store the forwarding pointer into the mark word
   185   if ((HeapWord*)q != compact_top) {
   186     q->forward_to(oop(compact_top));
   187     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
   188   } else {
   189     // if the object isn't moving we can just set the mark to the default
   190     // mark and handle it specially later on.
   191     q->init_mark();
   192     assert(q->forwardee() == NULL, "should be forwarded to NULL");
   193   }
   195   VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
   196   compact_top += adjusted_size;
   198   // we need to update the offset table so that the beginnings of objects can be
   199   // found during scavenge.  Note that we are updating the offset table based on
   200   // where the object will be once the compaction phase finishes.
   202   // Always call cross_threshold().  A contiguous space can only call it when
   203   // the compaction_top exceeds the current threshold, but that shortcut does
   204   // not apply to a non-contiguous space.
   205   cp->threshold =
   206     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
   207   return compact_top;
   208 }
   210 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
   211 // and use of single_block instead of alloc_block.  The name here is not really
   212 // appropriate - maybe a more general name could be invented for both the
   213 // contiguous and noncontiguous spaces.
   215 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
   216   _bt.single_block(start, the_end);
   217   return end();
   218 }
   220 // Initialize them to NULL.
   221 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
   222   for (size_t i = 0; i < IndexSetSize; i++) {
   223     // Note that on platforms where objects are double word aligned,
   224     // the odd array elements are not used.  It is convenient, however,
   225     // to map directly from the object size to the array element.
   226     _indexedFreeList[i].reset(IndexSetSize);
   227     _indexedFreeList[i].set_size(i);
   228     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   229     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   230     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   231     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   232   }
   233 }
   235 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
   236   for (int i = 1; i < IndexSetSize; i++) {
   237     assert(_indexedFreeList[i].size() == (size_t) i,
   238       "Indexed free list sizes are incorrect");
   239     _indexedFreeList[i].reset(IndexSetSize);
   240     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   241     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   242     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   243     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   244   }
   245 }
   247 void CompactibleFreeListSpace::reset(MemRegion mr) {
   248   resetIndexedFreeListArray();
   249   dictionary()->reset();
   250   if (BlockOffsetArrayUseUnallocatedBlock) {
   251     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
   252     // Everything's allocated until proven otherwise.
   253     _bt.set_unallocated_block(end());
   254   }
   255   if (!mr.is_empty()) {
   256     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
   257     _bt.single_block(mr.start(), mr.word_size());
   258     FreeChunk* fc = (FreeChunk*) mr.start();
   259     fc->setSize(mr.word_size());
   260     if (mr.word_size() >= IndexSetSize ) {
   261       returnChunkToDictionary(fc);
   262     } else {
   263       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
   264       _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
   265     }
   266   }
   267   _promoInfo.reset();
   268   _smallLinearAllocBlock._ptr = NULL;
   269   _smallLinearAllocBlock._word_size = 0;
   270 }
   272 void CompactibleFreeListSpace::reset_after_compaction() {
   273   // Reset the space to the new reality - one free chunk.
   274   MemRegion mr(compaction_top(), end());
   275   reset(mr);
   276   // Now refill the linear allocation block(s) if possible.
   277   if (_adaptive_freelists) {
   278     refillLinearAllocBlocksIfNeeded();
   279   } else {
   280     // Place as much of mr in the linAB as we can get,
   281     // provided it was big enough to go into the dictionary.
   282     FreeChunk* fc = dictionary()->findLargestDict();
   283     if (fc != NULL) {
   284       assert(fc->size() == mr.word_size(),
   285              "Why was the chunk broken up?");
   286       removeChunkFromDictionary(fc);
   287       HeapWord* addr = (HeapWord*) fc;
   288       _smallLinearAllocBlock.set(addr, fc->size() ,
   289         1024*SmallForLinearAlloc, fc->size());
   290       // Note that _unallocated_block is not updated here.
   291     }
   292   }
   293 }
   295 // Walks the entire dictionary, returning a coterminal
   296 // chunk, if it exists. Use with caution since it involves
   297 // a potentially complete walk of a potentially large tree.
   298 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
   300   assert_lock_strong(&_freelistLock);
   302   return dictionary()->find_chunk_ends_at(end());
   303 }
   306 #ifndef PRODUCT
   307 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
   308   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   309     _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
   310   }
   311 }
   313 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
   314   size_t sum = 0;
   315   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   316     sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
   317   }
   318   return sum;
   319 }
   321 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
   322   size_t count = 0;
   323   for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
   324     debug_only(
   325       ssize_t total_list_count = 0;
   326       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   327          fc = fc->next()) {
   328         total_list_count++;
   329       }
   330       assert(total_list_count ==  _indexedFreeList[i].count(),
   331         "Count in list is incorrect");
   332     )
   333     count += _indexedFreeList[i].count();
   334   }
   335   return count;
   336 }
   338 size_t CompactibleFreeListSpace::totalCount() {
   339   size_t num = totalCountInIndexedFreeLists();
   340   num +=  dictionary()->totalCount();
   341   if (_smallLinearAllocBlock._word_size != 0) {
   342     num++;
   343   }
   344   return num;
   345 }
   346 #endif
   348 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
   349   FreeChunk* fc = (FreeChunk*) p;
   350   return fc->isFree();
   351 }
   353 size_t CompactibleFreeListSpace::used() const {
   354   return capacity() - free();
   355 }
   357 size_t CompactibleFreeListSpace::free() const {
   358   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   359   // if you do this while the structures are in flux you
   360   // may get an approximate answer only; for instance
   361   // because there is concurrent allocation either
   362   // directly by mutators or for promotion during a GC.
   363   // It's "MT-safe", however, in the sense that you are guaranteed
   364   // not to crash and burn, for instance, because of walking
   365   // pointers that could disappear as you were walking them.
   366   // The approximation arises because the various components
   367   // that are read below are not read atomically (and,
   368   // further, the computation of totalSizeInIndexedFreeLists()
   369   // is itself non-atomic). The normal use of
   370   // this is during a resize operation at the end of GC
   371   // and at that time you are guaranteed to get the
   372   // correct actual value. However, for instance, this is
   373   // also read completely asynchronously by the "perf-sampler"
   374   // that supports jvmstat, and you are apt to see the values
   375   // flicker in such cases.
   376   assert(_dictionary != NULL, "No _dictionary?");
   377   return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
   378           totalSizeInIndexedFreeLists() +
   379           _smallLinearAllocBlock._word_size) * HeapWordSize;
   380 }
   382 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
   383   assert(_dictionary != NULL, "No _dictionary?");
   384   assert_locked();
   385   size_t res = _dictionary->maxChunkSize();
   386   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
   387                        (size_t) SmallForLinearAlloc - 1));
   388   // XXX the following could potentially be pretty slow;
   389   // should one, pessimally for the rare cases when res
   390   // calculated above is less than IndexSetSize,
   391   // just return res calculated above? My reasoning was that
   392   // those cases will be so rare that the extra time spent doesn't
   393   // really matter....
   394   // Note: do not change the loop test i >= res + IndexSetStride
   395   // to i > res below, because i is unsigned and res may be zero.
   396   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
   397        i -= IndexSetStride) {
   398     if (_indexedFreeList[i].head() != NULL) {
   399       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   400       return i;
   401     }
   402   }
   403   return res;
   404 }
   406 void LinearAllocBlock::print_on(outputStream* st) const {
   407   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
   408             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
   409             _ptr, _word_size, _refillSize, _allocation_size_limit);
   410 }
   412 void CompactibleFreeListSpace::print_on(outputStream* st) const {
   413   st->print_cr("COMPACTIBLE FREELIST SPACE");
   414   st->print_cr(" Space:");
   415   Space::print_on(st);
   417   st->print_cr("promoInfo:");
   418   _promoInfo.print_on(st);
   420   st->print_cr("_smallLinearAllocBlock");
   421   _smallLinearAllocBlock.print_on(st);
   423   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
   425   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
   426                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
   427 }
   429 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
   430 const {
   431   reportIndexedFreeListStatistics();
   432   gclog_or_tty->print_cr("Layout of Indexed Freelists");
   433   gclog_or_tty->print_cr("---------------------------");
   434   FreeList::print_labels_on(st, "size");
   435   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   436     _indexedFreeList[i].print_on(gclog_or_tty);
   437     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   438          fc = fc->next()) {
   439       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
   440                           fc, (HeapWord*)fc + i,
   441                           fc->cantCoalesce() ? "\t CC" : "");
   442     }
   443   }
   444 }
   446 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
   447 const {
   448   _promoInfo.print_on(st);
   449 }
   451 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
   452 const {
   453   _dictionary->reportStatistics();
   454   st->print_cr("Layout of Freelists in Tree");
   455   st->print_cr("---------------------------");
   456   _dictionary->print_free_lists(st);
   457 }
   459 class BlkPrintingClosure: public BlkClosure {
   460   const CMSCollector*             _collector;
   461   const CompactibleFreeListSpace* _sp;
   462   const CMSBitMap*                _live_bit_map;
   463   const bool                      _post_remark;
   464   outputStream*                   _st;
   465 public:
   466   BlkPrintingClosure(const CMSCollector* collector,
   467                      const CompactibleFreeListSpace* sp,
   468                      const CMSBitMap* live_bit_map,
   469                      outputStream* st):
   470     _collector(collector),
   471     _sp(sp),
   472     _live_bit_map(live_bit_map),
   473     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
   474     _st(st) { }
   475   size_t do_blk(HeapWord* addr);
   476 };
   478 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
   479   size_t sz = _sp->block_size_no_stall(addr, _collector);
   480   assert(sz != 0, "Should always be able to compute a size");
   481   if (_sp->block_is_obj(addr)) {
   482     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
   483     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
   484       addr,
   485       dead ? "dead" : "live",
   486       sz,
   487       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
   488     if (CMSPrintObjectsInDump && !dead) {
   489       oop(addr)->print_on(_st);
   490       _st->print_cr("--------------------------------------");
   491     }
   492   } else { // free block
   493     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
   494       addr, sz, CMSPrintChunksInDump ? ":" : ".");
   495     if (CMSPrintChunksInDump) {
   496       ((FreeChunk*)addr)->print_on(_st);
   497       _st->print_cr("--------------------------------------");
   498     }
   499   }
   500   return sz;
   501 }
   503 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
   504   outputStream* st) {
   505   st->print_cr("\n=========================");
   506   st->print_cr("Block layout in CMS Heap:");
   507   st->print_cr("=========================");
   508   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
   509   blk_iterate(&bpcl);
   511   st->print_cr("\n=======================================");
   512   st->print_cr("Order & Layout of Promotion Info Blocks");
   513   st->print_cr("=======================================");
   514   print_promo_info_blocks(st);
   516   st->print_cr("\n===========================");
   517   st->print_cr("Order of Indexed Free Lists");
   518   st->print_cr("=========================");
   519   print_indexed_free_lists(st);
   521   st->print_cr("\n=================================");
   522   st->print_cr("Order of Free Lists in Dictionary");
   523   st->print_cr("=================================");
   524   print_dictionary_free_lists(st);
   525 }
   528 void CompactibleFreeListSpace::reportFreeListStatistics() const {
   529   assert_lock_strong(&_freelistLock);
   530   assert(PrintFLSStatistics != 0, "Reporting error");
   531   _dictionary->reportStatistics();
   532   if (PrintFLSStatistics > 1) {
   533     reportIndexedFreeListStatistics();
   534     size_t totalSize = totalSizeInIndexedFreeLists() +
   535                        _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
   536     gclog_or_tty->print(" free=%ld frag=%1.4f\n", totalSize, flsFrag());
   537   }
   538 }
   540 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
   541   assert_lock_strong(&_freelistLock);
   542   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
   543                       "--------------------------------\n");
   544   size_t totalSize = totalSizeInIndexedFreeLists();
   545   size_t   freeBlocks = numFreeBlocksInIndexedFreeLists();
   546   gclog_or_tty->print("Total Free Space: %d\n", totalSize);
   547   gclog_or_tty->print("Max   Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
   548   gclog_or_tty->print("Number of Blocks: %d\n", freeBlocks);
   549   if (freeBlocks != 0) {
   550     gclog_or_tty->print("Av.  Block  Size: %d\n", totalSize/freeBlocks);
   551   }
   552 }
   554 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
   555   size_t res = 0;
   556   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   557     debug_only(
   558       ssize_t recount = 0;
   559       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   560          fc = fc->next()) {
   561         recount += 1;
   562       }
   563       assert(recount == _indexedFreeList[i].count(),
   564         "Incorrect count in list");
   565     )
   566     res += _indexedFreeList[i].count();
   567   }
   568   return res;
   569 }
   571 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
   572   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
   573     if (_indexedFreeList[i].head() != NULL) {
   574       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   575       return (size_t)i;
   576     }
   577   }
   578   return 0;
   579 }
   581 void CompactibleFreeListSpace::set_end(HeapWord* value) {
   582   HeapWord* prevEnd = end();
   583   assert(prevEnd != value, "unnecessary set_end call");
   584   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   585         "New end is below unallocated block");
   586   _end = value;
   587   if (prevEnd != NULL) {
   588     // Resize the underlying block offset table.
   589     _bt.resize(pointer_delta(value, bottom()));
   590     if (value <= prevEnd) {
   591       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   592              "New end is below unallocated block");
   593     } else {
   594       // Now, take this new chunk and add it to the free blocks.
   595       // Note that the BOT has not yet been updated for this block.
   596       size_t newFcSize = pointer_delta(value, prevEnd);
   597       // XXX This is REALLY UGLY and should be fixed up. XXX
   598       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
   599         // Mark the boundary of the new block in BOT
   600         _bt.mark_block(prevEnd, value);
   601         // put it all in the linAB
   602         if (ParallelGCThreads == 0) {
   603           _smallLinearAllocBlock._ptr = prevEnd;
   604           _smallLinearAllocBlock._word_size = newFcSize;
   605           repairLinearAllocBlock(&_smallLinearAllocBlock);
   606         } else { // ParallelGCThreads > 0
   607           MutexLockerEx x(parDictionaryAllocLock(),
   608                           Mutex::_no_safepoint_check_flag);
   609           _smallLinearAllocBlock._ptr = prevEnd;
   610           _smallLinearAllocBlock._word_size = newFcSize;
   611           repairLinearAllocBlock(&_smallLinearAllocBlock);
   612         }
   613         // Births of chunks put into a LinAB are not recorded.  Births
   614         // of chunks as they are allocated out of a LinAB are.
   615       } else {
   616         // Add the block to the free lists, if possible coalescing it
   617         // with the last free block, and update the BOT and census data.
   618         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
   619       }
   620     }
   621   }
   622 }
   624 class FreeListSpace_DCTOC : public Filtering_DCTOC {
   625   CompactibleFreeListSpace* _cfls;
   626   CMSCollector* _collector;
   627 protected:
   628   // Override.
   629 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
   630   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
   631                                        HeapWord* bottom, HeapWord* top, \
   632                                        ClosureType* cl);                \
   633       void walk_mem_region_with_cl_par(MemRegion mr,                    \
   634                                        HeapWord* bottom, HeapWord* top, \
   635                                        ClosureType* cl);                \
   636     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
   637                                        HeapWord* bottom, HeapWord* top, \
   638                                        ClosureType* cl)
   639   walk_mem_region_with_cl_DECL(OopClosure);
   640   walk_mem_region_with_cl_DECL(FilteringClosure);
   642 public:
   643   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
   644                       CMSCollector* collector,
   645                       OopClosure* cl,
   646                       CardTableModRefBS::PrecisionStyle precision,
   647                       HeapWord* boundary) :
   648     Filtering_DCTOC(sp, cl, precision, boundary),
   649     _cfls(sp), _collector(collector) {}
   650 };
   652 // We de-virtualize the block-related calls below, since we know that our
   653 // space is a CompactibleFreeListSpace.
   654 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
   655 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
   656                                                  HeapWord* bottom,              \
   657                                                  HeapWord* top,                 \
   658                                                  ClosureType* cl) {             \
   659    if (SharedHeap::heap()->n_par_threads() > 0) {                               \
   660      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   661    } else {                                                                     \
   662      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   663    }                                                                            \
   664 }                                                                               \
   665 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
   666                                                       HeapWord* bottom,         \
   667                                                       HeapWord* top,            \
   668                                                       ClosureType* cl) {        \
   669   /* Skip parts that are before "mr", in case "block_start" sent us             \
   670      back too far. */                                                           \
   671   HeapWord* mr_start = mr.start();                                              \
   672   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
   673   HeapWord* next = bottom + bot_size;                                           \
   674   while (next < mr_start) {                                                     \
   675     bottom = next;                                                              \
   676     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
   677     next = bottom + bot_size;                                                   \
   678   }                                                                             \
   679                                                                                 \
   680   while (bottom < top) {                                                        \
   681     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
   682         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   683                     oop(bottom)) &&                                             \
   684         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   685       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   686       bottom += _cfls->adjustObjectSize(word_sz);                               \
   687     } else {                                                                    \
   688       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
   689     }                                                                           \
   690   }                                                                             \
   691 }                                                                               \
   692 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
   693                                                         HeapWord* bottom,       \
   694                                                         HeapWord* top,          \
   695                                                         ClosureType* cl) {      \
   696   /* Skip parts that are before "mr", in case "block_start" sent us             \
   697      back too far. */                                                           \
   698   HeapWord* mr_start = mr.start();                                              \
   699   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
   700   HeapWord* next = bottom + bot_size;                                           \
   701   while (next < mr_start) {                                                     \
   702     bottom = next;                                                              \
   703     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
   704     next = bottom + bot_size;                                                   \
   705   }                                                                             \
   706                                                                                 \
   707   while (bottom < top) {                                                        \
   708     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
   709         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   710                     oop(bottom)) &&                                             \
   711         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   712       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   713       bottom += _cfls->adjustObjectSize(word_sz);                               \
   714     } else {                                                                    \
   715       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
   716     }                                                                           \
   717   }                                                                             \
   718 }
   720 // (There are only two of these, rather than N, because the split is due
   721 // only to the introduction of the FilteringClosure, a local part of the
   722 // impl of this abstraction.)
   723 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
   724 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
   726 DirtyCardToOopClosure*
   727 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
   728                                       CardTableModRefBS::PrecisionStyle precision,
   729                                       HeapWord* boundary) {
   730   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
   731 }
   734 // Note on locking for the space iteration functions:
   735 // since the collector's iteration activities are concurrent with
   736 // allocation activities by mutators, absent a suitable mutual exclusion
   737 // mechanism the iterators may go awry. For instance, a block being iterated
   738 // may suddenly be allocated or divided up and part of it allocated and
   739 // so on.
   741 // Apply the given closure to each block in the space.
   742 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
   743   assert_lock_strong(freelistLock());
   744   HeapWord *cur, *limit;
   745   for (cur = bottom(), limit = end(); cur < limit;
   746        cur += cl->do_blk_careful(cur));
   747 }
   749 // Apply the given closure to each block in the space.
   750 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
   751   assert_lock_strong(freelistLock());
   752   HeapWord *cur, *limit;
   753   for (cur = bottom(), limit = end(); cur < limit;
   754        cur += cl->do_blk(cur));
   755 }
   757 // Apply the given closure to each oop in the space.
   758 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
   759   assert_lock_strong(freelistLock());
   760   HeapWord *cur, *limit;
   761   size_t curSize;
   762   for (cur = bottom(), limit = end(); cur < limit;
   763        cur += curSize) {
   764     curSize = block_size(cur);
   765     if (block_is_obj(cur)) {
   766       oop(cur)->oop_iterate(cl);
   767     }
   768   }
   769 }
   771 // Apply the given closure to each oop in the space \intersect memory region.
   772 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
   773   assert_lock_strong(freelistLock());
   774   if (is_empty()) {
   775     return;
   776   }
   777   MemRegion cur = MemRegion(bottom(), end());
   778   mr = mr.intersection(cur);
   779   if (mr.is_empty()) {
   780     return;
   781   }
   782   if (mr.equals(cur)) {
   783     oop_iterate(cl);
   784     return;
   785   }
   786   assert(mr.end() <= end(), "just took an intersection above");
   787   HeapWord* obj_addr = block_start(mr.start());
   788   HeapWord* t = mr.end();
   790   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
   791   if (block_is_obj(obj_addr)) {
   792     // Handle first object specially.
   793     oop obj = oop(obj_addr);
   794     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
   795   } else {
   796     FreeChunk* fc = (FreeChunk*)obj_addr;
   797     obj_addr += fc->size();
   798   }
   799   while (obj_addr < t) {
   800     HeapWord* obj = obj_addr;
   801     obj_addr += block_size(obj_addr);
   802     // If "obj_addr" is not greater than top, then the
   803     // entire object "obj" is within the region.
   804     if (obj_addr <= t) {
   805       if (block_is_obj(obj)) {
   806         oop(obj)->oop_iterate(cl);
   807       }
   808     } else {
   809       // "obj" extends beyond end of region
   810       if (block_is_obj(obj)) {
   811         oop(obj)->oop_iterate(&smr_blk);
   812       }
   813       break;
   814     }
   815   }
   816 }
   818 // NOTE: In the following methods, in order to safely be able to
   819 // apply the closure to an object, we need to be sure that the
   820 // object has been initialized. We are guaranteed that an object
   821 // is initialized if we are holding the Heap_lock with the
   822 // world stopped.
   823 void CompactibleFreeListSpace::verify_objects_initialized() const {
   824   if (is_init_completed()) {
   825     assert_locked_or_safepoint(Heap_lock);
   826     if (Universe::is_fully_initialized()) {
   827       guarantee(SafepointSynchronize::is_at_safepoint(),
   828                 "Required for objects to be initialized");
   829     }
   830   } // else make a concession at vm start-up
   831 }
   833 // Apply the given closure to each object in the space
   834 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
   835   assert_lock_strong(freelistLock());
   836   NOT_PRODUCT(verify_objects_initialized());
   837   HeapWord *cur, *limit;
   838   size_t curSize;
   839   for (cur = bottom(), limit = end(); cur < limit;
   840        cur += curSize) {
   841     curSize = block_size(cur);
   842     if (block_is_obj(cur)) {
   843       blk->do_object(oop(cur));
   844     }
   845   }
   846 }
   848 // Apply the given closure to each live object in the space
   849 //   The usage of CompactibleFreeListSpace
   850 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
   851 // objects in the space to hold references to objects that are no longer
   852 // valid.  For example, an object may reference another object
   853 // that has already been swept up (collected).  This method uses
   854 // obj_is_alive() to determine whether it is safe to apply the closure to
   855 // an object.  See obj_is_alive() for details on how liveness of an
   856 // object is decided.
   858 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
   859   assert_lock_strong(freelistLock());
   860   NOT_PRODUCT(verify_objects_initialized());
   861   HeapWord *cur, *limit;
   862   size_t curSize;
   863   for (cur = bottom(), limit = end(); cur < limit;
   864        cur += curSize) {
   865     curSize = block_size(cur);
   866     if (block_is_obj(cur) && obj_is_alive(cur)) {
   867       blk->do_object(oop(cur));
   868     }
   869   }
   870 }
   872 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
   873                                                   UpwardsObjectClosure* cl) {
   874   assert_locked(freelistLock());
   875   NOT_PRODUCT(verify_objects_initialized());
   876   Space::object_iterate_mem(mr, cl);
   877 }
   879 // Callers of this iterator beware: The closure application should
   880 // be robust in the face of uninitialized objects and should (always)
   881 // return a correct size so that the next addr + size below gives us a
   882 // valid block boundary. [See for instance,
   883 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   884 // in ConcurrentMarkSweepGeneration.cpp.]
   885 HeapWord*
   886 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
   887   assert_lock_strong(freelistLock());
   888   HeapWord *addr, *last;
   889   size_t size;
   890   for (addr = bottom(), last  = end();
   891        addr < last; addr += size) {
   892     FreeChunk* fc = (FreeChunk*)addr;
   893     if (fc->isFree()) {
   894       // Since we hold the free list lock, which protects direct
   895       // allocation in this generation by mutators, a free object
   896       // will remain free throughout this iteration code.
   897       size = fc->size();
   898     } else {
   899       // Note that the object need not necessarily be initialized,
   900       // because (for instance) the free list lock does NOT protect
   901       // object initialization. The closure application below must
   902       // therefore be correct in the face of uninitialized objects.
   903       size = cl->do_object_careful(oop(addr));
   904       if (size == 0) {
   905         // An unparsable object found. Signal early termination.
   906         return addr;
   907       }
   908     }
   909   }
   910   return NULL;
   911 }
   913 // Callers of this iterator beware: The closure application should
   914 // be robust in the face of uninitialized objects and should (always)
   915 // return a correct size so that the next addr + size below gives us a
   916 // valid block boundary. [See for instance,
   917 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   918 // in ConcurrentMarkSweepGeneration.cpp.]
   919 HeapWord*
   920 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
   921   ObjectClosureCareful* cl) {
   922   assert_lock_strong(freelistLock());
   923   // Can't use used_region() below because it may not necessarily
   924   // be the same as [bottom(),end()); although we could
   925   // use [used_region().start(),round_to(used_region().end(),CardSize)),
   926   // that appears too cumbersome, so we just do the simpler check
   927   // in the assertion below.
   928   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
   929          "mr should be non-empty and within used space");
   930   HeapWord *addr, *end;
   931   size_t size;
   932   for (addr = block_start_careful(mr.start()), end  = mr.end();
   933        addr < end; addr += size) {
   934     FreeChunk* fc = (FreeChunk*)addr;
   935     if (fc->isFree()) {
   936       // Since we hold the free list lock, which protects direct
   937       // allocation in this generation by mutators, a free object
   938       // will remain free throughout this iteration code.
   939       size = fc->size();
   940     } else {
   941       // Note that the object need not necessarily be initialized,
   942       // because (for instance) the free list lock does NOT protect
   943       // object initialization. The closure application below must
   944       // therefore be correct in the face of uninitialized objects.
   945       size = cl->do_object_careful_m(oop(addr), mr);
   946       if (size == 0) {
   947         // An unparsable object found. Signal early termination.
   948         return addr;
   949       }
   950     }
   951   }
   952   return NULL;
   953 }
   956 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
   957   NOT_PRODUCT(verify_objects_initialized());
   958   return _bt.block_start(p);
   959 }
   961 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
   962   return _bt.block_start_careful(p);
   963 }
   965 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   966   NOT_PRODUCT(verify_objects_initialized());
   967   // This must be volatile, or else there is a danger that the compiler
   968   // will compile the code below into a sometimes-infinite loop, by keeping
   969   // the value read the first time in a register.
   970   while (true) {
   971     // We must do this until we get a consistent view of the object.
   972     if (FreeChunk::indicatesFreeChunk(p)) {
   973       volatile FreeChunk* fc = (volatile FreeChunk*)p;
   974       size_t res = fc->size();
   975       // If the object is still a free chunk, return the size, else it
   976       // has been allocated so try again.
   977       if (FreeChunk::indicatesFreeChunk(p)) {
   978         assert(res != 0, "Block size should not be 0");
   979         return res;
   980       }
   981     } else {
   982       // must read from what 'p' points to in each loop.
   983       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
   984       if (k != NULL) {
   985         assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
   986         oop o = (oop)p;
   987         assert(o->is_parsable(), "Should be parsable");
   988         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
   989         size_t res = o->size_given_klass(k->klass_part());
   990         res = adjustObjectSize(res);
   991         assert(res != 0, "Block size should not be 0");
   992         return res;
   993       }
   994     }
   995   }
   996 }
   998 // A variant of the above that uses the Printezis bits for
   999 // unparsable but allocated objects. This avoids any possible
  1000 // stalls waiting for mutators to initialize objects, and is
  1001 // thus potentially faster than the variant above. However,
  1002 // this variant may return a zero size for a block that is
  1003 // under mutation and for which a consistent size cannot be
  1004 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
  1005 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
  1006                                                      const CMSCollector* c)
  1007 const {
  1008   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1009   // This must be volatile, or else there is a danger that the compiler
  1010   // will compile the code below into a sometimes-infinite loop, by keeping
  1011   // the value read the first time in a register.
  1012   DEBUG_ONLY(uint loops = 0;)
  1013   while (true) {
  1014     // We must do this until we get a consistent view of the object.
  1015     if (FreeChunk::indicatesFreeChunk(p)) {
  1016       volatile FreeChunk* fc = (volatile FreeChunk*)p;
  1017       size_t res = fc->size();
  1018       if (FreeChunk::indicatesFreeChunk(p)) {
  1019         assert(res != 0, "Block size should not be 0");
  1020         assert(loops == 0, "Should be 0");
  1021         return res;
  1022       }
  1023     } else {
  1024       // must read from what 'p' points to in each loop.
  1025       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
  1026       if (k != NULL &&
  1027           ((oopDesc*)p)->is_parsable() &&
  1028           ((oopDesc*)p)->is_conc_safe()) {
  1029         assert(k->is_oop(), "Should really be klass oop.");
  1030         oop o = (oop)p;
  1031         assert(o->is_oop(), "Should be an oop");
  1032         size_t res = o->size_given_klass(k->klass_part());
  1033         res = adjustObjectSize(res);
  1034         assert(res != 0, "Block size should not be 0");
  1035         return res;
  1036       } else {
  1037         return c->block_size_if_printezis_bits(p);
  1038       }
  1039     }
  1040     assert(loops == 0, "Can loop at most once");
  1041     DEBUG_ONLY(loops++;)
  1042   }
  1043 }
  1045 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
  1046   NOT_PRODUCT(verify_objects_initialized());
  1047   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1048   FreeChunk* fc = (FreeChunk*)p;
  1049   if (fc->isFree()) {
  1050     return fc->size();
  1051   } else {
  1052     // Ignore mark word because this may be a recently promoted
  1053     // object whose mark word is used to chain together grey
  1054     // objects (the last one would have a null value).
  1055     assert(oop(p)->is_oop(true), "Should be an oop");
  1056     return adjustObjectSize(oop(p)->size());
  1057   }
  1058 }
  1060 // This implementation assumes that the property of "being an object" is
  1061 // stable.  But being a free chunk may not be (because of parallel
  1062 // promotion.)
  1063 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
  1064   FreeChunk* fc = (FreeChunk*)p;
  1065   assert(is_in_reserved(p), "Should be in space");
  1066   // When doing a mark-sweep-compact of the CMS generation, this
  1067   // assertion may fail because prepare_for_compaction() uses
  1068   // space that is garbage to maintain information on ranges of
  1069   // live objects so that these live ranges can be moved as a whole.
  1070   // Comment out this assertion until that problem can be solved
  1071   // (i.e., that the block start calculation may look at objects
  1072   // at address below "p" in finding the object that contains "p"
  1073   // and those objects (if garbage) may have been modified to hold
  1074   // live range information.
  1075   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
  1076   //        "Should be a block boundary");
  1077   if (FreeChunk::indicatesFreeChunk(p)) return false;
  1078   klassOop k = oop(p)->klass_or_null();
  1079   if (k != NULL) {
  1080     // Ignore mark word because it may have been used to
  1081     // chain together promoted objects (the last one
  1082     // would have a null value).
  1083     assert(oop(p)->is_oop(true), "Should be an oop");
  1084     return true;
  1085   } else {
  1086     return false;  // Was not an object at the start of collection.
  1087   }
  1088 }
  1090 // Check if the object is alive. This fact is checked either by consulting
  1091 // the main marking bitmap in the sweeping phase or, if it's a permanent
  1092 // generation and we're not in the sweeping phase, by checking the
  1093 // perm_gen_verify_bit_map where we store the "deadness" information if
  1094 // we did not sweep the perm gen in the most recent previous GC cycle.
  1095 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
  1096   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
  1097          "Else races are possible");
  1098   assert(block_is_obj(p), "The address should point to an object");
  1100   // If we're sweeping, we use object liveness information from the main bit map
  1101   // for both perm gen and old gen.
  1102   // We don't need to lock the bitmap (live_map or dead_map below), because
  1103   // EITHER we are in the middle of the sweeping phase, and the
  1104   // main marking bit map (live_map below) is locked,
  1105   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
  1106   // is stable, because it's mutated only in the sweeping phase.
  1107   // NOTE: This method is also used by jmap where, if class unloading is
  1108   // off, the results can return "false" for legitimate perm objects,
  1109   // when we are not in the midst of a sweeping phase, which can result
  1110   // in jmap not reporting certain perm gen objects. This will be moot
  1111   // if/when the perm gen goes away in the future.
  1112   if (_collector->abstract_state() == CMSCollector::Sweeping) {
  1113     CMSBitMap* live_map = _collector->markBitMap();
  1114     return live_map->par_isMarked((HeapWord*) p);
  1115   } else {
  1116     // If we're not currently sweeping and we haven't swept the perm gen in
  1117     // the previous concurrent cycle then we may have dead but unswept objects
  1118     // in the perm gen. In this case, we use the "deadness" information
  1119     // that we had saved in perm_gen_verify_bit_map at the last sweep.
  1120     if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
  1121       if (_collector->verifying()) {
  1122         CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
  1123         // Object is marked in the dead_map bitmap at the previous sweep
  1124         // when we know that it's dead; if the bitmap is not allocated then
  1125         // the object is alive.
  1126         return (dead_map->sizeInBits() == 0) // bit_map has been allocated
  1127                || !dead_map->par_isMarked((HeapWord*) p);
  1128       } else {
  1129         return false; // We can't say for sure if it's live, so we say that it's dead.
  1130       }
  1131     }
  1132   }
  1133   return true;
  1134 }
  1136 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
  1137   FreeChunk* fc = (FreeChunk*)p;
  1138   assert(is_in_reserved(p), "Should be in space");
  1139   assert(_bt.block_start(p) == p, "Should be a block boundary");
  1140   if (!fc->isFree()) {
  1141     // Ignore mark word because it may have been used to
  1142     // chain together promoted objects (the last one
  1143     // would have a null value).
  1144     assert(oop(p)->is_oop(true), "Should be an oop");
  1145     return true;
  1146   }
  1147   return false;
  1148 }
  1150 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
  1151 // approximate answer if you don't hold the freelistlock when you call this.
  1152 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
  1153   size_t size = 0;
  1154   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  1155     debug_only(
  1156       // We may be calling here without the lock in which case we
  1157       // won't do this modest sanity check.
  1158       if (freelistLock()->owned_by_self()) {
  1159         size_t total_list_size = 0;
  1160         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
  1161           fc = fc->next()) {
  1162           total_list_size += i;
  1164         assert(total_list_size == i * _indexedFreeList[i].count(),
  1165                "Count in list is incorrect");
  1168     size += i * _indexedFreeList[i].count();
  1170   return size;
  1173 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  1174   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  1175   return allocate(size);
  1178 HeapWord*
  1179 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
  1180   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
  1183 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  1184   assert_lock_strong(freelistLock());
  1185   HeapWord* res = NULL;
  1186   assert(size == adjustObjectSize(size),
  1187          "use adjustObjectSize() before calling into allocate()");
  1189   if (_adaptive_freelists) {
  1190     res = allocate_adaptive_freelists(size);
  1191   } else {  // non-adaptive free lists
  1192     res = allocate_non_adaptive_freelists(size);
  1195   if (res != NULL) {
  1196     // check that res does lie in this space!
  1197     assert(is_in_reserved(res), "Not in this space!");
  1198     assert(is_aligned((void*)res), "alignment check");
  1200     FreeChunk* fc = (FreeChunk*)res;
  1201     fc->markNotFree();
  1202     assert(!fc->isFree(), "shouldn't be marked free");
  1203     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
  1204     // Verify that the block offset table shows this to
  1205     // be a single block, but not one which is unallocated.
  1206     _bt.verify_single_block(res, size);
  1207     _bt.verify_not_unallocated(res, size);
  1208     // mangle a just allocated object with a distinct pattern.
  1209     debug_only(fc->mangleAllocated(size));
  1212   return res;
  1215 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
  1216   HeapWord* res = NULL;
  1217   // try and use linear allocation for smaller blocks
  1218   if (size < _smallLinearAllocBlock._allocation_size_limit) {
  1219     // if successful, the following also adjusts block offset table
  1220     res = getChunkFromSmallLinearAllocBlock(size);
  1222   // Else triage to indexed lists for smaller sizes
  1223   if (res == NULL) {
  1224     if (size < SmallForDictionary) {
  1225       res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1226     } else {
  1227       // else get it from the big dictionary; if even this doesn't
  1228       // work we are out of luck.
  1229       res = (HeapWord*)getChunkFromDictionaryExact(size);
  1233   return res;
  1236 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
  1237   assert_lock_strong(freelistLock());
  1238   HeapWord* res = NULL;
  1239   assert(size == adjustObjectSize(size),
  1240          "use adjustObjectSize() before calling into allocate()");
  1242   // Strategy
  1243   //   if small
1244   //     exact size from small object indexed list
  1245   //     small or large linear allocation block (linAB) as appropriate
  1246   //     take from lists of greater sized chunks
  1247   //   else
  1248   //     dictionary
  1249   //     small or large linear allocation block if it has the space
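  // For example (request sizes are illustrative): a 20-word request first
  // tries _indexedFreeList[20], then the small linAB, and then the larger
  // indexed lists (and ultimately the dictionary) via getChunkFromGreater();
  // a request of IndexSetSize words or more (IndexSetSize is 257, as asserted
  // in the CFLS_LAB constructor below) goes straight to the dictionary,
  // falling back to the small linAB remainder if the dictionary cannot
  // satisfy it.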
  1250   // Try allocating exact size from indexTable first
  1251   if (size < IndexSetSize) {
  1252     res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1253     if(res != NULL) {
  1254       assert(res != (HeapWord*)_indexedFreeList[size].head(),
  1255         "Not removed from free list");
  1256       // no block offset table adjustment is necessary on blocks in
  1257       // the indexed lists.
  1259     // Try allocating from the small LinAB
  1260     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
  1261         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
  1262         // if successful, the above also adjusts block offset table
  1263         // Note that this call will refill the LinAB to
1264         // satisfy the request.  This differs from the
1265         // evm behavior.
  1266         // Don't record chunk off a LinAB?  smallSplitBirth(size);
  1267     } else {
  1268       // Raid the exact free lists larger than size, even if they are not
  1269       // overpopulated.
  1270       res = (HeapWord*) getChunkFromGreater(size);
  1272   } else {
  1273     // Big objects get allocated directly from the dictionary.
  1274     res = (HeapWord*) getChunkFromDictionaryExact(size);
  1275     if (res == NULL) {
  1276       // Try hard not to fail since an allocation failure will likely
  1277       // trigger a synchronous GC.  Try to get the space from the
  1278       // allocation blocks.
  1279       res = getChunkFromSmallLinearAllocBlockRemainder(size);
  1283   return res;
  1286 // A worst-case estimate of the space required (in HeapWords) to expand the heap
  1287 // when promoting obj.
  1288 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  1289   // Depending on the object size, expansion may require refilling either a
  1290   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
  1291   // is added because the dictionary may over-allocate to avoid fragmentation.
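  // Worked example with made-up numbers: for obj_size = 130 words, a
  // non-adaptive small linAB _refillSize of 256 words, a _promoInfo refill
  // size of 64 words, and MinChunkSize = 4 words, the estimate below is
  // MAX2(130, 256) + 64 + 2*4 = 328 words.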
  1292   size_t space = obj_size;
  1293   if (!_adaptive_freelists) {
  1294     space = MAX2(space, _smallLinearAllocBlock._refillSize);
  1296   space += _promoInfo.refillSize() + 2 * MinChunkSize;
  1297   return space;
  1300 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  1301   FreeChunk* ret;
  1303   assert(numWords >= MinChunkSize, "Size is less than minimum");
  1304   assert(linearAllocationWouldFail() || bestFitFirst(),
  1305     "Should not be here");
  1307   size_t i;
  1308   size_t currSize = numWords + MinChunkSize;
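  // The extra MinChunkSize of slack ensures that once numWords are carved out
  // of a donor chunk below, the remainder is itself at least a minimum-sized
  // free chunk (see the "Chunk is too small" assert and
  // splitChunkAndReturnRemainder()).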
  1309   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  1310   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
  1311     FreeList* fl = &_indexedFreeList[i];
  1312     if (fl->head()) {
  1313       ret = getFromListGreater(fl, numWords);
  1314       assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
  1315       return ret;
  1319   currSize = MAX2((size_t)SmallForDictionary,
  1320                   (size_t)(numWords + MinChunkSize));
  1322   /* Try to get a chunk that satisfies request, while avoiding
  1323      fragmentation that can't be handled. */
  1325     ret =  dictionary()->getChunk(currSize);
  1326     if (ret != NULL) {
  1327       assert(ret->size() - numWords >= MinChunkSize,
  1328              "Chunk is too small");
  1329       _bt.allocated((HeapWord*)ret, ret->size());
  1330       /* Carve returned chunk. */
  1331       (void) splitChunkAndReturnRemainder(ret, numWords);
  1332       /* Label this as no longer a free chunk. */
  1333       assert(ret->isFree(), "This chunk should be free");
  1334       ret->linkPrev(NULL);
  1336     assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
  1337     return ret;
  1339   ShouldNotReachHere();
  1342 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
  1343   const {
  1344   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  1345   return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
  1348 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
  1349   if (fc->size() >= IndexSetSize) {
  1350     return dictionary()->verifyChunkInFreeLists(fc);
  1351   } else {
  1352     return verifyChunkInIndexedFreeLists(fc);
  1356 #ifndef PRODUCT
  1357 void CompactibleFreeListSpace::assert_locked() const {
  1358   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
  1361 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
  1362   CMSLockVerifier::assert_locked(lock);
  1364 #endif
  1366 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
  1367   // In the parallel case, the main thread holds the free list lock
1368   // on behalf of the parallel threads.
  1369   FreeChunk* fc;
  1371     // If GC is parallel, this might be called by several threads.
  1372     // This should be rare enough that the locking overhead won't affect
  1373     // the sequential code.
  1374     MutexLockerEx x(parDictionaryAllocLock(),
  1375                     Mutex::_no_safepoint_check_flag);
  1376     fc = getChunkFromDictionary(size);
  1378   if (fc != NULL) {
  1379     fc->dontCoalesce();
  1380     assert(fc->isFree(), "Should be free, but not coalescable");
  1381     // Verify that the block offset table shows this to
  1382     // be a single block, but not one which is unallocated.
  1383     _bt.verify_single_block((HeapWord*)fc, fc->size());
  1384     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  1386   return fc;
  1389 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
  1390   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  1391   assert_locked();
  1393   // if we are tracking promotions, then first ensure space for
  1394   // promotion (including spooling space for saving header if necessary).
  1395   // then allocate and copy, then track promoted info if needed.
  1396   // When tracking (see PromotionInfo::track()), the mark word may
  1397   // be displaced and in this case restoration of the mark word
  1398   // occurs in the (oop_since_save_marks_)iterate phase.
  1399   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
  1400     return NULL;
  1402   // Call the allocate(size_t, bool) form directly to avoid the
  1403   // additional call through the allocate(size_t) form.  Having
1404   // the compiler inline the call is problematic because allocate(size_t)
  1405   // is a virtual method.
  1406   HeapWord* res = allocate(adjustObjectSize(obj_size));
  1407   if (res != NULL) {
  1408     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
  1409     // if we should be tracking promotions, do so.
  1410     if (_promoInfo.tracking()) {
  1411         _promoInfo.track((PromotedObject*)res);
  1414   return oop(res);
  1417 HeapWord*
  1418 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
  1419   assert_locked();
  1420   assert(size >= MinChunkSize, "minimum chunk size");
  1421   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
  1422     "maximum from smallLinearAllocBlock");
  1423   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
  1426 HeapWord*
  1427 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
  1428                                                        size_t size) {
  1429   assert_locked();
  1430   assert(size >= MinChunkSize, "too small");
  1431   HeapWord* res = NULL;
1432   // Try to do linear allocation from blk; bail out if the block is currently empty.
  1433   if (blk->_word_size == 0) {
  1434     // We have probably been unable to fill this either in the prologue or
  1435     // when it was exhausted at the last linear allocation. Bail out until
  1436     // next time.
  1437     assert(blk->_ptr == NULL, "consistency check");
  1438     return NULL;
  1440   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
  1441   res = getChunkFromLinearAllocBlockRemainder(blk, size);
  1442   if (res != NULL) return res;
  1444   // about to exhaust this linear allocation block
  1445   if (blk->_word_size == size) { // exactly satisfied
  1446     res = blk->_ptr;
  1447     _bt.allocated(res, blk->_word_size);
  1448   } else if (size + MinChunkSize <= blk->_refillSize) {
  1449     size_t sz = blk->_word_size;
1450     // Update _unallocated_block if the size is such that the chunk would be
1451     // returned to the indexed free list.  All other chunks in the indexed
1452     // free lists are allocated from the dictionary, so _unallocated_block
1453     // has already been adjusted for them.  Do it here so that the cost is
1454     // paid for all chunks added back to the indexed free lists.
  1455     if (sz < SmallForDictionary) {
  1456       _bt.allocated(blk->_ptr, sz);
  1458     // Return the chunk that isn't big enough, and then refill below.
  1459     addChunkToFreeLists(blk->_ptr, sz);
  1460     splitBirth(sz);
  1461     // Don't keep statistics on adding back chunk from a LinAB.
  1462   } else {
  1463     // A refilled block would not satisfy the request.
  1464     return NULL;
  1467   blk->_ptr = NULL; blk->_word_size = 0;
  1468   refillLinearAllocBlock(blk);
  1469   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
  1470          "block was replenished");
  1471   if (res != NULL) {
  1472     splitBirth(size);
  1473     repairLinearAllocBlock(blk);
  1474   } else if (blk->_ptr != NULL) {
  1475     res = blk->_ptr;
  1476     size_t blk_size = blk->_word_size;
  1477     blk->_word_size -= size;
  1478     blk->_ptr  += size;
  1479     splitBirth(size);
  1480     repairLinearAllocBlock(blk);
  1481     // Update BOT last so that other (parallel) GC threads see a consistent
  1482     // view of the BOT and free blocks.
  1483     // Above must occur before BOT is updated below.
  1484     OrderAccess::storestore();
  1485     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1487   return res;
  1490 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
  1491                                         LinearAllocBlock* blk,
  1492                                         size_t size) {
  1493   assert_locked();
  1494   assert(size >= MinChunkSize, "too small");
  1496   HeapWord* res = NULL;
  1497   // This is the common case.  Keep it simple.
  1498   if (blk->_word_size >= size + MinChunkSize) {
  1499     assert(blk->_ptr != NULL, "consistency check");
  1500     res = blk->_ptr;
  1501     // Note that the BOT is up-to-date for the linAB before allocation.  It
  1502     // indicates the start of the linAB.  The split_block() updates the
  1503     // BOT for the linAB after the allocation (indicates the start of the
  1504     // next chunk to be allocated).
  1505     size_t blk_size = blk->_word_size;
  1506     blk->_word_size -= size;
  1507     blk->_ptr  += size;
  1508     splitBirth(size);
  1509     repairLinearAllocBlock(blk);
  1510     // Update BOT last so that other (parallel) GC threads see a consistent
  1511     // view of the BOT and free blocks.
  1512     // Above must occur before BOT is updated below.
  1513     OrderAccess::storestore();
  1514     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1515     _bt.allocated(res, size);
  1517   return res;
  1520 FreeChunk*
  1521 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
  1522   assert_locked();
  1523   assert(size < SmallForDictionary, "just checking");
  1524   FreeChunk* res;
  1525   res = _indexedFreeList[size].getChunkAtHead();
  1526   if (res == NULL) {
  1527     res = getChunkFromIndexedFreeListHelper(size);
  1529   _bt.verify_not_unallocated((HeapWord*) res, size);
  1530   assert(res == NULL || res->size() == size, "Incorrect block size");
  1531   return res;
  1534 FreeChunk*
  1535 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
  1536   bool replenish) {
  1537   assert_locked();
  1538   FreeChunk* fc = NULL;
  1539   if (size < SmallForDictionary) {
  1540     assert(_indexedFreeList[size].head() == NULL ||
  1541       _indexedFreeList[size].surplus() <= 0,
  1542       "List for this size should be empty or under populated");
  1543     // Try best fit in exact lists before replenishing the list
  1544     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
  1545       // Replenish list.
  1546       //
  1547       // Things tried that failed.
  1548       //   Tried allocating out of the two LinAB's first before
  1549       // replenishing lists.
  1550       //   Tried small linAB of size 256 (size in indexed list)
  1551       // and replenishing indexed lists from the small linAB.
  1552       //
  1553       FreeChunk* newFc = NULL;
  1554       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
  1555       if (replenish_size < SmallForDictionary) {
  1556         // Do not replenish from an underpopulated size.
  1557         if (_indexedFreeList[replenish_size].surplus() > 0 &&
  1558             _indexedFreeList[replenish_size].head() != NULL) {
  1559           newFc = _indexedFreeList[replenish_size].getChunkAtHead();
  1560         } else if (bestFitFirst()) {
  1561           newFc = bestFitSmall(replenish_size);
  1564       if (newFc == NULL && replenish_size > size) {
  1565         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
  1566         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1568       // Note: The split-death stats update for the block obtained above
1569       // is recorded below, precisely when we know we are actually going
1570       // to split it into more than one piece.
  1571       if (newFc != NULL) {
  1572         if  (replenish || CMSReplenishIntermediate) {
  1573           // Replenish this list and return one block to caller.
  1574           size_t i;
  1575           FreeChunk *curFc, *nextFc;
  1576           size_t num_blk = newFc->size() / size;
  1577           assert(num_blk >= 1, "Smaller than requested?");
  1578           assert(newFc->size() % size == 0, "Should be integral multiple of request");
  1579           if (num_blk > 1) {
  1580             // we are sure we will be splitting the block just obtained
  1581             // into multiple pieces; record the split-death of the original
  1582             splitDeath(replenish_size);
  1584           // carve up and link blocks 0, ..., num_blk - 2
  1585           // The last chunk is not added to the lists but is returned as the
  1586           // free chunk.
  1587           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
  1588                i = 0;
  1589                i < (num_blk - 1);
  1590                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
  1591                i++) {
  1592             curFc->setSize(size);
  1593             // Don't record this as a return in order to try and
  1594             // determine the "returns" from a GC.
1595             _bt.verify_not_unallocated((HeapWord*) curFc, size);
  1596             _indexedFreeList[size].returnChunkAtTail(curFc, false);
  1597             _bt.mark_block((HeapWord*)curFc, size);
  1598             splitBirth(size);
  1599             // Don't record the initial population of the indexed list
  1600             // as a split birth.
  1603           // check that the arithmetic was OK above
  1604           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
  1605             "inconsistency in carving newFc");
  1606           curFc->setSize(size);
  1607           _bt.mark_block((HeapWord*)curFc, size);
  1608           splitBirth(size);
  1609           fc = curFc;
  1610         } else {
  1611           // Return entire block to caller
  1612           fc = newFc;
  1616   } else {
  1617     // Get a free chunk from the free chunk dictionary to be returned to
  1618     // replenish the indexed free list.
  1619     fc = getChunkFromDictionaryExact(size);
  1621   // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
  1622   return fc;
  1625 FreeChunk*
  1626 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
  1627   assert_locked();
  1628   FreeChunk* fc = _dictionary->getChunk(size);
  1629   if (fc == NULL) {
  1630     return NULL;
  1632   _bt.allocated((HeapWord*)fc, fc->size());
  1633   if (fc->size() >= size + MinChunkSize) {
  1634     fc = splitChunkAndReturnRemainder(fc, size);
  1636   assert(fc->size() >= size, "chunk too small");
  1637   assert(fc->size() < size + MinChunkSize, "chunk too big");
  1638   _bt.verify_single_block((HeapWord*)fc, fc->size());
  1639   return fc;
  1642 FreeChunk*
  1643 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
  1644   assert_locked();
  1645   FreeChunk* fc = _dictionary->getChunk(size);
  1646   if (fc == NULL) {
  1647     return fc;
  1649   _bt.allocated((HeapWord*)fc, fc->size());
  1650   if (fc->size() == size) {
  1651     _bt.verify_single_block((HeapWord*)fc, size);
  1652     return fc;
  1654   assert(fc->size() > size, "getChunk() guarantee");
  1655   if (fc->size() < size + MinChunkSize) {
  1656     // Return the chunk to the dictionary and go get a bigger one.
  1657     returnChunkToDictionary(fc);
  1658     fc = _dictionary->getChunk(size + MinChunkSize);
  1659     if (fc == NULL) {
  1660       return NULL;
  1662     _bt.allocated((HeapWord*)fc, fc->size());
  1664   assert(fc->size() >= size + MinChunkSize, "tautology");
  1665   fc = splitChunkAndReturnRemainder(fc, size);
  1666   assert(fc->size() == size, "chunk is wrong size");
  1667   _bt.verify_single_block((HeapWord*)fc, size);
  1668   return fc;
  1671 void
  1672 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
  1673   assert_locked();
  1675   size_t size = chunk->size();
  1676   _bt.verify_single_block((HeapWord*)chunk, size);
  1677   // adjust _unallocated_block downward, as necessary
  1678   _bt.freed((HeapWord*)chunk, size);
  1679   _dictionary->returnChunk(chunk);
  1680 #ifndef PRODUCT
  1681   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1682     TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
  1684 #endif // PRODUCT
  1687 void
  1688 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
  1689   assert_locked();
  1690   size_t size = fc->size();
  1691   _bt.verify_single_block((HeapWord*) fc, size);
  1692   _bt.verify_not_unallocated((HeapWord*) fc, size);
  1693   if (_adaptive_freelists) {
  1694     _indexedFreeList[size].returnChunkAtTail(fc);
  1695   } else {
  1696     _indexedFreeList[size].returnChunkAtHead(fc);
  1698 #ifndef PRODUCT
  1699   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1700      _indexedFreeList[size].verify_stats();
  1702 #endif // PRODUCT
  1705 // Add chunk to end of last block -- if it's the largest
  1706 // block -- and update BOT and census data. We would
  1707 // of course have preferred to coalesce it with the
  1708 // last block, but it's currently less expensive to find the
  1709 // largest block than it is to find the last.
  1710 void
  1711 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
  1712   HeapWord* chunk, size_t     size) {
  1713   // check that the chunk does lie in this space!
  1714   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1715   // One of the parallel gc task threads may be here
  1716   // whilst others are allocating.
  1717   Mutex* lock = NULL;
  1718   if (ParallelGCThreads != 0) {
  1719     lock = &_parDictionaryAllocLock;
  1721   FreeChunk* ec;
  1723     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1724     ec = dictionary()->findLargestDict();  // get largest block
  1725     if (ec != NULL && ec->end() == chunk) {
  1726       // It's a coterminal block - we can coalesce.
  1727       size_t old_size = ec->size();
  1728       coalDeath(old_size);
  1729       removeChunkFromDictionary(ec);
  1730       size += old_size;
  1731     } else {
  1732       ec = (FreeChunk*)chunk;
  1735   ec->setSize(size);
  1736   debug_only(ec->mangleFreed(size));
  1737   if (size < SmallForDictionary) {
  1738     lock = _indexedFreeListParLocks[size];
  1740   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1741   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  1742   // record the birth under the lock since the recording involves
  1743   // manipulation of the list on which the chunk lives and
  1744   // if the chunk is allocated and is the last on the list,
  1745   // the list can go away.
  1746   coalBirth(size);
  1749 void
  1750 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
  1751                                               size_t     size) {
  1752   // check that the chunk does lie in this space!
  1753   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1754   assert_locked();
  1755   _bt.verify_single_block(chunk, size);
  1757   FreeChunk* fc = (FreeChunk*) chunk;
  1758   fc->setSize(size);
  1759   debug_only(fc->mangleFreed(size));
  1760   if (size < SmallForDictionary) {
  1761     returnChunkToFreeList(fc);
  1762   } else {
  1763     returnChunkToDictionary(fc);
  1767 void
  1768 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
  1769   size_t size, bool coalesced) {
  1770   assert_locked();
  1771   assert(chunk != NULL, "null chunk");
  1772   if (coalesced) {
  1773     // repair BOT
  1774     _bt.single_block(chunk, size);
  1776   addChunkToFreeLists(chunk, size);
  1779 // We _must_ find the purported chunk on our free lists;
  1780 // we assert if we don't.
  1781 void
  1782 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
  1783   size_t size = fc->size();
  1784   assert_locked();
  1785   debug_only(verifyFreeLists());
  1786   if (size < SmallForDictionary) {
  1787     removeChunkFromIndexedFreeList(fc);
  1788   } else {
  1789     removeChunkFromDictionary(fc);
  1791   _bt.verify_single_block((HeapWord*)fc, size);
  1792   debug_only(verifyFreeLists());
  1795 void
  1796 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
  1797   size_t size = fc->size();
  1798   assert_locked();
  1799   assert(fc != NULL, "null chunk");
  1800   _bt.verify_single_block((HeapWord*)fc, size);
  1801   _dictionary->removeChunk(fc);
  1802   // adjust _unallocated_block upward, as necessary
  1803   _bt.allocated((HeapWord*)fc, size);
  1806 void
  1807 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
  1808   assert_locked();
  1809   size_t size = fc->size();
  1810   _bt.verify_single_block((HeapWord*)fc, size);
  1811   NOT_PRODUCT(
  1812     if (FLSVerifyIndexTable) {
  1813       verifyIndexedFreeList(size);
  1816   _indexedFreeList[size].removeChunk(fc);
  1817   debug_only(fc->clearNext());
  1818   debug_only(fc->clearPrev());
  1819   NOT_PRODUCT(
  1820     if (FLSVerifyIndexTable) {
  1821       verifyIndexedFreeList(size);
  1826 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
  1827   /* A hint is the next larger size that has a surplus.
  1828      Start search at a size large enough to guarantee that
1829      the excess is >= MinChunkSize. */
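  // Illustrative walk of the hint chain (all sizes hypothetical): a request
  // for 10 words with MinChunkSize == 4 starts the search at size 14; if
  // _indexedFreeList[14].hint() is 20 and list 20 has both a surplus and a
  // head, a 20-word chunk is taken from list 20, 10 words are returned to
  // the caller, the 10-word remainder goes back on the free lists, and the
  // hint for size 14 is reset to 20.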
  1830   size_t start = align_object_size(numWords + MinChunkSize);
  1831   if (start < IndexSetSize) {
  1832     FreeList* it   = _indexedFreeList;
  1833     size_t    hint = _indexedFreeList[start].hint();
  1834     while (hint < IndexSetSize) {
  1835       assert(hint % MinObjAlignment == 0, "hint should be aligned");
  1836       FreeList *fl = &_indexedFreeList[hint];
  1837       if (fl->surplus() > 0 && fl->head() != NULL) {
  1838         // Found a list with surplus, reset original hint
  1839         // and split out a free chunk which is returned.
  1840         _indexedFreeList[start].set_hint(hint);
  1841         FreeChunk* res = getFromListGreater(fl, numWords);
  1842         assert(res == NULL || res->isFree(),
  1843           "Should be returning a free chunk");
  1844         return res;
  1846       hint = fl->hint(); /* keep looking */
  1848     /* None found. */
  1849     it[start].set_hint(IndexSetSize);
  1851   return NULL;
  1854 /* Requires fl->size >= numWords + MinChunkSize */
  1855 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
  1856   size_t numWords) {
  1857   FreeChunk *curr = fl->head();
  1858   size_t oldNumWords = curr->size();
  1859   assert(numWords >= MinChunkSize, "Word size is too small");
  1860   assert(curr != NULL, "List is empty");
  1861   assert(oldNumWords >= numWords + MinChunkSize,
  1862         "Size of chunks in the list is too small");
  1864   fl->removeChunk(curr);
  1865   // recorded indirectly by splitChunkAndReturnRemainder -
  1866   // smallSplit(oldNumWords, numWords);
  1867   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
  1868   // Does anything have to be done for the remainder in terms of
  1869   // fixing the card table?
  1870   assert(new_chunk == NULL || new_chunk->isFree(),
  1871     "Should be returning a free chunk");
  1872   return new_chunk;
  1875 FreeChunk*
  1876 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
  1877   size_t new_size) {
  1878   assert_locked();
  1879   size_t size = chunk->size();
  1880   assert(size > new_size, "Split from a smaller block?");
  1881   assert(is_aligned(chunk), "alignment problem");
  1882   assert(size == adjustObjectSize(size), "alignment problem");
  1883   size_t rem_size = size - new_size;
  1884   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
  1885   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
  1886   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
  1887   assert(is_aligned(ffc), "alignment problem");
  1888   ffc->setSize(rem_size);
  1889   ffc->linkNext(NULL);
  1890   ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  1891   // Above must occur before BOT is updated below.
  1892   // adjust block offset table
  1893   OrderAccess::storestore();
  1894   assert(chunk->isFree() && ffc->isFree(), "Error");
  1895   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
  1896   if (rem_size < SmallForDictionary) {
  1897     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  1898     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
  1899     returnChunkToFreeList(ffc);
  1900     split(size, rem_size);
  1901     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
  1902   } else {
  1903     returnChunkToDictionary(ffc);
1904     split(size, rem_size);
  1906   chunk->setSize(new_size);
  1907   return chunk;
  1910 void
  1911 CompactibleFreeListSpace::sweep_completed() {
  1912   // Now that space is probably plentiful, refill linear
  1913   // allocation blocks as needed.
  1914   refillLinearAllocBlocksIfNeeded();
  1917 void
  1918 CompactibleFreeListSpace::gc_prologue() {
  1919   assert_locked();
  1920   if (PrintFLSStatistics != 0) {
  1921     gclog_or_tty->print("Before GC:\n");
  1922     reportFreeListStatistics();
  1924   refillLinearAllocBlocksIfNeeded();
  1927 void
  1928 CompactibleFreeListSpace::gc_epilogue() {
  1929   assert_locked();
  1930   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
  1931     if (_smallLinearAllocBlock._word_size == 0)
  1932       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
  1934   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1935   _promoInfo.stopTrackingPromotions();
  1936   repairLinearAllocationBlocks();
  1937   // Print Space's stats
  1938   if (PrintFLSStatistics != 0) {
  1939     gclog_or_tty->print("After GC:\n");
  1940     reportFreeListStatistics();
  1944 // Iteration support, mostly delegated from a CMS generation
  1946 void CompactibleFreeListSpace::save_marks() {
  1947   // mark the "end" of the used space at the time of this call;
  1948   // note, however, that promoted objects from this point
  1949   // on are tracked in the _promoInfo below.
  1950   set_saved_mark_word(unallocated_block());
  1951   // inform allocator that promotions should be tracked.
  1952   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1953   _promoInfo.startTrackingPromotions();
  1956 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
  1957   assert(_promoInfo.tracking(), "No preceding save_marks?");
  1958   assert(SharedHeap::heap()->n_par_threads() == 0,
  1959          "Shouldn't be called if using parallel gc.");
  1960   return _promoInfo.noPromotions();
  1963 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
  1965 void CompactibleFreeListSpace::                                             \
  1966 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  1967   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
  1968          "Shouldn't be called (yet) during parallel part of gc.");          \
  1969   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
  1970   /*                                                                        \
  1971    * This also restores any displaced headers and removes the elements from \
  1972    * the iteration set as they are processed, so that we have a clean slate \
  1973    * at the end of the iteration. Note, thus, that if new objects are       \
  1974    * promoted as a result of the iteration they are iterated over as well.  \
  1975    */                                                                       \
  1976   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
  1979 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
  1982 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
  1983   // ugghh... how would one do this efficiently for a non-contiguous space?
  1984   guarantee(false, "NYI");
  1987 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  1988   return _smallLinearAllocBlock._word_size == 0;
  1991 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
  1992   // Fix up linear allocation blocks to look like free blocks
  1993   repairLinearAllocBlock(&_smallLinearAllocBlock);
  1996 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
  1997   assert_locked();
  1998   if (blk->_ptr != NULL) {
  1999     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
  2000            "Minimum block size requirement");
  2001     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
  2002     fc->setSize(blk->_word_size);
  2003     fc->linkPrev(NULL);   // mark as free
  2004     fc->dontCoalesce();
  2005     assert(fc->isFree(), "just marked it free");
  2006     assert(fc->cantCoalesce(), "just marked it uncoalescable");
  2010 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
  2011   assert_locked();
  2012   if (_smallLinearAllocBlock._ptr == NULL) {
  2013     assert(_smallLinearAllocBlock._word_size == 0,
  2014       "Size of linAB should be zero if the ptr is NULL");
  2015     // Reset the linAB refill and allocation size limit.
  2016     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
  2018   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
  2021 void
  2022 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
  2023   assert_locked();
  2024   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
  2025          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
  2026          "blk invariant");
  2027   if (blk->_ptr == NULL) {
  2028     refillLinearAllocBlock(blk);
  2030   if (PrintMiscellaneous && Verbose) {
  2031     if (blk->_word_size == 0) {
  2032       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
  2037 void
  2038 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  2039   assert_locked();
  2040   assert(blk->_word_size == 0 && blk->_ptr == NULL,
  2041          "linear allocation block should be empty");
  2042   FreeChunk* fc;
  2043   if (blk->_refillSize < SmallForDictionary &&
  2044       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
  2045     // A linAB's strategy might be to use small sizes to reduce
  2046     // fragmentation but still get the benefits of allocation from a
  2047     // linAB.
  2048   } else {
  2049     fc = getChunkFromDictionary(blk->_refillSize);
  2051   if (fc != NULL) {
  2052     blk->_ptr  = (HeapWord*)fc;
  2053     blk->_word_size = fc->size();
  2054     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
  2058 // Support for concurrent collection policy decisions.
  2059 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2060   // In the future we might want to add in fragmentation stats --
  2061   // including erosion of the "mountain" into this decision as well.
  2062   return !adaptive_freelists() && linearAllocationWouldFail();
  2065 // Support for compaction
  2067 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  2068   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  2069   // prepare_for_compaction() uses the space between live objects
  2070   // so that later phase can skip dead space quickly.  So verification
  2071   // of the free lists doesn't work after.
  2074 #define obj_size(q) adjustObjectSize(oop(q)->size())
  2075 #define adjust_obj_size(s) adjustObjectSize(s)
  2077 void CompactibleFreeListSpace::adjust_pointers() {
  2078   // In other versions of adjust_pointers(), a bail out
  2079   // based on the amount of live data in the generation
  2080   // (i.e., if 0, bail out) may be used.
  2081   // Cannot test used() == 0 here because the free lists have already
  2082   // been mangled by the compaction.
  2084   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
  2085   // See note about verification in prepare_for_compaction().
  2088 void CompactibleFreeListSpace::compact() {
  2089   SCAN_AND_COMPACT(obj_size);
  2092 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2093 // where fbs are the free block sizes
  2094 double CompactibleFreeListSpace::flsFrag() const {
  2095   size_t itabFree = totalSizeInIndexedFreeLists();
  2096   double frag = 0.0;
  2097   size_t i;
  2099   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2100     double sz  = i;
  2101     frag      += _indexedFreeList[i].count() * (sz * sz);
  2104   double totFree = itabFree +
  2105                    _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
  2106   if (totFree > 0) {
  2107     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
  2108             (totFree * totFree));
  2109     frag = (double)1.0  - frag;
  2110   } else {
  2111     assert(frag == 0.0, "Follows from totFree == 0");
  2113   return frag;
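// A standalone sketch of the same metric, detached from the CMS data
// structures; the function name and inputs below are invented purely for
// illustration:
//
//   static double fls_fragmentation(const size_t* block_sizes, size_t n) {
//     double sum = 0.0, sum_sq = 0.0;
//     for (size_t i = 0; i < n; i++) {
//       double sz = (double)block_sizes[i];
//       sum    += sz;
//       sum_sq += sz * sz;
//     }
//     if (sum == 0.0) return 0.0;          // no free space => no fragmentation
//     return 1.0 - sum_sq / (sum * sum);   // 0.0 for one block, -> 1.0 for many small ones
//   }
//
// For instance, a single 100-word free block yields 1 - 10000/10000 = 0.0,
// while ten 10-word free blocks yield 1 - 1000/10000 = 0.9.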
  2116 void CompactibleFreeListSpace::beginSweepFLCensus(
  2117   float inter_sweep_current,
  2118   float inter_sweep_estimate,
  2119   float intra_sweep_estimate) {
  2120   assert_locked();
  2121   size_t i;
  2122   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2123     FreeList* fl    = &_indexedFreeList[i];
  2124     if (PrintFLSStatistics > 1) {
  2125       gclog_or_tty->print("size[%d] : ", i);
  2127     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
  2128     fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
  2129     fl->set_beforeSweep(fl->count());
  2130     fl->set_bfrSurp(fl->surplus());
  2132   _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
  2133                                     inter_sweep_current,
  2134                                     inter_sweep_estimate,
  2135                                     intra_sweep_estimate);
  2138 void CompactibleFreeListSpace::setFLSurplus() {
  2139   assert_locked();
  2140   size_t i;
  2141   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2142     FreeList *fl = &_indexedFreeList[i];
  2143     fl->set_surplus(fl->count() -
  2144                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
  2148 void CompactibleFreeListSpace::setFLHints() {
  2149   assert_locked();
  2150   size_t i;
  2151   size_t h = IndexSetSize;
  2152   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
  2153     FreeList *fl = &_indexedFreeList[i];
  2154     fl->set_hint(h);
  2155     if (fl->surplus() > 0) {
  2156       h = i;
  2161 void CompactibleFreeListSpace::clearFLCensus() {
  2162   assert_locked();
  2163   int i;
  2164   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2165     FreeList *fl = &_indexedFreeList[i];
  2166     fl->set_prevSweep(fl->count());
  2167     fl->set_coalBirths(0);
  2168     fl->set_coalDeaths(0);
  2169     fl->set_splitBirths(0);
  2170     fl->set_splitDeaths(0);
  2174 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  2175   if (PrintFLSStatistics > 0) {
  2176     HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
  2177     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
  2178                            largestAddr);
  2180   setFLSurplus();
  2181   setFLHints();
  2182   if (PrintGC && PrintFLSCensus > 0) {
  2183     printFLCensus(sweep_count);
  2185   clearFLCensus();
  2186   assert_locked();
  2187   _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
  2190 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
  2191   if (size < SmallForDictionary) {
  2192     FreeList *fl = &_indexedFreeList[size];
  2193     return (fl->coalDesired() < 0) ||
  2194            ((int)fl->count() > fl->coalDesired());
  2195   } else {
  2196     return dictionary()->coalDictOverPopulated(size);
  2200 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
  2201   assert(size < SmallForDictionary, "Size too large for indexed list");
  2202   FreeList *fl = &_indexedFreeList[size];
  2203   fl->increment_coalBirths();
  2204   fl->increment_surplus();
  2207 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
  2208   assert(size < SmallForDictionary, "Size too large for indexed list");
  2209   FreeList *fl = &_indexedFreeList[size];
  2210   fl->increment_coalDeaths();
  2211   fl->decrement_surplus();
  2214 void CompactibleFreeListSpace::coalBirth(size_t size) {
  2215   if (size  < SmallForDictionary) {
  2216     smallCoalBirth(size);
  2217   } else {
  2218     dictionary()->dictCensusUpdate(size,
  2219                                    false /* split */,
  2220                                    true /* birth */);
  2224 void CompactibleFreeListSpace::coalDeath(size_t size) {
  2225   if(size  < SmallForDictionary) {
  2226     smallCoalDeath(size);
  2227   } else {
  2228     dictionary()->dictCensusUpdate(size,
  2229                                    false /* split */,
  2230                                    false /* birth */);
  2234 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
  2235   assert(size < SmallForDictionary, "Size too large for indexed list");
  2236   FreeList *fl = &_indexedFreeList[size];
  2237   fl->increment_splitBirths();
  2238   fl->increment_surplus();
  2241 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
  2242   assert(size < SmallForDictionary, "Size too large for indexed list");
  2243   FreeList *fl = &_indexedFreeList[size];
  2244   fl->increment_splitDeaths();
  2245   fl->decrement_surplus();
  2248 void CompactibleFreeListSpace::splitBirth(size_t size) {
  2249   if (size  < SmallForDictionary) {
  2250     smallSplitBirth(size);
  2251   } else {
  2252     dictionary()->dictCensusUpdate(size,
  2253                                    true /* split */,
  2254                                    true /* birth */);
  2258 void CompactibleFreeListSpace::splitDeath(size_t size) {
  2259   if (size  < SmallForDictionary) {
  2260     smallSplitDeath(size);
  2261   } else {
  2262     dictionary()->dictCensusUpdate(size,
  2263                                    true /* split */,
  2264                                    false /* birth */);
  2268 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
  2269   size_t to2 = from - to1;
  2270   splitDeath(from);
  2271   splitBirth(to1);
  2272   splitBirth(to2);
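  // For example, splitting a 100-word chunk into a 60-word piece and a
  // 40-word piece records one split death for size 100 and one split birth
  // each for sizes 60 and 40.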
  2275 void CompactibleFreeListSpace::print() const {
  2276   print_on(tty);
  2279 void CompactibleFreeListSpace::prepare_for_verify() {
  2280   assert_locked();
  2281   repairLinearAllocationBlocks();
  2282   // Verify that the SpoolBlocks look like free blocks of
  2283   // appropriate sizes... To be done ...
  2286 class VerifyAllBlksClosure: public BlkClosure {
  2287  private:
  2288   const CompactibleFreeListSpace* _sp;
  2289   const MemRegion                 _span;
  2290   HeapWord*                       _last_addr;
  2291   size_t                          _last_size;
  2292   bool                            _last_was_obj;
  2293   bool                            _last_was_live;
  2295  public:
  2296   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
  2297     MemRegion span) :  _sp(sp), _span(span),
  2298                        _last_addr(NULL), _last_size(0),
  2299                        _last_was_obj(false), _last_was_live(false) { }
  2301   virtual size_t do_blk(HeapWord* addr) {
  2302     size_t res;
  2303     bool   was_obj  = false;
  2304     bool   was_live = false;
  2305     if (_sp->block_is_obj(addr)) {
  2306       was_obj = true;
  2307       oop p = oop(addr);
  2308       guarantee(p->is_oop(), "Should be an oop");
  2309       res = _sp->adjustObjectSize(p->size());
  2310       if (_sp->obj_is_alive(addr)) {
  2311         was_live = true;
  2312         p->verify();
  2314     } else {
  2315       FreeChunk* fc = (FreeChunk*)addr;
  2316       res = fc->size();
  2317       if (FLSVerifyLists && !fc->cantCoalesce()) {
  2318         guarantee(_sp->verifyChunkInFreeLists(fc),
  2319                   "Chunk should be on a free list");
  2322     if (res == 0) {
  2323       gclog_or_tty->print_cr("Livelock: no rank reduction!");
  2324       gclog_or_tty->print_cr(
  2325         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
  2326         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
  2327         addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
  2328         _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
  2329       _sp->print_on(gclog_or_tty);
  2330       guarantee(false, "Seppuku!");
  2332     _last_addr = addr;
  2333     _last_size = res;
  2334     _last_was_obj  = was_obj;
  2335     _last_was_live = was_live;
  2336     return res;
  2338 };
  2340 class VerifyAllOopsClosure: public OopClosure {
  2341  private:
  2342   const CMSCollector*             _collector;
  2343   const CompactibleFreeListSpace* _sp;
  2344   const MemRegion                 _span;
  2345   const bool                      _past_remark;
  2346   const CMSBitMap*                _bit_map;
  2348  protected:
  2349   void do_oop(void* p, oop obj) {
  2350     if (_span.contains(obj)) { // the interior oop points into CMS heap
  2351       if (!_span.contains(p)) { // reference from outside CMS heap
  2352         // Should be a valid object; the first disjunct below allows
  2353         // us to sidestep an assertion in block_is_obj() that insists
2354         // that its argument be in _sp. Note that several generations (and spaces)
  2355         // are spanned by _span (CMS heap) above.
  2356         guarantee(!_sp->is_in_reserved(obj) ||
  2357                   _sp->block_is_obj((HeapWord*)obj),
  2358                   "Should be an object");
  2359         guarantee(obj->is_oop(), "Should be an oop");
  2360         obj->verify();
  2361         if (_past_remark) {
  2362           // Remark has been completed, the object should be marked
2363           guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
  2365       } else { // reference within CMS heap
  2366         if (_past_remark) {
  2367           // Remark has been completed -- so the referent should have
  2368           // been marked, if referring object is.
  2369           if (_bit_map->isMarked(_collector->block_start(p))) {
  2370             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
  2374     } else if (_sp->is_in_reserved(p)) {
  2375       // the reference is from FLS, and points out of FLS
  2376       guarantee(obj->is_oop(), "Should be an oop");
  2377       obj->verify();
  2381   template <class T> void do_oop_work(T* p) {
  2382     T heap_oop = oopDesc::load_heap_oop(p);
  2383     if (!oopDesc::is_null(heap_oop)) {
  2384       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2385       do_oop(p, obj);
  2389  public:
  2390   VerifyAllOopsClosure(const CMSCollector* collector,
  2391     const CompactibleFreeListSpace* sp, MemRegion span,
  2392     bool past_remark, CMSBitMap* bit_map) :
  2393     OopClosure(), _collector(collector), _sp(sp), _span(span),
  2394     _past_remark(past_remark), _bit_map(bit_map) { }
  2396   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
  2397   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
  2398 };
  2400 void CompactibleFreeListSpace::verify(bool ignored) const {
  2401   assert_lock_strong(&_freelistLock);
  2402   verify_objects_initialized();
  2403   MemRegion span = _collector->_span;
  2404   bool past_remark = (_collector->abstract_state() ==
  2405                       CMSCollector::Sweeping);
  2407   ResourceMark rm;
  2408   HandleMark  hm;
  2410   // Check integrity of CFL data structures
  2411   _promoInfo.verify();
  2412   _dictionary->verify();
  2413   if (FLSVerifyIndexTable) {
  2414     verifyIndexedFreeLists();
  2416   // Check integrity of all objects and free blocks in space
  2418     VerifyAllBlksClosure cl(this, span);
  2419     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
  2421   // Check that all references in the heap to FLS
  2422   // are to valid objects in FLS or that references in
  2423   // FLS are to valid objects elsewhere in the heap
  2424   if (FLSVerifyAllHeapReferences)
  2426     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
  2427       _collector->markBitMap());
  2428     CollectedHeap* ch = Universe::heap();
  2429     ch->oop_iterate(&cl);              // all oops in generations
  2430     ch->permanent_oop_iterate(&cl);    // all oops in perm gen
  2433   if (VerifyObjectStartArray) {
  2434     // Verify the block offset table
  2435     _bt.verify();
  2439 #ifndef PRODUCT
  2440 void CompactibleFreeListSpace::verifyFreeLists() const {
  2441   if (FLSVerifyLists) {
  2442     _dictionary->verify();
  2443     verifyIndexedFreeLists();
  2444   } else {
  2445     if (FLSVerifyDictionary) {
  2446       _dictionary->verify();
  2448     if (FLSVerifyIndexTable) {
  2449       verifyIndexedFreeLists();
  2453 #endif
  2455 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
  2456   size_t i = 0;
  2457   for (; i < MinChunkSize; i++) {
  2458     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
  2460   for (; i < IndexSetSize; i++) {
  2461     verifyIndexedFreeList(i);
  2465 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
  2466   FreeChunk* fc   =  _indexedFreeList[size].head();
  2467   FreeChunk* tail =  _indexedFreeList[size].tail();
  2468   size_t    num = _indexedFreeList[size].count();
  2469   size_t      n = 0;
  2470   guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
  2471   for (; fc != NULL; fc = fc->next(), n++) {
  2472     guarantee(fc->size() == size, "Size inconsistency");
  2473     guarantee(fc->isFree(), "!free?");
  2474     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
  2475     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
  2477   guarantee(n == num, "Incorrect count");
  2480 #ifndef PRODUCT
  2481 void CompactibleFreeListSpace::checkFreeListConsistency() const {
  2482   assert(_dictionary->minSize() <= IndexSetSize,
  2483     "Some sizes can't be allocated without recourse to"
  2484     " linear allocation buffers");
  2485   assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
  2486     "else MIN_TREE_CHUNK_SIZE is wrong");
  2487   assert((IndexSetStride == 2 && IndexSetStart == 2) ||
  2488          (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
  2489   assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
  2490       "Some for-loops may be incorrectly initialized");
  2491   assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
  2492       "For-loops that iterate over IndexSet with stride 2 may be wrong");
  2494 #endif
  2496 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  2497   assert_lock_strong(&_freelistLock);
  2498   FreeList total;
  2499   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  2500   FreeList::print_labels_on(gclog_or_tty, "size");
  2501   size_t totalFree = 0;
  2502   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2503     const FreeList *fl = &_indexedFreeList[i];
  2504     totalFree += fl->count() * fl->size();
  2505     if (i % (40*IndexSetStride) == 0) {
  2506       FreeList::print_labels_on(gclog_or_tty, "size");
  2508     fl->print_on(gclog_or_tty);
  2509     total.set_bfrSurp(    total.bfrSurp()     + fl->bfrSurp()    );
  2510     total.set_surplus(    total.surplus()     + fl->surplus()    );
  2511     total.set_desired(    total.desired()     + fl->desired()    );
  2512     total.set_prevSweep(  total.prevSweep()   + fl->prevSweep()  );
  2513     total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
  2514     total.set_count(      total.count()       + fl->count()      );
  2515     total.set_coalBirths( total.coalBirths()  + fl->coalBirths() );
  2516     total.set_coalDeaths( total.coalDeaths()  + fl->coalDeaths() );
  2517     total.set_splitBirths(total.splitBirths() + fl->splitBirths());
  2518     total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
  2520   total.print_on(gclog_or_tty, "TOTAL");
  2521   gclog_or_tty->print_cr("Total free in indexed lists "
  2522                          SIZE_FORMAT " words", totalFree);
  2523   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
  2524     (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
  2525             (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
  2526     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
  2527   _dictionary->printDictCensus();
  2530 ///////////////////////////////////////////////////////////////////////////
  2531 // CFLS_LAB
  2532 ///////////////////////////////////////////////////////////////////////////
  2534 #define VECTOR_257(x)                                                                                  \
  2535   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
  2536   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2537      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2538      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2539      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2540      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2541      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2542      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2543      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
  2544      x }
  2546 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
  2547 // OldPLABSize, whose static default is different; if overridden at the
  2548 // command-line, this will get reinitialized via a call to
  2549 // modify_initialization() below.
  2550 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
  2551   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
  2552 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
  2553 int    CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
  2555 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
  2556   _cfls(cfls)
  2557 {
  2558   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
  2559   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2560        i < CompactibleFreeListSpace::IndexSetSize;
  2561        i += CompactibleFreeListSpace::IndexSetStride) {
  2562     _indexedFreeList[i].set_size(i);
  2563     _num_blocks[i] = 0;
  2564   }
  2565 }
  2567 static bool _CFLS_LAB_modified = false;
  2569 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
  2570   assert(!_CFLS_LAB_modified, "Call only once");
  2571   _CFLS_LAB_modified = true;
  2572   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
  2573        i < CompactibleFreeListSpace::IndexSetSize;
  2574        i += CompactibleFreeListSpace::IndexSetStride) {
  2575     _blocks_to_claim[i].modify(n, wt, true /* force */);
  2576   }
  2577 }
  2579 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  2580   FreeChunk* res;
  2581   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
  2582   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
  2583     // This locking manages sync with other large object allocations.
  2584     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
  2585                     Mutex::_no_safepoint_check_flag);
  2586     res = _cfls->getChunkFromDictionaryExact(word_sz);
  2587     if (res == NULL) return NULL;
  2588   } else {
  2589     FreeList* fl = &_indexedFreeList[word_sz];
  2590     if (fl->count() == 0) {
  2591       // Attempt to refill this local free list.
  2592       get_from_global_pool(word_sz, fl);
  2593       // If it didn't work, give up.
  2594       if (fl->count() == 0) return NULL;
  2595     }
  2596     res = fl->getChunkAtHead();
  2597     assert(res != NULL, "Why was count non-zero?");
  2598   }
  2599   res->markNotFree();
  2600   assert(!res->isFree(), "shouldn't be marked free");
  2601   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  2602   // mangle a just allocated object with a distinct pattern.
  2603   debug_only(res->mangleAllocated(word_sz));
  2604   return (HeapWord*)res;
  2605 }
  2607 // Get a chunk of blocks of the right size and update related
  2608 // book-keeping stats
  2609 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
  2610   // Get the #blocks we want to claim
  2611   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
  2612   assert(n_blks > 0, "Error");
  2613   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
  2614   // In some cases, when the application has a phase change,
  2615   // there may be a sudden and sharp shift in the object survival
  2616   // profile, and updating the counts at the end of a scavenge
  2617   // may not be quick enough, giving rise to large scavenge pauses
  2618   // during these phase changes. It is beneficial to detect such
  2619   // changes on-the-fly during a scavenge and avoid such a phase-change
  2620   // pothole. The following code is a heuristic attempt to do that.
  2621   // It is protected by a product flag until we have gained
  2622   // enough experience with this heuristic and fine-tuned its behaviour.
  2623   // WARNING: This might increase fragmentation if we overreact to
  2624   // small spikes, so some kind of historical smoothing based on
  2625   // previous experience with the greater reactivity might be useful.
  2626   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
  2627   // default.
  2628   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
  2629     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
  2630     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
  2631     n_blks = MIN2(n_blks, CMSOldPLABMax);
  2632   }
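       // Purely illustrative numbers for the adjustment above: with
       // _num_blocks[word_sz] = 8000, CMSOldPLABToleranceFactor = 4,
       // CMSOldPLABNumRefills = 4 and n_blks = 100, multiple = 8000/(4*4*100) = 5;
       // with CMSOldPLABReactivityFactor = 2 the claim grows by 2*5*100 = 1000
       // blocks before being clamped to CMSOldPLABMax.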
  2633   assert(n_blks > 0, "Error");
  2634   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
  2635   // Update stats table entry for this block size
  2636   _num_blocks[word_sz] += fl->count();
  2637 }
  2639 void CFLS_LAB::compute_desired_plab_size() {
  2640   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2641        i < CompactibleFreeListSpace::IndexSetSize;
  2642        i += CompactibleFreeListSpace::IndexSetStride) {
  2643     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
  2644            "Counter inconsistency");
  2645     if (_global_num_workers[i] > 0) {
  2646       // Need to smooth wrt historical average
  2647       if (ResizeOldPLAB) {
  2648         _blocks_to_claim[i].sample(
  2649           MAX2((size_t)CMSOldPLABMin,
  2650           MIN2((size_t)CMSOldPLABMax,
  2651                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
  2652       }
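             // Purely illustrative numbers for the sample above: if 8 workers
             // together consumed 6400 blocks of this size and CMSOldPLABNumRefills
             // is 4, the per-refill demand is 6400/(8*4) = 200 blocks; that value,
             // clamped to [CMSOldPLABMin, CMSOldPLABMax], feeds the weighted
             // average that get_from_global_pool() consults.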
  2653       // Reset counters for next round
  2654       _global_num_workers[i] = 0;
  2655       _global_num_blocks[i] = 0;
  2656       if (PrintOldPLAB) {
  2657         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
  2658       }
  2659     }
  2660   }
  2661 }
  2663 void CFLS_LAB::retire(int tid) {
  2664   // We run this single threaded with the world stopped;
  2665   // so no need for locks and such.
  2666 #define CFLS_LAB_PARALLEL_ACCESS 0
  2667   NOT_PRODUCT(Thread* t = Thread::current();)
  2668   assert(Thread::current()->is_VM_thread(), "Error");
  2669   assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
  2670          "Will access uninitialized slot below");
  2671 #if CFLS_LAB_PARALLEL_ACCESS
  2672   for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
  2673        i > 0;
  2674        i -= CompactibleFreeListSpace::IndexSetStride) {
  2675 #else // CFLS_LAB_PARALLEL_ACCESS
  2676   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
  2677        i < CompactibleFreeListSpace::IndexSetSize;
  2678        i += CompactibleFreeListSpace::IndexSetStride) {
  2679 #endif // !CFLS_LAB_PARALLEL_ACCESS
  2680     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
  2681            "Can't retire more than what we obtained");
  2682     if (_num_blocks[i] > 0) {
  2683       size_t num_retire =  _indexedFreeList[i].count();
  2684       assert(_num_blocks[i] > num_retire, "Should have used at least one");
  2685       {
  2686 #if CFLS_LAB_PARALLEL_ACCESS
  2687         MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
  2688                         Mutex::_no_safepoint_check_flag);
  2689 #endif // CFLS_LAB_PARALLEL_ACCESS
  2690         // Update global stats for num_blocks used
  2691         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
  2692         _global_num_workers[i]++;
  2693         assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
  2694         if (num_retire > 0) {
  2695           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
  2696           // Reset this list.
  2697           _indexedFreeList[i] = FreeList();
  2698           _indexedFreeList[i].set_size(i);
  2699         }
  2700       }
  2701       if (PrintOldPLAB) {
  2702         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
  2703                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
  2704       }
  2705       // Reset stats for next round
  2706       _num_blocks[i]         = 0;
  2707     }
  2708   }
  2709 }
  2711 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
  2712   assert(fl->count() == 0, "Precondition.");
  2713   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
  2714          "Precondition");
  2716   // We'll try all multiples of word_sz in the indexed set: word_sz itself
  2717   // first and, if CMSSplitIndexedFreeListBlocks, larger multiples as well.
  2718   // Failing that, we get a big chunk from the dictionary and split it.
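       // For example (hypothetical sizes): to obtain n = 100 chunks of word_sz = 8
       // we first try the 8-word free list (k = 1); with CMSSplitIndexedFreeListBlocks
       // we also try the 16-word list (k = 2, splitting ~50 chunks two ways), the
       // 24-word list (k = 3), and so on, before falling back to the dictionary below.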
  2719   {
  2720     bool found;
  2721     int  k;
  2722     size_t cur_sz;
  2723     for (k = 1, cur_sz = k * word_sz, found = false;
  2724          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
  2725          (CMSSplitIndexedFreeListBlocks || k <= 1);
  2726          k++, cur_sz = k * word_sz) {
  2727       FreeList fl_for_cur_sz;  // Empty.
  2728       fl_for_cur_sz.set_size(cur_sz);
  2729       {
  2730         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
  2731                         Mutex::_no_safepoint_check_flag);
  2732         FreeList* gfl = &_indexedFreeList[cur_sz];
  2733         if (gfl->count() != 0) {
  2734           // nn is the number of chunks of size cur_sz that
  2735           // we'd need to split k-ways each, in order to create
  2736           // "n" chunks of size word_sz each.
  2737           const size_t nn = MAX2(n/k, (size_t)1);
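                 // E.g., hypothetically, with n = 100 and k = 4, nn = 25 chunks of
                 // size 4*word_sz are taken (fewer if the list is shorter) and each
                 // is split four ways below, yielding up to 100 word_sz-sized chunks.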
  2738           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
  2739           found = true;
  2740           if (k > 1) {
  2741             // Update split death stats for the cur_sz-size blocks list:
  2742             // we increment the split death count by the number of blocks
  2743             // we just took from the cur_sz-size blocks list and which
  2744             // we will be splitting below.
  2745             ssize_t deaths = gfl->splitDeaths() +
  2746                              fl_for_cur_sz.count();
  2747             gfl->set_splitDeaths(deaths);
  2748           }
  2749         }
  2750       }
  2751       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
  2752       if (found) {
  2753         if (k == 1) {
  2754           fl->prepend(&fl_for_cur_sz);
  2755         } else {
  2756           // Divide each block on fl_for_cur_sz up k ways.
  2757           FreeChunk* fc;
  2758           while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
  2759             // Must do this in reverse order, so that anybody attempting to
  2760             // access the main chunk sees it as a single free block until we
  2761             // change it.
  2762             size_t fc_size = fc->size();
  2763             assert(fc->isFree(), "Error");
  2764             for (int i = k-1; i >= 0; i--) {
  2765               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2766               assert((i != 0) ||
  2767                         ((fc == ffc) && ffc->isFree() &&
  2768                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
  2769                         "Counting error");
  2770               ffc->setSize(word_sz);
  2771               ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2772               ffc->linkNext(NULL);
  2773               // Above must occur before BOT is updated below.
  2774               OrderAccess::storestore();
  2775               // splitting from the right, fc_size == i * word_sz
  2776               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
  2777               fc_size -= word_sz;
  2778               assert(fc_size == i*word_sz, "Error");
  2779               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
  2780               _bt.verify_single_block((HeapWord*)fc, fc_size);
  2781               _bt.verify_single_block((HeapWord*)ffc, word_sz);
  2782               // Push this on "fl".
  2783               fl->returnChunkAtHead(ffc);
  2784             }
  2785             // TRAP
  2786             assert(fl->tail()->next() == NULL, "List invariant.");
  2787           }
  2788         }
  2789         // Update birth stats for this block size.
  2790         size_t num = fl->count();
  2791         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  2792                         Mutex::_no_safepoint_check_flag);
  2793         ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
  2794         _indexedFreeList[word_sz].set_splitBirths(births);
  2795         return;
  2796       }
  2797     }
  2798   }
  2799   // Otherwise, we'll split a block from the dictionary.
  2800   FreeChunk* fc = NULL;
  2801   FreeChunk* rem_fc = NULL;
  2802   size_t rem;
  2803   {
  2804     MutexLockerEx x(parDictionaryAllocLock(),
  2805                     Mutex::_no_safepoint_check_flag);
  2806     while (n > 0) {
  2807       fc = dictionary()->getChunk(MAX2(n * word_sz,
  2808                                   _dictionary->minSize()),
  2809                                   FreeBlockDictionary::atLeast);
  2810       if (fc != NULL) {
  2811         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
  2812         dictionary()->dictCensusUpdate(fc->size(),
  2813                                        true /*split*/,
  2814                                        false /*birth*/);
  2815         break;
  2816       } else {
  2817         n--;
  2818       }
  2819     }
  2820     if (fc == NULL) return;
  2821     // Otherwise, split up that block.
  2822     assert((ssize_t)n >= 1, "Control point invariant");
  2823     assert(fc->isFree(), "Error: should be a free block");
  2824     _bt.verify_single_block((HeapWord*)fc, fc->size());
  2825     const size_t nn = fc->size() / word_sz;
  2826     n = MIN2(nn, n);
  2827     assert((ssize_t)n >= 1, "Control point invariant");
  2828     rem = fc->size() - n * word_sz;
  2829     // If there is a remainder, and it's too small, allocate one fewer.
  2830     if (rem > 0 && rem < MinChunkSize) {
  2831       n--; rem += word_sz;
  2832     }
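       //  Worked example with hypothetical values: fc->size() = 50, word_sz = 8,
       //  n = 7 requested, MinChunkSize = 4: nn = 50/8 = 6, so n = 6 and
       //  rem = 50 - 48 = 2 < MinChunkSize; we then drop to n = 5 and the
       //  remainder grows to rem = 2 + 8 = 10.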
  2833     // Note that at this point we may have n == 0.
  2834     assert((ssize_t)n >= 0, "Control point invariant");
  2836     // If n is 0, the chunk fc that was found is not large
  2837     // enough to leave a viable remainder.  We are unable to
  2838     // allocate even one block.  Return fc to the
  2839     // dictionary and return, leaving "fl" empty.
  2840     if (n == 0) {
  2841       returnChunkToDictionary(fc);
  2842       assert(fl->count() == 0, "We never allocated any blocks");
  2843       return;
  2844     }
  2846     // First return the remainder, if any.
  2847     // Note that we hold the lock until we decide if we're going to give
  2848     // back the remainder to the dictionary, since a concurrent allocation
  2849     // may otherwise see the heap as empty.  (We're willing to take that
  2850     // hit if the block is a small block.)
  2851     if (rem > 0) {
  2852       size_t prefix_size = n * word_sz;
  2853       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
  2854       rem_fc->setSize(rem);
  2855       rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2856       rem_fc->linkNext(NULL);
  2857       // Above must occur before BOT is updated below.
  2858       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
  2859       OrderAccess::storestore();
  2860       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
  2861       assert(fc->isFree(), "Error");
  2862       fc->setSize(prefix_size);
  2863       if (rem >= IndexSetSize) {
  2864         returnChunkToDictionary(rem_fc);
  2865         dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
  2866         rem_fc = NULL;
  2867       }
  2868       // Otherwise, return it to the small list below.
  2869     }
  2870   }
  2871   if (rem_fc != NULL) {
  2872     MutexLockerEx x(_indexedFreeListParLocks[rem],
  2873                     Mutex::_no_safepoint_check_flag);
  2874     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
  2875     _indexedFreeList[rem].returnChunkAtHead(rem_fc);
  2876     smallSplitBirth(rem);
  2877   }
  2878   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  2879   // Now do the splitting up.
  2880   // Must do this in reverse order, so that anybody attempting to
  2881   // access the main chunk sees it as a single free block until we
  2882   // change it.
  2883   size_t fc_size = n * word_sz;
  2884   // All but first chunk in this loop
  2885   for (ssize_t i = n-1; i > 0; i--) {
  2886     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
  2887     ffc->setSize(word_sz);
  2888     ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
  2889     ffc->linkNext(NULL);
  2890     // Above must occur before BOT is updated below.
  2891     OrderAccess::storestore();
  2892     // splitting from the right; after the decrement below, fc_size == i * word_sz
  2893     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
  2894     fc_size -= word_sz;
  2895     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
  2896     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
  2897     _bt.verify_single_block((HeapWord*)fc, fc_size);
  2898     // Push this on "fl".
  2899     fl->returnChunkAtHead(ffc);
  2900   }
  2901   // First chunk
  2902   assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
  2903   // The blocks above should show their new sizes before the first block below
  2904   fc->setSize(word_sz);
  2905   fc->linkPrev(NULL);    // idempotent wrt free-ness, see assert above
  2906   fc->linkNext(NULL);
  2907   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  2908   _bt.verify_single_block((HeapWord*)fc, fc->size());
  2909   fl->returnChunkAtHead(fc);
  2911   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  2912   {
  2913     // Update the stats for this block size.
  2914     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
  2915                     Mutex::_no_safepoint_check_flag);
  2916     const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
  2917     _indexedFreeList[word_sz].set_splitBirths(births);
  2918     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
  2919     // _indexedFreeList[word_sz].set_surplus(new_surplus);
  2920   }
  2922   // TRAP
  2923   assert(fl->tail()->next() == NULL, "List invariant.");
  2924 }
  2926 // Set up the space's par_seq_tasks structure for work claiming
  2927 // for parallel rescan. See CMSParRemarkTask where this is currently used.
  2928 // XXX Need to suitably abstract and generalize this and the next
  2929 // method into one.
  2930 void
  2931 CompactibleFreeListSpace::
  2932 initialize_sequential_subtasks_for_rescan(int n_threads) {
  2933   // The "size" of each task is fixed according to rescan_task_size.
  2934   assert(n_threads > 0, "Unexpected n_threads argument");
  2935   const size_t task_size = rescan_task_size();
  2936   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
  2937   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  2938   assert(n_tasks == 0 ||
  2939          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
  2940           (used_region().start() + n_tasks*task_size >= used_region().end())),
  2941          "n_tasks calculation incorrect");
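       // For instance (hypothetical sizes): with task_size = 4096 words and a
       // used region of 10,000 words, n_tasks = (10000 + 4095)/4096 = 3; the
       // last task simply extends past the used region's end.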
  2942   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  2943   assert(!pst->valid(), "Clobbering existing data?");
  2944   // Sets the condition for completion of the subtask (how many threads
  2945   // need to finish in order to be done).
  2946   pst->set_n_threads(n_threads);
  2947   pst->set_n_tasks((int)n_tasks);
  2948 }
  2950 // Set up the space's par_seq_tasks structure for work claiming
  2951 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
  2952 void
  2953 CompactibleFreeListSpace::
  2954 initialize_sequential_subtasks_for_marking(int n_threads,
  2955                                            HeapWord* low) {
  2956   // The "size" of each task is fixed according to marking_task_size.
  2957   assert(n_threads > 0, "Unexpected n_threads argument");
  2958   const size_t task_size = marking_task_size();
  2959   assert(task_size > CardTableModRefBS::card_size_in_words &&
  2960          (task_size %  CardTableModRefBS::card_size_in_words == 0),
  2961          "Otherwise arithmetic below would be incorrect");
  2962   MemRegion span = _gen->reserved();
  2963   if (low != NULL) {
  2964     if (span.contains(low)) {
  2965       // Align low down to  a card boundary so that
  2966       // we can use block_offset_careful() on span boundaries.
  2967       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
  2968                                  CardTableModRefBS::card_size);
  2969       // Clip span prefix at aligned_low
  2970       span = span.intersection(MemRegion(aligned_low, span.end()));
  2971     } else if (low > span.end()) {
  2972       span = MemRegion(low, low);  // Null region
  2973     } // else use entire span
  2974   }
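       // E.g. (hypothetical addresses, 512-byte cards): a low of 0x1234 inside the
       // span is aligned down to 0x1200 and the span prefix below it is clipped;
       // a low above span.end() yields an empty (null) region; a NULL low leaves
       // the entire reserved span to be marked.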
  2975   assert(span.is_empty() ||
  2976          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
  2977         "span should start at a card boundary");
  2978   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  2979   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  2980   assert(n_tasks == 0 ||
  2981          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
  2982           (span.start() + n_tasks*task_size >= span.end())),
  2983          "n_tasks calculation incorrect");
  2984   SequentialSubTasksDone* pst = conc_par_seq_tasks();
  2985   assert(!pst->valid(), "Clobbering existing data?");
  2986   // Sets the condition for completion of the subtask (how many threads
  2987   // need to finish in order to be done).
  2988   pst->set_n_threads(n_threads);
  2989   pst->set_n_tasks((int)n_tasks);
  2990 }