src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

author       acorn
date         Mon, 21 Jan 2013 16:11:24 -0500
changeset    4469 c73c3f2c5b3b
parent       4384 b735136e0d82
parent       4465 203f64878aab
child        4489 ef1e11845e18
permissions  -rw-r--r--

Merge

     1 /*
     2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
    27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
    28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
    29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
    30 #include "gc_implementation/shared/liveRange.hpp"
    31 #include "gc_implementation/shared/spaceDecorator.hpp"
    32 #include "gc_interface/collectedHeap.inline.hpp"
    33 #include "memory/allocation.inline.hpp"
    34 #include "memory/blockOffsetTable.inline.hpp"
    35 #include "memory/resourceArea.hpp"
    36 #include "memory/universe.inline.hpp"
    37 #include "oops/oop.inline.hpp"
    38 #include "runtime/globals.hpp"
    39 #include "runtime/handles.inline.hpp"
    40 #include "runtime/init.hpp"
    41 #include "runtime/java.hpp"
    42 #include "runtime/vmThread.hpp"
    43 #include "utilities/copy.hpp"
    45 /////////////////////////////////////////////////////////////////////////
    46 //// CompactibleFreeListSpace
    47 /////////////////////////////////////////////////////////////////////////
     49 // Highest rank used by the free list locks.
    50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
    52 // Defaults are 0 so things will break badly if incorrectly initialized.
    53 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
    54 size_t CompactibleFreeListSpace::IndexSetStride = 0;
    56 size_t MinChunkSize = 0;
    58 void CompactibleFreeListSpace::set_cms_values() {
    59   // Set CMS global values
    60   assert(MinChunkSize == 0, "already set");
    62   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
    63   // for chunks to contain a FreeChunk.
    64   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
    65   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
    67   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
    68   IndexSetStart  = MinChunkSize;
    69   IndexSetStride = MinObjAlignment;
    70 }
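// Note: for illustration only (the concrete numbers below are hypothetical,
// not taken from this source). If sizeof(FreeChunk) were 24 bytes with
// MinObjAlignmentInBytes == 8 and BytesPerWord == 8, set_cms_values() would
// compute
//   min_chunk_size_in_bytes = align_size_up(24, 8) = 24
//   MinChunkSize            = 24 / 8               = 3 (heap words)
//   IndexSetStart           = MinChunkSize         = 3
//   IndexSetStride          = MinObjAlignment      = 1
// so indexed free lists would be populated for sizes 3, 4, 5, ... words.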
    72 // Constructor
    73 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
    74   MemRegion mr, bool use_adaptive_freelists,
    75   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
    76   _dictionaryChoice(dictionaryChoice),
    77   _adaptive_freelists(use_adaptive_freelists),
    78   _bt(bs, mr),
    79   // free list locks are in the range of values taken by _lockRank
    80   // This range currently is [_leaf+2, _leaf+3]
    81   // Note: this requires that CFLspace c'tors
    82   // are called serially in the order in which the locks are
     83   // acquired in the program text. This is true today.
    84   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
    85   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
    86                           "CompactibleFreeListSpace._dict_par_lock", true),
    87   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    88                     CMSRescanMultiple),
    89   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
    90                     CMSConcMarkMultiple),
    91   _collector(NULL)
    92 {
    93   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
    94          "FreeChunk is larger than expected");
    95   _bt.set_space(this);
    96   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
    97   // We have all of "mr", all of which we place in the dictionary
    98   // as one big chunk. We'll need to decide here which of several
    99   // possible alternative dictionary implementations to use. For
   100   // now the choice is easy, since we have only one working
   101   // implementation, namely, the simple binary tree (splaying
   102   // temporarily disabled).
   103   switch (dictionaryChoice) {
   104     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
   105       _dictionary = new BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>(mr);
   106       break;
   107     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
   108     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
   109     default:
   110       warning("dictionaryChoice: selected option not understood; using"
   111               " default BinaryTreeDictionary implementation instead.");
   112   }
   113   assert(_dictionary != NULL, "CMS dictionary initialization");
   114   // The indexed free lists are initially all empty and are lazily
   115   // filled in on demand. Initialize the array elements to NULL.
   116   initializeIndexedFreeListArray();
   118   // Not using adaptive free lists assumes that allocation is first
   119   // from the linAB's.  Also a cms perm gen which can be compacted
   120   // has to have the klass's klassKlass allocated at a lower
   121   // address in the heap than the klass so that the klassKlass is
   122   // moved to its new location before the klass is moved.
   123   // Set the _refillSize for the linear allocation blocks
   124   if (!use_adaptive_freelists) {
   125     FreeChunk* fc = _dictionary->get_chunk(mr.word_size());
   126     // The small linAB initially has all the space and will allocate
   127     // a chunk of any size.
   128     HeapWord* addr = (HeapWord*) fc;
   129     _smallLinearAllocBlock.set(addr, fc->size() ,
   130       1024*SmallForLinearAlloc, fc->size());
   131     // Note that _unallocated_block is not updated here.
   132     // Allocations from the linear allocation block should
   133     // update it.
   134   } else {
   135     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
   136                                SmallForLinearAlloc);
   137   }
   138   // CMSIndexedFreeListReplenish should be at least 1
   139   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
   140   _promoInfo.setSpace(this);
   141   if (UseCMSBestFit) {
   142     _fitStrategy = FreeBlockBestFitFirst;
   143   } else {
   144     _fitStrategy = FreeBlockStrategyNone;
   145   }
   146   check_free_list_consistency();
   148   // Initialize locks for parallel case.
   150   if (CollectedHeap::use_parallel_gc_threads()) {
   151     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   152       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
   153                                               "a freelist par lock",
   154                                               true);
   155       if (_indexedFreeListParLocks[i] == NULL)
   156         vm_exit_during_initialization("Could not allocate a par lock");
   157       DEBUG_ONLY(
   158         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
   159       )
   160     }
   161     _dictionary->set_par_lock(&_parDictionaryAllocLock);
   162   }
   163 }
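// Note: _lockRank starts at Mutex::leaf + 3 and is post-decremented on each
// construction, so each successive CompactibleFreeListSpace gets a distinct,
// lower free-list lock rank (leaf+3, then leaf+2, ...). That is why the
// comment in the initializer list above requires the c'tors to run serially
// in the order in which the locks are acquired.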
   165 // Like CompactibleSpace forward() but always calls cross_threshold() to
   166 // update the block offset table.  Removed initialize_threshold call because
   167 // CFLS does not use a block offset array for contiguous spaces.
   168 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
   169                                     CompactPoint* cp, HeapWord* compact_top) {
   170   // q is alive
   171   // First check if we should switch compaction space
   172   assert(this == cp->space, "'this' should be current compaction space.");
   173   size_t compaction_max_size = pointer_delta(end(), compact_top);
   174   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
   175     "virtual adjustObjectSize_v() method is not correct");
   176   size_t adjusted_size = adjustObjectSize(size);
   177   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
   178          "no small fragments allowed");
   179   assert(minimum_free_block_size() == MinChunkSize,
   180          "for de-virtualized reference below");
    181   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
   182   if (adjusted_size + MinChunkSize > compaction_max_size &&
   183       adjusted_size != compaction_max_size) {
   184     do {
   185       // switch to next compaction space
   186       cp->space->set_compaction_top(compact_top);
   187       cp->space = cp->space->next_compaction_space();
   188       if (cp->space == NULL) {
   189         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
   190         assert(cp->gen != NULL, "compaction must succeed");
   191         cp->space = cp->gen->first_compaction_space();
   192         assert(cp->space != NULL, "generation must have a first compaction space");
   193       }
   194       compact_top = cp->space->bottom();
   195       cp->space->set_compaction_top(compact_top);
   196       // The correct adjusted_size may not be the same as that for this method
    197       // (i.e., cp->space may no longer be "this"), so adjust the size again.
    198       // Use the virtual method here; above, the non-virtual version was used
    199       // to save a virtual dispatch.
   200       adjusted_size = cp->space->adjust_object_size_v(size);
   201       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
   202       assert(cp->space->minimum_free_block_size() == 0, "just checking");
   203     } while (adjusted_size > compaction_max_size);
   204   }
   206   // store the forwarding pointer into the mark word
   207   if ((HeapWord*)q != compact_top) {
   208     q->forward_to(oop(compact_top));
   209     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
   210   } else {
   211     // if the object isn't moving we can just set the mark to the default
   212     // mark and handle it specially later on.
   213     q->init_mark();
   214     assert(q->forwardee() == NULL, "should be forwarded to NULL");
   215   }
   217   compact_top += adjusted_size;
   219   // we need to update the offset table so that the beginnings of objects can be
   220   // found during scavenge.  Note that we are updating the offset table based on
   221   // where the object will be once the compaction phase finishes.
   223   // Always call cross_threshold().  A contiguous space can only call it when
    224   // the compaction_top exceeds the current threshold, but that shortcut
    225   // does not apply to a non-contiguous space.
   226   cp->threshold =
   227     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
   228   return compact_top;
   229 }
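// Worked example for the fragment check in forward() (hypothetical numbers):
// with MinChunkSize == 3, adjusted_size == 10 and compaction_max_size == 12,
// compacting here would leave a 2-word residual fragment -- nonzero but
// smaller than MinChunkSize -- so the code advances to the next compaction
// space rather than leave such a fragment behind.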
   231 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
   232 // and use of single_block instead of alloc_block.  The name here is not really
   233 // appropriate - maybe a more general name could be invented for both the
   234 // contiguous and noncontiguous spaces.
   236 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
   237   _bt.single_block(start, the_end);
   238   return end();
   239 }
   241 // Initialize them to NULL.
   242 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
   243   for (size_t i = 0; i < IndexSetSize; i++) {
   244     // Note that on platforms where objects are double word aligned,
   245     // the odd array elements are not used.  It is convenient, however,
   246     // to map directly from the object size to the array element.
   247     _indexedFreeList[i].reset(IndexSetSize);
   248     _indexedFreeList[i].set_size(i);
   249     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   250     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   251     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   252     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   253   }
   254 }
   256 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
   257   for (size_t i = 1; i < IndexSetSize; i++) {
   258     assert(_indexedFreeList[i].size() == (size_t) i,
   259       "Indexed free list sizes are incorrect");
   260     _indexedFreeList[i].reset(IndexSetSize);
   261     assert(_indexedFreeList[i].count() == 0, "reset check failed");
   262     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
   263     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
   264     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
   265   }
   266 }
   268 void CompactibleFreeListSpace::reset(MemRegion mr) {
   269   resetIndexedFreeListArray();
   270   dictionary()->reset();
   271   if (BlockOffsetArrayUseUnallocatedBlock) {
   272     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
   273     // Everything's allocated until proven otherwise.
   274     _bt.set_unallocated_block(end());
   275   }
   276   if (!mr.is_empty()) {
   277     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
   278     _bt.single_block(mr.start(), mr.word_size());
   279     FreeChunk* fc = (FreeChunk*) mr.start();
   280     fc->set_size(mr.word_size());
   281     if (mr.word_size() >= IndexSetSize ) {
   282       returnChunkToDictionary(fc);
   283     } else {
   284       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
   285       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
   286     }
   287   }
   288   _promoInfo.reset();
   289   _smallLinearAllocBlock._ptr = NULL;
   290   _smallLinearAllocBlock._word_size = 0;
   291 }
   293 void CompactibleFreeListSpace::reset_after_compaction() {
   294   // Reset the space to the new reality - one free chunk.
   295   MemRegion mr(compaction_top(), end());
   296   reset(mr);
   297   // Now refill the linear allocation block(s) if possible.
   298   if (_adaptive_freelists) {
   299     refillLinearAllocBlocksIfNeeded();
   300   } else {
   301     // Place as much of mr in the linAB as we can get,
   302     // provided it was big enough to go into the dictionary.
   303     FreeChunk* fc = dictionary()->find_largest_dict();
   304     if (fc != NULL) {
   305       assert(fc->size() == mr.word_size(),
   306              "Why was the chunk broken up?");
   307       removeChunkFromDictionary(fc);
   308       HeapWord* addr = (HeapWord*) fc;
   309       _smallLinearAllocBlock.set(addr, fc->size() ,
   310         1024*SmallForLinearAlloc, fc->size());
   311       // Note that _unallocated_block is not updated here.
   312     }
   313   }
   314 }
   316 // Walks the entire dictionary, returning a coterminal
   317 // chunk, if it exists. Use with caution since it involves
   318 // a potentially complete walk of a potentially large tree.
   319 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
   321   assert_lock_strong(&_freelistLock);
   323   return dictionary()->find_chunk_ends_at(end());
   324 }
   327 #ifndef PRODUCT
   328 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
   329   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   330     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
   331   }
   332 }
   334 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
   335   size_t sum = 0;
   336   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   337     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
   338   }
   339   return sum;
   340 }
   342 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
   343   size_t count = 0;
   344   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
   345     debug_only(
   346       ssize_t total_list_count = 0;
   347       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   348          fc = fc->next()) {
   349         total_list_count++;
   350       }
   351       assert(total_list_count ==  _indexedFreeList[i].count(),
   352         "Count in list is incorrect");
   353     )
   354     count += _indexedFreeList[i].count();
   355   }
   356   return count;
   357 }
   359 size_t CompactibleFreeListSpace::totalCount() {
   360   size_t num = totalCountInIndexedFreeLists();
   361   num +=  dictionary()->total_count();
   362   if (_smallLinearAllocBlock._word_size != 0) {
   363     num++;
   364   }
   365   return num;
   366 }
   367 #endif
   369 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
   370   FreeChunk* fc = (FreeChunk*) p;
   371   return fc->is_free();
   372 }
   374 size_t CompactibleFreeListSpace::used() const {
   375   return capacity() - free();
   376 }
   378 size_t CompactibleFreeListSpace::free() const {
   379   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   380   // if you do this while the structures are in flux you
   381   // may get an approximate answer only; for instance
   382   // because there is concurrent allocation either
   383   // directly by mutators or for promotion during a GC.
   384   // It's "MT-safe", however, in the sense that you are guaranteed
   385   // not to crash and burn, for instance, because of walking
   386   // pointers that could disappear as you were walking them.
   387   // The approximation is because the various components
   388   // that are read below are not read atomically (and
   389   // further the computation of totalSizeInIndexedFreeLists()
    390   // is itself a non-atomic computation). The normal use of
   391   // this is during a resize operation at the end of GC
   392   // and at that time you are guaranteed to get the
   393   // correct actual value. However, for instance, this is
   394   // also read completely asynchronously by the "perf-sampler"
   395   // that supports jvmstat, and you are apt to see the values
   396   // flicker in such cases.
   397   assert(_dictionary != NULL, "No _dictionary?");
   398   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
   399           totalSizeInIndexedFreeLists() +
   400           _smallLinearAllocBlock._word_size) * HeapWordSize;
   401 }
   403 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
   404   assert(_dictionary != NULL, "No _dictionary?");
   405   assert_locked();
   406   size_t res = _dictionary->max_chunk_size();
   407   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
   408                        (size_t) SmallForLinearAlloc - 1));
   409   // XXX the following could potentially be pretty slow;
    410   // should one, pessimally for the rare cases when res
    411   // calculated above is less than IndexSetSize,
   412   // just return res calculated above? My reasoning was that
   413   // those cases will be so rare that the extra time spent doesn't
   414   // really matter....
   415   // Note: do not change the loop test i >= res + IndexSetStride
   416   // to i > res below, because i is unsigned and res may be zero.
   417   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
   418        i -= IndexSetStride) {
   419     if (_indexedFreeList[i].head() != NULL) {
   420       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   421       return i;
   422     }
   423   }
   424   return res;
   425 }
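// Note on the loop bound above: the test "i >= res + IndexSetStride" also
// guards the unsigned decrement. With res == 0 and a test of "i > res", an
// iteration could start with i < IndexSetStride (depending on how
// IndexSetSize and the stride line up), and the following
// "i -= IndexSetStride" would wrap around to a huge index and walk past the
// end of _indexedFreeList.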
   427 void LinearAllocBlock::print_on(outputStream* st) const {
   428   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
   429             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
   430             _ptr, _word_size, _refillSize, _allocation_size_limit);
   431 }
   433 void CompactibleFreeListSpace::print_on(outputStream* st) const {
   434   st->print_cr("COMPACTIBLE FREELIST SPACE");
   435   st->print_cr(" Space:");
   436   Space::print_on(st);
   438   st->print_cr("promoInfo:");
   439   _promoInfo.print_on(st);
   441   st->print_cr("_smallLinearAllocBlock");
   442   _smallLinearAllocBlock.print_on(st);
   444   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
   446   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
   447                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
   448 }
   450 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
   451 const {
   452   reportIndexedFreeListStatistics();
   453   gclog_or_tty->print_cr("Layout of Indexed Freelists");
   454   gclog_or_tty->print_cr("---------------------------");
   455   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
   456   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   457     _indexedFreeList[i].print_on(gclog_or_tty);
   458     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   459          fc = fc->next()) {
   460       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
   461                           fc, (HeapWord*)fc + i,
   462                           fc->cantCoalesce() ? "\t CC" : "");
   463     }
   464   }
   465 }
   467 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
   468 const {
   469   _promoInfo.print_on(st);
   470 }
   472 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
   473 const {
   474   _dictionary->report_statistics();
   475   st->print_cr("Layout of Freelists in Tree");
   476   st->print_cr("---------------------------");
   477   _dictionary->print_free_lists(st);
   478 }
   480 class BlkPrintingClosure: public BlkClosure {
   481   const CMSCollector*             _collector;
   482   const CompactibleFreeListSpace* _sp;
   483   const CMSBitMap*                _live_bit_map;
   484   const bool                      _post_remark;
   485   outputStream*                   _st;
   486 public:
   487   BlkPrintingClosure(const CMSCollector* collector,
   488                      const CompactibleFreeListSpace* sp,
   489                      const CMSBitMap* live_bit_map,
   490                      outputStream* st):
   491     _collector(collector),
   492     _sp(sp),
   493     _live_bit_map(live_bit_map),
   494     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
   495     _st(st) { }
   496   size_t do_blk(HeapWord* addr);
   497 };
   499 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
   500   size_t sz = _sp->block_size_no_stall(addr, _collector);
   501   assert(sz != 0, "Should always be able to compute a size");
   502   if (_sp->block_is_obj(addr)) {
   503     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
   504     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
   505       addr,
   506       dead ? "dead" : "live",
   507       sz,
   508       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
   509     if (CMSPrintObjectsInDump && !dead) {
   510       oop(addr)->print_on(_st);
   511       _st->print_cr("--------------------------------------");
   512     }
   513   } else { // free block
   514     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
   515       addr, sz, CMSPrintChunksInDump ? ":" : ".");
   516     if (CMSPrintChunksInDump) {
   517       ((FreeChunk*)addr)->print_on(_st);
   518       _st->print_cr("--------------------------------------");
   519     }
   520   }
   521   return sz;
   522 }
   524 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
   525   outputStream* st) {
   526   st->print_cr("\n=========================");
   527   st->print_cr("Block layout in CMS Heap:");
   528   st->print_cr("=========================");
   529   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
   530   blk_iterate(&bpcl);
   532   st->print_cr("\n=======================================");
   533   st->print_cr("Order & Layout of Promotion Info Blocks");
   534   st->print_cr("=======================================");
   535   print_promo_info_blocks(st);
   537   st->print_cr("\n===========================");
   538   st->print_cr("Order of Indexed Free Lists");
    539   st->print_cr("===========================");
   540   print_indexed_free_lists(st);
   542   st->print_cr("\n=================================");
   543   st->print_cr("Order of Free Lists in Dictionary");
   544   st->print_cr("=================================");
   545   print_dictionary_free_lists(st);
   546 }
   549 void CompactibleFreeListSpace::reportFreeListStatistics() const {
   550   assert_lock_strong(&_freelistLock);
   551   assert(PrintFLSStatistics != 0, "Reporting error");
   552   _dictionary->report_statistics();
   553   if (PrintFLSStatistics > 1) {
   554     reportIndexedFreeListStatistics();
   555     size_t total_size = totalSizeInIndexedFreeLists() +
   556                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
   557     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
   558   }
   559 }
   561 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
   562   assert_lock_strong(&_freelistLock);
   563   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
   564                       "--------------------------------\n");
   565   size_t total_size = totalSizeInIndexedFreeLists();
   566   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
    567   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
    568   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
    569   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
    570   if (free_blocks != 0) {
    571     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
   572   }
   573 }
   575 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
   576   size_t res = 0;
   577   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
   578     debug_only(
   579       ssize_t recount = 0;
   580       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
   581          fc = fc->next()) {
   582         recount += 1;
   583       }
   584       assert(recount == _indexedFreeList[i].count(),
   585         "Incorrect count in list");
   586     )
   587     res += _indexedFreeList[i].count();
   588   }
   589   return res;
   590 }
   592 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
   593   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
   594     if (_indexedFreeList[i].head() != NULL) {
   595       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
   596       return (size_t)i;
   597     }
   598   }
   599   return 0;
   600 }
   602 void CompactibleFreeListSpace::set_end(HeapWord* value) {
   603   HeapWord* prevEnd = end();
   604   assert(prevEnd != value, "unnecessary set_end call");
   605   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   606         "New end is below unallocated block");
   607   _end = value;
   608   if (prevEnd != NULL) {
   609     // Resize the underlying block offset table.
   610     _bt.resize(pointer_delta(value, bottom()));
   611     if (value <= prevEnd) {
   612       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
   613              "New end is below unallocated block");
   614     } else {
   615       // Now, take this new chunk and add it to the free blocks.
   616       // Note that the BOT has not yet been updated for this block.
   617       size_t newFcSize = pointer_delta(value, prevEnd);
   618       // XXX This is REALLY UGLY and should be fixed up. XXX
   619       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
   620         // Mark the boundary of the new block in BOT
   621         _bt.mark_block(prevEnd, value);
   622         // put it all in the linAB
   623         if (ParallelGCThreads == 0) {
   624           _smallLinearAllocBlock._ptr = prevEnd;
   625           _smallLinearAllocBlock._word_size = newFcSize;
   626           repairLinearAllocBlock(&_smallLinearAllocBlock);
   627         } else { // ParallelGCThreads > 0
   628           MutexLockerEx x(parDictionaryAllocLock(),
   629                           Mutex::_no_safepoint_check_flag);
   630           _smallLinearAllocBlock._ptr = prevEnd;
   631           _smallLinearAllocBlock._word_size = newFcSize;
   632           repairLinearAllocBlock(&_smallLinearAllocBlock);
   633         }
   634         // Births of chunks put into a LinAB are not recorded.  Births
   635         // of chunks as they are allocated out of a LinAB are.
   636       } else {
   637         // Add the block to the free lists, if possible coalescing it
   638         // with the last free block, and update the BOT and census data.
   639         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
   640       }
   641     }
   642   }
   643 }
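// Note on set_end() above: when the space shrinks, only the block offset
// table needs resizing (the assertions check that the new end does not drop
// below unallocated_block()). When it grows, [prevEnd, value) becomes a new
// free block, which is placed either in the small linear allocation block
// (non-adaptive free lists with an empty linAB) or in the free lists /
// dictionary via addChunkToFreeListsAtEndRecordingStats().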
   645 class FreeListSpace_DCTOC : public Filtering_DCTOC {
   646   CompactibleFreeListSpace* _cfls;
   647   CMSCollector* _collector;
   648 protected:
   649   // Override.
   650 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
   651   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
   652                                        HeapWord* bottom, HeapWord* top, \
   653                                        ClosureType* cl);                \
   654       void walk_mem_region_with_cl_par(MemRegion mr,                    \
   655                                        HeapWord* bottom, HeapWord* top, \
   656                                        ClosureType* cl);                \
   657     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
   658                                        HeapWord* bottom, HeapWord* top, \
   659                                        ClosureType* cl)
   660   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
   661   walk_mem_region_with_cl_DECL(FilteringClosure);
   663 public:
   664   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
   665                       CMSCollector* collector,
   666                       ExtendedOopClosure* cl,
   667                       CardTableModRefBS::PrecisionStyle precision,
   668                       HeapWord* boundary) :
   669     Filtering_DCTOC(sp, cl, precision, boundary),
   670     _cfls(sp), _collector(collector) {}
   671 };
   673 // We de-virtualize the block-related calls below, since we know that our
   674 // space is a CompactibleFreeListSpace.
   676 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
   677 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
   678                                                  HeapWord* bottom,              \
   679                                                  HeapWord* top,                 \
   680                                                  ClosureType* cl) {             \
   681    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
   682    if (is_par) {                                                                \
   683      assert(SharedHeap::heap()->n_par_threads() ==                              \
   684             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
   685      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   686    } else {                                                                     \
   687      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   688    }                                                                            \
   689 }                                                                               \
   690 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
   691                                                       HeapWord* bottom,         \
   692                                                       HeapWord* top,            \
   693                                                       ClosureType* cl) {        \
   694   /* Skip parts that are before "mr", in case "block_start" sent us             \
   695      back too far. */                                                           \
   696   HeapWord* mr_start = mr.start();                                              \
   697   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
   698   HeapWord* next = bottom + bot_size;                                           \
   699   while (next < mr_start) {                                                     \
   700     bottom = next;                                                              \
   701     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
   702     next = bottom + bot_size;                                                   \
   703   }                                                                             \
   704                                                                                 \
   705   while (bottom < top) {                                                        \
   706     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
   707         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   708                     oop(bottom)) &&                                             \
   709         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   710       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   711       bottom += _cfls->adjustObjectSize(word_sz);                               \
   712     } else {                                                                    \
   713       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
   714     }                                                                           \
   715   }                                                                             \
   716 }                                                                               \
   717 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
   718                                                         HeapWord* bottom,       \
   719                                                         HeapWord* top,          \
   720                                                         ClosureType* cl) {      \
   721   /* Skip parts that are before "mr", in case "block_start" sent us             \
   722      back too far. */                                                           \
   723   HeapWord* mr_start = mr.start();                                              \
   724   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
   725   HeapWord* next = bottom + bot_size;                                           \
   726   while (next < mr_start) {                                                     \
   727     bottom = next;                                                              \
   728     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
   729     next = bottom + bot_size;                                                   \
   730   }                                                                             \
   731                                                                                 \
   732   while (bottom < top) {                                                        \
   733     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
   734         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
   735                     oop(bottom)) &&                                             \
   736         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
   737       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
   738       bottom += _cfls->adjustObjectSize(word_sz);                               \
   739     } else {                                                                    \
   740       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
   741     }                                                                           \
   742   }                                                                             \
   743 }
   745 // (There are only two of these, rather than N, because the split is due
   746 // only to the introduction of the FilteringClosure, a local part of the
   747 // impl of this abstraction.)
   748 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
   749 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
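// Note: the _par and _nopar walkers above differ only in which block queries
// they use. block_size() and block_is_obj() tolerate chunk headers being
// mutated concurrently (e.g. by parallel promotion), while the _nopar
// variants assume no such concurrency and read the FreeChunk fields directly.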
   751 DirtyCardToOopClosure*
   752 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
   753                                       CardTableModRefBS::PrecisionStyle precision,
   754                                       HeapWord* boundary) {
   755   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
   756 }
   759 // Note on locking for the space iteration functions:
   760 // since the collector's iteration activities are concurrent with
   761 // allocation activities by mutators, absent a suitable mutual exclusion
    762 // mechanism the iterators may go awry. For instance, a block being iterated
   763 // may suddenly be allocated or divided up and part of it allocated and
   764 // so on.
   766 // Apply the given closure to each block in the space.
   767 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
   768   assert_lock_strong(freelistLock());
   769   HeapWord *cur, *limit;
   770   for (cur = bottom(), limit = end(); cur < limit;
   771        cur += cl->do_blk_careful(cur));
   772 }
   774 // Apply the given closure to each block in the space.
   775 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
   776   assert_lock_strong(freelistLock());
   777   HeapWord *cur, *limit;
   778   for (cur = bottom(), limit = end(); cur < limit;
   779        cur += cl->do_blk(cur));
   780 }
   782 // Apply the given closure to each oop in the space.
   783 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
   784   assert_lock_strong(freelistLock());
   785   HeapWord *cur, *limit;
   786   size_t curSize;
   787   for (cur = bottom(), limit = end(); cur < limit;
   788        cur += curSize) {
   789     curSize = block_size(cur);
   790     if (block_is_obj(cur)) {
   791       oop(cur)->oop_iterate(cl);
   792     }
   793   }
   794 }
    796 // Apply the given closure to each oop in the intersection of the space and the memory region.
   797 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
   798   assert_lock_strong(freelistLock());
   799   if (is_empty()) {
   800     return;
   801   }
   802   MemRegion cur = MemRegion(bottom(), end());
   803   mr = mr.intersection(cur);
   804   if (mr.is_empty()) {
   805     return;
   806   }
   807   if (mr.equals(cur)) {
   808     oop_iterate(cl);
   809     return;
   810   }
   811   assert(mr.end() <= end(), "just took an intersection above");
   812   HeapWord* obj_addr = block_start(mr.start());
   813   HeapWord* t = mr.end();
   815   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
   816   if (block_is_obj(obj_addr)) {
   817     // Handle first object specially.
   818     oop obj = oop(obj_addr);
   819     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
   820   } else {
   821     FreeChunk* fc = (FreeChunk*)obj_addr;
   822     obj_addr += fc->size();
   823   }
   824   while (obj_addr < t) {
   825     HeapWord* obj = obj_addr;
   826     obj_addr += block_size(obj_addr);
   827     // If "obj_addr" is not greater than top, then the
   828     // entire object "obj" is within the region.
   829     if (obj_addr <= t) {
   830       if (block_is_obj(obj)) {
   831         oop(obj)->oop_iterate(cl);
   832       }
   833     } else {
   834       // "obj" extends beyond end of region
   835       if (block_is_obj(obj)) {
   836         oop(obj)->oop_iterate(&smr_blk);
   837       }
   838       break;
   839     }
   840   }
   841 }
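// Note on the boundary handling above: the first block is handled specially
// because block_start(mr.start()) may return an object that begins before mr,
// and the last object is iterated with the filtering closure smr_blk when it
// extends past mr.end(); in both cases only oops within mr are visited.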
   843 // NOTE: In the following methods, in order to safely be able to
   844 // apply the closure to an object, we need to be sure that the
   845 // object has been initialized. We are guaranteed that an object
   846 // is initialized if we are holding the Heap_lock with the
   847 // world stopped.
   848 void CompactibleFreeListSpace::verify_objects_initialized() const {
   849   if (is_init_completed()) {
   850     assert_locked_or_safepoint(Heap_lock);
   851     if (Universe::is_fully_initialized()) {
   852       guarantee(SafepointSynchronize::is_at_safepoint(),
   853                 "Required for objects to be initialized");
   854     }
   855   } // else make a concession at vm start-up
   856 }
   858 // Apply the given closure to each object in the space
   859 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
   860   assert_lock_strong(freelistLock());
   861   NOT_PRODUCT(verify_objects_initialized());
   862   HeapWord *cur, *limit;
   863   size_t curSize;
   864   for (cur = bottom(), limit = end(); cur < limit;
   865        cur += curSize) {
   866     curSize = block_size(cur);
   867     if (block_is_obj(cur)) {
   868       blk->do_object(oop(cur));
   869     }
   870   }
   871 }
   873 // Apply the given closure to each live object in the space
    874 //   The use of CompactibleFreeListSpace
    875 // by ConcurrentMarkSweepGeneration for concurrent GCs allows objects
    876 // in the space to hold references to objects that are no longer
    877 // valid.  For example, an object may reference another object
    878 // that has already been swept up (collected).  This method uses
   879 // obj_is_alive() to determine whether it is safe to apply the closure to
   880 // an object.  See obj_is_alive() for details on how liveness of an
   881 // object is decided.
   883 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
   884   assert_lock_strong(freelistLock());
   885   NOT_PRODUCT(verify_objects_initialized());
   886   HeapWord *cur, *limit;
   887   size_t curSize;
   888   for (cur = bottom(), limit = end(); cur < limit;
   889        cur += curSize) {
   890     curSize = block_size(cur);
   891     if (block_is_obj(cur) && obj_is_alive(cur)) {
   892       blk->do_object(oop(cur));
   893     }
   894   }
   895 }
   897 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
   898                                                   UpwardsObjectClosure* cl) {
   899   assert_locked(freelistLock());
   900   NOT_PRODUCT(verify_objects_initialized());
   901   Space::object_iterate_mem(mr, cl);
   902 }
   904 // Callers of this iterator beware: The closure application should
   905 // be robust in the face of uninitialized objects and should (always)
   906 // return a correct size so that the next addr + size below gives us a
   907 // valid block boundary. [See for instance,
   908 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   909 // in ConcurrentMarkSweepGeneration.cpp.]
   910 HeapWord*
   911 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
   912   assert_lock_strong(freelistLock());
   913   HeapWord *addr, *last;
   914   size_t size;
   915   for (addr = bottom(), last  = end();
   916        addr < last; addr += size) {
   917     FreeChunk* fc = (FreeChunk*)addr;
   918     if (fc->is_free()) {
   919       // Since we hold the free list lock, which protects direct
   920       // allocation in this generation by mutators, a free object
   921       // will remain free throughout this iteration code.
   922       size = fc->size();
   923     } else {
   924       // Note that the object need not necessarily be initialized,
   925       // because (for instance) the free list lock does NOT protect
   926       // object initialization. The closure application below must
   927       // therefore be correct in the face of uninitialized objects.
   928       size = cl->do_object_careful(oop(addr));
   929       if (size == 0) {
   930         // An unparsable object found. Signal early termination.
   931         return addr;
   932       }
   933     }
   934   }
   935   return NULL;
   936 }
   938 // Callers of this iterator beware: The closure application should
   939 // be robust in the face of uninitialized objects and should (always)
   940 // return a correct size so that the next addr + size below gives us a
   941 // valid block boundary. [See for instance,
   942 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
   943 // in ConcurrentMarkSweepGeneration.cpp.]
   944 HeapWord*
   945 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
   946   ObjectClosureCareful* cl) {
   947   assert_lock_strong(freelistLock());
   948   // Can't use used_region() below because it may not necessarily
   949   // be the same as [bottom(),end()); although we could
   950   // use [used_region().start(),round_to(used_region().end(),CardSize)),
   951   // that appears too cumbersome, so we just do the simpler check
   952   // in the assertion below.
   953   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
   954          "mr should be non-empty and within used space");
   955   HeapWord *addr, *end;
   956   size_t size;
   957   for (addr = block_start_careful(mr.start()), end  = mr.end();
   958        addr < end; addr += size) {
   959     FreeChunk* fc = (FreeChunk*)addr;
   960     if (fc->is_free()) {
   961       // Since we hold the free list lock, which protects direct
   962       // allocation in this generation by mutators, a free object
   963       // will remain free throughout this iteration code.
   964       size = fc->size();
   965     } else {
   966       // Note that the object need not necessarily be initialized,
   967       // because (for instance) the free list lock does NOT protect
   968       // object initialization. The closure application below must
   969       // therefore be correct in the face of uninitialized objects.
   970       size = cl->do_object_careful_m(oop(addr), mr);
   971       if (size == 0) {
   972         // An unparsable object found. Signal early termination.
   973         return addr;
   974       }
   975     }
   976   }
   977   return NULL;
   978 }
   981 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
   982   NOT_PRODUCT(verify_objects_initialized());
   983   return _bt.block_start(p);
   984 }
   986 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
   987   return _bt.block_start_careful(p);
   988 }
   990 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
   991   NOT_PRODUCT(verify_objects_initialized());
   992   // This must be volatile, or else there is a danger that the compiler
   993   // will compile the code below into a sometimes-infinite loop, by keeping
   994   // the value read the first time in a register.
   995   while (true) {
   996     // We must do this until we get a consistent view of the object.
   997     if (FreeChunk::indicatesFreeChunk(p)) {
   998       volatile FreeChunk* fc = (volatile FreeChunk*)p;
   999       size_t res = fc->size();
  1000       // If the object is still a free chunk, return the size, else it
  1001       // has been allocated so try again.
  1002       if (FreeChunk::indicatesFreeChunk(p)) {
  1003         assert(res != 0, "Block size should not be 0");
   1004         return res;
   1005       }
  1006     } else {
  1007       // must read from what 'p' points to in each loop.
  1008       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
  1009       if (k != NULL) {
  1010         assert(k->is_klass(), "Should really be klass oop.");
  1011         oop o = (oop)p;
  1012         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
  1013         size_t res = o->size_given_klass(k);
  1014         res = adjustObjectSize(res);
  1015         assert(res != 0, "Block size should not be 0");
   1016         return res;
   1017       }
   1018     }
   1019   }
   1020 }
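// Note on the double check above: reading fc->size() races with a concurrent
// allocation that can overwrite the chunk header. Re-testing
// FreeChunk::indicatesFreeChunk(p) after the read confirms the block was
// still a free chunk, so the size just read can be trusted; otherwise the
// loop retries and reads the newly installed klass pointer instead.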
  1022 // TODO: Now that is_parsable is gone, we should combine these two functions.
  1023 // A variant of the above that uses the Printezis bits for
  1024 // unparsable but allocated objects. This avoids any possible
  1025 // stalls waiting for mutators to initialize objects, and is
  1026 // thus potentially faster than the variant above. However,
  1027 // this variant may return a zero size for a block that is
  1028 // under mutation and for which a consistent size cannot be
  1029 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
  1030 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
  1031                                                      const CMSCollector* c)
  1032 const {
  1033   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1034   // This must be volatile, or else there is a danger that the compiler
  1035   // will compile the code below into a sometimes-infinite loop, by keeping
  1036   // the value read the first time in a register.
  1037   DEBUG_ONLY(uint loops = 0;)
  1038   while (true) {
  1039     // We must do this until we get a consistent view of the object.
  1040     if (FreeChunk::indicatesFreeChunk(p)) {
  1041       volatile FreeChunk* fc = (volatile FreeChunk*)p;
  1042       size_t res = fc->size();
  1043       if (FreeChunk::indicatesFreeChunk(p)) {
  1044         assert(res != 0, "Block size should not be 0");
  1045         assert(loops == 0, "Should be 0");
   1046         return res;
   1047       }
  1048     } else {
  1049       // must read from what 'p' points to in each loop.
  1050       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
  1051       // We trust the size of any object that has a non-NULL
  1052       // klass and (for those in the perm gen) is parsable
  1053       // -- irrespective of its conc_safe-ty.
  1054       if (k != NULL) {
  1055         assert(k->is_klass(), "Should really be klass oop.");
  1056         oop o = (oop)p;
  1057         assert(o->is_oop(), "Should be an oop");
  1058         size_t res = o->size_given_klass(k);
  1059         res = adjustObjectSize(res);
  1060         assert(res != 0, "Block size should not be 0");
  1061         return res;
  1062       } else {
  1063         // May return 0 if P-bits not present.
   1064         return c->block_size_if_printezis_bits(p);
   1065       }
   1066     }
  1067     assert(loops == 0, "Can loop at most once");
   1068     DEBUG_ONLY(loops++;)
   1069   }
   1070 }
  1072 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
  1073   NOT_PRODUCT(verify_objects_initialized());
  1074   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  1075   FreeChunk* fc = (FreeChunk*)p;
  1076   if (fc->is_free()) {
  1077     return fc->size();
  1078   } else {
  1079     // Ignore mark word because this may be a recently promoted
  1080     // object whose mark word is used to chain together grey
  1081     // objects (the last one would have a null value).
  1082     assert(oop(p)->is_oop(true), "Should be an oop");
   1083     return adjustObjectSize(oop(p)->size());
   1084   }
   1085 }
  1087 // This implementation assumes that the property of "being an object" is
  1088 // stable.  But being a free chunk may not be (because of parallel
  1089 // promotion.)
  1090 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
  1091   FreeChunk* fc = (FreeChunk*)p;
  1092   assert(is_in_reserved(p), "Should be in space");
  1093   // When doing a mark-sweep-compact of the CMS generation, this
  1094   // assertion may fail because prepare_for_compaction() uses
  1095   // space that is garbage to maintain information on ranges of
  1096   // live objects so that these live ranges can be moved as a whole.
  1097   // Comment out this assertion until that problem can be solved
   1098   // (i.e., the block start calculation may look at objects
   1099   // at addresses below "p" in finding the object that contains "p",
   1100   // and those objects (if garbage) may have been modified to hold
   1101   // live range information).
  1102   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
  1103   //        "Should be a block boundary");
  1104   if (FreeChunk::indicatesFreeChunk(p)) return false;
  1105   Klass* k = oop(p)->klass_or_null();
  1106   if (k != NULL) {
  1107     // Ignore mark word because it may have been used to
  1108     // chain together promoted objects (the last one
  1109     // would have a null value).
  1110     assert(oop(p)->is_oop(true), "Should be an oop");
  1111     return true;
  1112   } else {
   1113     return false;  // Was not an object at the start of collection.
   1114   }
   1115 }
  1117 // Check if the object is alive. This fact is checked either by consulting
  1118 // the main marking bitmap in the sweeping phase or, if it's a permanent
  1119 // generation and we're not in the sweeping phase, by checking the
  1120 // perm_gen_verify_bit_map where we store the "deadness" information if
  1121 // we did not sweep the perm gen in the most recent previous GC cycle.
  1122 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
  1123   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
  1124          "Else races are possible");
  1125   assert(block_is_obj(p), "The address should point to an object");
  1127   // If we're sweeping, we use object liveness information from the main bit map
  1128   // for both perm gen and old gen.
  1129   // We don't need to lock the bitmap (live_map or dead_map below), because
  1130   // EITHER we are in the middle of the sweeping phase, and the
  1131   // main marking bit map (live_map below) is locked,
  1132   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
  1133   // is stable, because it's mutated only in the sweeping phase.
  1134   // NOTE: This method is also used by jmap where, if class unloading is
  1135   // off, the results can return "false" for legitimate perm objects,
  1136   // when we are not in the midst of a sweeping phase, which can result
  1137   // in jmap not reporting certain perm gen objects. This will be moot
  1138   // if/when the perm gen goes away in the future.
  1139   if (_collector->abstract_state() == CMSCollector::Sweeping) {
  1140     CMSBitMap* live_map = _collector->markBitMap();
   1141     return live_map->par_isMarked((HeapWord*) p);
   1142   }
   1143   return true;
   1144 }
  1146 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
  1147   FreeChunk* fc = (FreeChunk*)p;
  1148   assert(is_in_reserved(p), "Should be in space");
  1149   assert(_bt.block_start(p) == p, "Should be a block boundary");
  1150   if (!fc->is_free()) {
  1151     // Ignore mark word because it may have been used to
  1152     // chain together promoted objects (the last one
  1153     // would have a null value).
  1154     assert(oop(p)->is_oop(true), "Should be an oop");
  1155     return true;
  1157   return false;
  1160 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
  1161 // approximate answer if you don't hold freelistLock() when you call this.
  1162 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
  1163   size_t size = 0;
  1164   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  1165     debug_only(
  1166       // We may be calling here without the lock in which case we
  1167       // won't do this modest sanity check.
  1168       if (freelistLock()->owned_by_self()) {
  1169         size_t total_list_size = 0;
  1170         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
  1171           fc = fc->next()) {
  1172           total_list_size += i;
  1174         assert(total_list_size == i * _indexedFreeList[i].count(),
  1175                "Count in list is incorrect");
  1178     size += i * _indexedFreeList[i].count();
  1180   return size;
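// Illustrative note (not part of the implementation): the total computed by
// totalSizeInIndexedFreeLists() above is simply the sum of i * count_i over
// the indexed lists.  For example, with hypothetical counts
//   _indexedFreeList[4].count() == 10 and _indexedFreeList[6].count() == 3
// (all other lists empty), the method would return 4*10 + 6*3 = 58 words.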
  1183 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  1184   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  1185   return allocate(size);
  1188 HeapWord*
  1189 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
  1190   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
  1193 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
  1194   assert_lock_strong(freelistLock());
  1195   HeapWord* res = NULL;
  1196   assert(size == adjustObjectSize(size),
  1197          "use adjustObjectSize() before calling into allocate()");
  1199   if (_adaptive_freelists) {
  1200     res = allocate_adaptive_freelists(size);
  1201   } else {  // non-adaptive free lists
  1202     res = allocate_non_adaptive_freelists(size);
  1205   if (res != NULL) {
  1206     // check that res does lie in this space!
  1207     assert(is_in_reserved(res), "Not in this space!");
  1208     assert(is_aligned((void*)res), "alignment check");
  1210     FreeChunk* fc = (FreeChunk*)res;
  1211     fc->markNotFree();
  1212     assert(!fc->is_free(), "shouldn't be marked free");
  1213     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
  1214     // Verify that the block offset table shows this to
  1215     // be a single block, but not one which is unallocated.
  1216     _bt.verify_single_block(res, size);
  1217     _bt.verify_not_unallocated(res, size);
  1218     // mangle a just allocated object with a distinct pattern.
  1219     debug_only(fc->mangleAllocated(size));
  1222   return res;
  1225 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
  1226   HeapWord* res = NULL;
  1227   // try and use linear allocation for smaller blocks
  1228   if (size < _smallLinearAllocBlock._allocation_size_limit) {
  1229     // if successful, the following also adjusts block offset table
  1230     res = getChunkFromSmallLinearAllocBlock(size);
  1232   // Else triage to indexed lists for smaller sizes
  1233   if (res == NULL) {
  1234     if (size < SmallForDictionary) {
  1235       res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1236     } else {
  1237       // else get it from the big dictionary; if even this doesn't
  1238       // work we are out of luck.
  1239       res = (HeapWord*)getChunkFromDictionaryExact(size);
  1243   return res;
  1246 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
  1247   assert_lock_strong(freelistLock());
  1248   HeapWord* res = NULL;
  1249   assert(size == adjustObjectSize(size),
  1250          "use adjustObjectSize() before calling into allocate()");
  1252   // Strategy (an illustrative walk-through follows this function):
  1253   //   if small
  1254   //     exact size from the small-object indexed lists
  1255   //     small or large linear allocation block (linAB) as appropriate
  1256   //     take from lists of greater sized chunks
  1257   //   else
  1258   //     dictionary
  1259   //     small or large linear allocation block if it has the space
  1260   // Try allocating exact size from indexTable first
  1261   if (size < IndexSetSize) {
  1262     res = (HeapWord*) getChunkFromIndexedFreeList(size);
  1263     if(res != NULL) {
  1264       assert(res != (HeapWord*)_indexedFreeList[size].head(),
  1265         "Not removed from free list");
  1266       // no block offset table adjustment is necessary on blocks in
  1267       // the indexed lists.
  1269     // Try allocating from the small LinAB
  1270     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
  1271         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
  1272         // if successful, the above also adjusts block offset table
  1273         // Note that this call will refill the LinAB to
  1274         // satisfy the request.  This is different than
  1275         // evm.
  1276         // Don't record chunk off a LinAB?  smallSplitBirth(size);
  1277     } else {
  1278       // Raid the exact free lists larger than size, even if they are not
  1279       // overpopulated.
  1280       res = (HeapWord*) getChunkFromGreater(size);
  1282   } else {
  1283     // Big objects get allocated directly from the dictionary.
  1284     res = (HeapWord*) getChunkFromDictionaryExact(size);
  1285     if (res == NULL) {
  1286       // Try hard not to fail since an allocation failure will likely
  1287       // trigger a synchronous GC.  Try to get the space from the
  1288       // allocation blocks.
  1289       res = getChunkFromSmallLinearAllocBlockRemainder(size);
  1293   return res;
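// Illustrative walk-through of the strategy in allocate_adaptive_freelists()
// above (a sketch with hypothetical sizes, not part of the implementation):
//   - size = 20 words, IndexSetSize = 257: try _indexedFreeList[20] first;
//     if that list is empty, try the small linAB; if the linAB cannot satisfy
//     the request, raid an exact list of some larger size via
//     getChunkFromGreater().
//   - size = 300 words (>= IndexSetSize): go straight to the dictionary via
//     getChunkFromDictionaryExact(); if that fails, fall back to the remainder
//     of the small linAB as a last resort before giving up.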
  1296 // A worst-case estimate of the space required (in HeapWords) to expand the heap
  1297 // when promoting obj.
  1298 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  1299   // Depending on the object size, expansion may require refilling either a
  1300   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  2 * MinChunkSize
  1301   // is added because the dictionary may over-allocate to avoid fragmentation.
  1302   size_t space = obj_size;
  1303   if (!_adaptive_freelists) {
  1304     space = MAX2(space, _smallLinearAllocBlock._refillSize);
  1306   space += _promoInfo.refillSize() + 2 * MinChunkSize;
  1307   return space;
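// Worked example for expansionSpaceRequired() above (hypothetical numbers,
// offered only as an illustration of the worst-case estimate):
//   obj_size = 100 words, non-adaptive free lists with
//   _smallLinearAllocBlock._refillSize = 1024 words,
//   _promoInfo.refillSize() = 128 words, MinChunkSize = 4 words:
//     space = MAX2(100, 1024) + 128 + 2*4 = 1160 words
// With adaptive free lists the MAX2 step is skipped, giving
//     space = 100 + 128 + 2*4 = 236 words.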
  1310 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
  1311   FreeChunk* ret;
  1313   assert(numWords >= MinChunkSize, "Size is less than minimum");
  1314   assert(linearAllocationWouldFail() || bestFitFirst(),
  1315     "Should not be here");
  1317   size_t i;
  1318   size_t currSize = numWords + MinChunkSize;
  1319   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
  1320   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
  1321     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
  1322     if (fl->head()) {
  1323       ret = getFromListGreater(fl, numWords);
  1324       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
  1325       return ret;
  1329   currSize = MAX2((size_t)SmallForDictionary,
  1330                   (size_t)(numWords + MinChunkSize));
  1332   /* Try to get a chunk that satisfies request, while avoiding
  1333      fragmentation that can't be handled. */
  1335     ret =  dictionary()->get_chunk(currSize);
  1336     if (ret != NULL) {
  1337       assert(ret->size() - numWords >= MinChunkSize,
  1338              "Chunk is too small");
  1339       _bt.allocated((HeapWord*)ret, ret->size());
  1340       /* Carve returned chunk. */
  1341       (void) splitChunkAndReturnRemainder(ret, numWords);
  1342       /* Label this as no longer a free chunk. */
  1343       assert(ret->is_free(), "This chunk should be free");
  1344       ret->link_prev(NULL);
  1346     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
  1347     return ret;
  1349   ShouldNotReachHere();
  1352 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
  1353   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
  1354   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
  1357 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
  1358   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
  1359          (_smallLinearAllocBlock._word_size == fc->size()),
  1360          "Linear allocation block shows incorrect size");
  1361   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
  1362           (_smallLinearAllocBlock._word_size == fc->size()));
  1365 // Check if the purported free chunk is present either as a linear
  1366 // allocation block, the size-indexed table of (smaller) free blocks,
  1367 // or the larger free blocks kept in the binary tree dictionary.
  1368 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
  1369   if (verify_chunk_is_linear_alloc_block(fc)) {
  1370     return true;
  1371   } else if (fc->size() < IndexSetSize) {
  1372     return verifyChunkInIndexedFreeLists(fc);
  1373   } else {
  1374     return dictionary()->verify_chunk_in_free_list(fc);
  1378 #ifndef PRODUCT
  1379 void CompactibleFreeListSpace::assert_locked() const {
  1380   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
  1383 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
  1384   CMSLockVerifier::assert_locked(lock);
  1386 #endif
  1388 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
  1389   // In the parallel case, the main thread holds the free list lock
  1390   // on behalf of the parallel threads.
  1391   FreeChunk* fc;
  1393     // If GC is parallel, this might be called by several threads.
  1394     // This should be rare enough that the locking overhead won't affect
  1395     // the sequential code.
  1396     MutexLockerEx x(parDictionaryAllocLock(),
  1397                     Mutex::_no_safepoint_check_flag);
  1398     fc = getChunkFromDictionary(size);
  1400   if (fc != NULL) {
  1401     fc->dontCoalesce();
  1402     assert(fc->is_free(), "Should be free, but not coalescable");
  1403     // Verify that the block offset table shows this to
  1404     // be a single block, but not one which is unallocated.
  1405     _bt.verify_single_block((HeapWord*)fc, fc->size());
  1406     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  1408   return fc;
  1411 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
  1412   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  1413   assert_locked();
  1415   // if we are tracking promotions, then first ensure space for
  1416   // promotion (including spooling space for saving header if necessary).
  1417   // then allocate and copy, then track promoted info if needed.
  1418   // When tracking (see PromotionInfo::track()), the mark word may
  1419   // be displaced and in this case restoration of the mark word
  1420   // occurs in the (oop_since_save_marks_)iterate phase.
  1421   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
  1422     return NULL;
  1424   // Call the allocate(size_t, bool) form directly to avoid the
  1425   // additional call through the allocate(size_t) form.  Having
  1426   // the compile inline the call is problematic because allocate(size_t)
  1427   // is a virtual method.
  1428   HeapWord* res = allocate(adjustObjectSize(obj_size));
  1429   if (res != NULL) {
  1430     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
  1431     // if we should be tracking promotions, do so.
  1432     if (_promoInfo.tracking()) {
  1433         _promoInfo.track((PromotedObject*)res);
  1436   return oop(res);
  1439 HeapWord*
  1440 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
  1441   assert_locked();
  1442   assert(size >= MinChunkSize, "minimum chunk size");
  1443   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
  1444     "maximum from smallLinearAllocBlock");
  1445   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
  1448 HeapWord*
  1449 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
  1450                                                        size_t size) {
  1451   assert_locked();
  1452   assert(size >= MinChunkSize, "too small");
  1453   HeapWord* res = NULL;
  1454   // Try to do linear allocation from blk, making sure the block is usable.
  1455   if (blk->_word_size == 0) {
  1456     // We have probably been unable to fill this block, either in the prologue
  1457     // or after it was exhausted at the last linear allocation. Bail out until
  1458     // next time.
  1459     assert(blk->_ptr == NULL, "consistency check");
  1460     return NULL;
  1462   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
  1463   res = getChunkFromLinearAllocBlockRemainder(blk, size);
  1464   if (res != NULL) return res;
  1466   // about to exhaust this linear allocation block
  1467   if (blk->_word_size == size) { // exactly satisfied
  1468     res = blk->_ptr;
  1469     _bt.allocated(res, blk->_word_size);
  1470   } else if (size + MinChunkSize <= blk->_refillSize) {
  1471     size_t sz = blk->_word_size;
  1472     // Update _unallocated_block if the size is such that chunk would be
  1473     // returned to the indexed free list.  All other chunks in the indexed
  1474     // free lists are allocated from the dictionary so that _unallocated_block
  1475     // has already been adjusted for them.  Do it here so that the update
  1476     // is done for all chunks added back to the indexed free lists.
  1477     if (sz < SmallForDictionary) {
  1478       _bt.allocated(blk->_ptr, sz);
  1480     // Return the chunk that isn't big enough, and then refill below.
  1481     addChunkToFreeLists(blk->_ptr, sz);
  1482     split_birth(sz);
  1483     // Don't keep statistics on adding back chunk from a LinAB.
  1484   } else {
  1485     // A refilled block would not satisfy the request.
  1486     return NULL;
  1489   blk->_ptr = NULL; blk->_word_size = 0;
  1490   refillLinearAllocBlock(blk);
  1491   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
  1492          "block was replenished");
  1493   if (res != NULL) {
  1494     split_birth(size);
  1495     repairLinearAllocBlock(blk);
  1496   } else if (blk->_ptr != NULL) {
  1497     res = blk->_ptr;
  1498     size_t blk_size = blk->_word_size;
  1499     blk->_word_size -= size;
  1500     blk->_ptr  += size;
  1501     split_birth(size);
  1502     repairLinearAllocBlock(blk);
  1503     // Update BOT last so that other (parallel) GC threads see a consistent
  1504     // view of the BOT and free blocks.
  1505     // Above must occur before BOT is updated below.
  1506     OrderAccess::storestore();
  1507     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1509   return res;
  1512 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
  1513                                         LinearAllocBlock* blk,
  1514                                         size_t size) {
  1515   assert_locked();
  1516   assert(size >= MinChunkSize, "too small");
  1518   HeapWord* res = NULL;
  1519   // This is the common case.  Keep it simple.
  1520   if (blk->_word_size >= size + MinChunkSize) {
  1521     assert(blk->_ptr != NULL, "consistency check");
  1522     res = blk->_ptr;
  1523     // Note that the BOT is up-to-date for the linAB before allocation.  It
  1524     // indicates the start of the linAB.  The split_block() updates the
  1525     // BOT for the linAB after the allocation (indicates the start of the
  1526     // next chunk to be allocated).
  1527     size_t blk_size = blk->_word_size;
  1528     blk->_word_size -= size;
  1529     blk->_ptr  += size;
  1530     split_birth(size);
  1531     repairLinearAllocBlock(blk);
  1532     // Update BOT last so that other (parallel) GC threads see a consistent
  1533     // view of the BOT and free blocks.
  1534     // Above must occur before BOT is updated below.
  1535     OrderAccess::storestore();
  1536     _bt.split_block(res, blk_size, size);  // adjust block offset table
  1537     _bt.allocated(res, size);
  1539   return res;
  1542 FreeChunk*
  1543 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
  1544   assert_locked();
  1545   assert(size < SmallForDictionary, "just checking");
  1546   FreeChunk* res;
  1547   res = _indexedFreeList[size].get_chunk_at_head();
  1548   if (res == NULL) {
  1549     res = getChunkFromIndexedFreeListHelper(size);
  1551   _bt.verify_not_unallocated((HeapWord*) res, size);
  1552   assert(res == NULL || res->size() == size, "Incorrect block size");
  1553   return res;
  1556 FreeChunk*
  1557 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
  1558   bool replenish) {
  1559   assert_locked();
  1560   FreeChunk* fc = NULL;
  1561   if (size < SmallForDictionary) {
  1562     assert(_indexedFreeList[size].head() == NULL ||
  1563       _indexedFreeList[size].surplus() <= 0,
  1564       "List for this size should be empty or under populated");
  1565     // Try best fit in exact lists before replenishing the list
  1566     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
  1567       // Replenish list.
  1568       //
  1569       // Things tried that failed.
  1570       //   Tried allocating out of the two LinAB's first before
  1571       // replenishing lists.
  1572       //   Tried small linAB of size 256 (size in indexed list)
  1573       // and replenishing indexed lists from the small linAB.
  1574       //
  1575       FreeChunk* newFc = NULL;
  1576       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
  1577       if (replenish_size < SmallForDictionary) {
  1578         // Do not replenish from an underpopulated size.
  1579         if (_indexedFreeList[replenish_size].surplus() > 0 &&
  1580             _indexedFreeList[replenish_size].head() != NULL) {
  1581           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
  1582         } else if (bestFitFirst()) {
  1583           newFc = bestFitSmall(replenish_size);
  1586       if (newFc == NULL && replenish_size > size) {
  1587         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
  1588         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
  1590       // Note: The stats update for the split-death of the block obtained
  1591       // above is recorded below, precisely when we know we are actually
  1592       // going to split it into more than one piece.
  1593       if (newFc != NULL) {
  1594         if  (replenish || CMSReplenishIntermediate) {
  1595           // Replenish this list and return one block to caller.
  1596           size_t i;
  1597           FreeChunk *curFc, *nextFc;
  1598           size_t num_blk = newFc->size() / size;
  1599           assert(num_blk >= 1, "Smaller than requested?");
  1600           assert(newFc->size() % size == 0, "Should be integral multiple of request");
  1601           if (num_blk > 1) {
  1602             // we are sure we will be splitting the block just obtained
  1603             // into multiple pieces; record the split-death of the original
  1604             splitDeath(replenish_size);
  1606           // carve up and link blocks 0, ..., num_blk - 2
  1607           // The last chunk is not added to the lists but is returned as the
  1608           // free chunk.
  1609           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
  1610                i = 0;
  1611                i < (num_blk - 1);
  1612                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
  1613                i++) {
  1614             curFc->set_size(size);
  1615             // Don't record this as a return in order to try and
  1616             // determine the "returns" from a GC.
  1617             _bt.verify_not_unallocated((HeapWord*) curFc, size);
  1618             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
  1619             _bt.mark_block((HeapWord*)curFc, size);
  1620             split_birth(size);
  1621             // Don't record the initial population of the indexed list
  1622             // as a split birth.
  1625           // check that the arithmetic was OK above
  1626           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
  1627             "inconsistency in carving newFc");
  1628           curFc->set_size(size);
  1629           _bt.mark_block((HeapWord*)curFc, size);
  1630           split_birth(size);
  1631           fc = curFc;
  1632         } else {
  1633           // Return entire block to caller
  1634           fc = newFc;
  1638   } else {
  1639     // Get a free chunk from the free chunk dictionary to be returned to
  1640     // replenish the indexed free list.
  1641     fc = getChunkFromDictionaryExact(size);
  1643   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
  1644   return fc;
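// Illustrative walk-through of the replenishment path above (hypothetical
// numbers; a sketch, not part of the implementation): for a request of
// size = 8 words with CMSIndexedFreeListReplenish = 4, replenish_size = 32.
// If a 32-word chunk newFc is obtained, num_blk = 32 / 8 = 4; the first
// num_blk - 1 = 3 pieces of 8 words are carved off and appended to
// _indexedFreeList[8], and the last 8-word piece is returned to the caller.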
  1647 FreeChunk*
  1648 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
  1649   assert_locked();
  1650   FreeChunk* fc = _dictionary->get_chunk(size);
  1651   if (fc == NULL) {
  1652     return NULL;
  1654   _bt.allocated((HeapWord*)fc, fc->size());
  1655   if (fc->size() >= size + MinChunkSize) {
  1656     fc = splitChunkAndReturnRemainder(fc, size);
  1658   assert(fc->size() >= size, "chunk too small");
  1659   assert(fc->size() < size + MinChunkSize, "chunk too big");
  1660   _bt.verify_single_block((HeapWord*)fc, fc->size());
  1661   return fc;
  1664 FreeChunk*
  1665 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
  1666   assert_locked();
  1667   FreeChunk* fc = _dictionary->get_chunk(size);
  1668   if (fc == NULL) {
  1669     return fc;
  1671   _bt.allocated((HeapWord*)fc, fc->size());
  1672   if (fc->size() == size) {
  1673     _bt.verify_single_block((HeapWord*)fc, size);
  1674     return fc;
  1676   assert(fc->size() > size, "get_chunk() guarantee");
  1677   if (fc->size() < size + MinChunkSize) {
  1678     // Return the chunk to the dictionary and go get a bigger one.
  1679     returnChunkToDictionary(fc);
  1680     fc = _dictionary->get_chunk(size + MinChunkSize);
  1681     if (fc == NULL) {
  1682       return NULL;
  1684     _bt.allocated((HeapWord*)fc, fc->size());
  1686   assert(fc->size() >= size + MinChunkSize, "tautology");
  1687   fc = splitChunkAndReturnRemainder(fc, size);
  1688   assert(fc->size() == size, "chunk is wrong size");
  1689   _bt.verify_single_block((HeapWord*)fc, size);
  1690   return fc;
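// Illustrative example of the retry logic in getChunkFromDictionaryExact()
// above (hypothetical sizes): with size = 300 and MinChunkSize = 4, a chunk
// of 302 words returned by the dictionary is too small to split (302 < 304),
// so it is returned to the dictionary and a chunk of at least 304 words is
// requested instead; the larger chunk is then split, the 300-word piece is
// returned to the caller, and the remainder goes back onto the free lists.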
  1693 void
  1694 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
  1695   assert_locked();
  1697   size_t size = chunk->size();
  1698   _bt.verify_single_block((HeapWord*)chunk, size);
  1699   // adjust _unallocated_block downward, as necessary
  1700   _bt.freed((HeapWord*)chunk, size);
  1701   _dictionary->return_chunk(chunk);
  1702 #ifndef PRODUCT
  1703   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1704     TreeChunk<FreeChunk, AdaptiveFreeList>* tc = TreeChunk<FreeChunk, AdaptiveFreeList>::as_TreeChunk(chunk);
  1705     TreeList<FreeChunk, AdaptiveFreeList>* tl = tc->list();
  1706     tl->verify_stats();
  1708 #endif // PRODUCT
  1711 void
  1712 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
  1713   assert_locked();
  1714   size_t size = fc->size();
  1715   _bt.verify_single_block((HeapWord*) fc, size);
  1716   _bt.verify_not_unallocated((HeapWord*) fc, size);
  1717   if (_adaptive_freelists) {
  1718     _indexedFreeList[size].return_chunk_at_tail(fc);
  1719   } else {
  1720     _indexedFreeList[size].return_chunk_at_head(fc);
  1722 #ifndef PRODUCT
  1723   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
  1724      _indexedFreeList[size].verify_stats();
  1726 #endif // PRODUCT
  1729 // Add chunk to end of last block -- if it's the largest
  1730 // block -- and update BOT and census data. We would
  1731 // of course have preferred to coalesce it with the
  1732 // last block, but it's currently less expensive to find the
  1733 // largest block than it is to find the last.
  1734 void
  1735 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
  1736   HeapWord* chunk, size_t     size) {
  1737   // check that the chunk does lie in this space!
  1738   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1739   // One of the parallel gc task threads may be here
  1740   // whilst others are allocating.
  1741   Mutex* lock = NULL;
  1742   if (ParallelGCThreads != 0) {
  1743     lock = &_parDictionaryAllocLock;
  1745   FreeChunk* ec;
  1747     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1748     ec = dictionary()->find_largest_dict();  // get largest block
  1749     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
  1750       // It's a coterminal block - we can coalesce.
  1751       size_t old_size = ec->size();
  1752       coalDeath(old_size);
  1753       removeChunkFromDictionary(ec);
  1754       size += old_size;
  1755     } else {
  1756       ec = (FreeChunk*)chunk;
  1759   ec->set_size(size);
  1760   debug_only(ec->mangleFreed(size));
  1761   if (size < SmallForDictionary) {
  1762     lock = _indexedFreeListParLocks[size];
  1764   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
  1765   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  1766   // record the birth under the lock since the recording involves
  1767   // manipulation of the list on which the chunk lives and
  1768   // if the chunk is allocated and is the last on the list,
  1769   // the list can go away.
  1770   coalBirth(size);
  1773 void
  1774 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
  1775                                               size_t     size) {
  1776   // check that the chunk does lie in this space!
  1777   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
  1778   assert_locked();
  1779   _bt.verify_single_block(chunk, size);
  1781   FreeChunk* fc = (FreeChunk*) chunk;
  1782   fc->set_size(size);
  1783   debug_only(fc->mangleFreed(size));
  1784   if (size < SmallForDictionary) {
  1785     returnChunkToFreeList(fc);
  1786   } else {
  1787     returnChunkToDictionary(fc);
  1791 void
  1792 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
  1793   size_t size, bool coalesced) {
  1794   assert_locked();
  1795   assert(chunk != NULL, "null chunk");
  1796   if (coalesced) {
  1797     // repair BOT
  1798     _bt.single_block(chunk, size);
  1800   addChunkToFreeLists(chunk, size);
  1803 // We _must_ find the purported chunk on our free lists;
  1804 // we assert if we don't.
  1805 void
  1806 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
  1807   size_t size = fc->size();
  1808   assert_locked();
  1809   debug_only(verifyFreeLists());
  1810   if (size < SmallForDictionary) {
  1811     removeChunkFromIndexedFreeList(fc);
  1812   } else {
  1813     removeChunkFromDictionary(fc);
  1815   _bt.verify_single_block((HeapWord*)fc, size);
  1816   debug_only(verifyFreeLists());
  1819 void
  1820 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
  1821   size_t size = fc->size();
  1822   assert_locked();
  1823   assert(fc != NULL, "null chunk");
  1824   _bt.verify_single_block((HeapWord*)fc, size);
  1825   _dictionary->remove_chunk(fc);
  1826   // adjust _unallocated_block upward, as necessary
  1827   _bt.allocated((HeapWord*)fc, size);
  1830 void
  1831 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
  1832   assert_locked();
  1833   size_t size = fc->size();
  1834   _bt.verify_single_block((HeapWord*)fc, size);
  1835   NOT_PRODUCT(
  1836     if (FLSVerifyIndexTable) {
  1837       verifyIndexedFreeList(size);
  1840   _indexedFreeList[size].remove_chunk(fc);
  1841   NOT_PRODUCT(
  1842     if (FLSVerifyIndexTable) {
  1843       verifyIndexedFreeList(size);
  1848 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
  1849   /* A hint is the next larger size that has a surplus.
  1850      Start search at a size large enough to guarantee that
  1851      the excess is >= MinChunkSize. */
  1852   size_t start = align_object_size(numWords + MinChunkSize);
  1853   if (start < IndexSetSize) {
  1854     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
  1855     size_t    hint = _indexedFreeList[start].hint();
  1856     while (hint < IndexSetSize) {
  1857       assert(hint % MinObjAlignment == 0, "hint should be aligned");
  1858       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
  1859       if (fl->surplus() > 0 && fl->head() != NULL) {
  1860         // Found a list with surplus, reset original hint
  1861         // and split out a free chunk which is returned.
  1862         _indexedFreeList[start].set_hint(hint);
  1863         FreeChunk* res = getFromListGreater(fl, numWords);
  1864         assert(res == NULL || res->is_free(),
  1865           "Should be returning a free chunk");
  1866         return res;
  1868       hint = fl->hint(); /* keep looking */
  1870     /* None found. */
  1871     it[start].set_hint(IndexSetSize);
  1873   return NULL;
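/* Illustrative example of the hint chain used by bestFitSmall() above
   (hypothetical numbers): for numWords = 8 and MinChunkSize = 4,
   start = 12.  Suppose _indexedFreeList[12].hint() == 16, list 16 has no
   surplus, and list 16's hint is 24, where list 24 has a surplus and a
   non-NULL head: the hint of list 12 is reset to 24 and an 8-word chunk is
   split out of a 24-word chunk via getFromListGreater(), leaving a 16-word
   remainder on the free lists. */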
  1876 /* Requires fl->size >= numWords + MinChunkSize */
  1877 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
  1878   size_t numWords) {
  1879   FreeChunk *curr = fl->head();
  1880   size_t oldNumWords = curr->size();
  1881   assert(numWords >= MinChunkSize, "Word size is too small");
  1882   assert(curr != NULL, "List is empty");
  1883   assert(oldNumWords >= numWords + MinChunkSize,
  1884         "Size of chunks in the list is too small");
  1886   fl->remove_chunk(curr);
  1887   // recorded indirectly by splitChunkAndReturnRemainder -
  1888   // smallSplit(oldNumWords, numWords);
  1889   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
  1890   // Does anything have to be done for the remainder in terms of
  1891   // fixing the card table?
  1892   assert(new_chunk == NULL || new_chunk->is_free(),
  1893     "Should be returning a free chunk");
  1894   return new_chunk;
  1897 FreeChunk*
  1898 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
  1899   size_t new_size) {
  1900   assert_locked();
  1901   size_t size = chunk->size();
  1902   assert(size > new_size, "Split from a smaller block?");
  1903   assert(is_aligned(chunk), "alignment problem");
  1904   assert(size == adjustObjectSize(size), "alignment problem");
  1905   size_t rem_size = size - new_size;
  1906   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
  1907   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
  1908   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
  1909   assert(is_aligned(ffc), "alignment problem");
  1910   ffc->set_size(rem_size);
  1911   ffc->link_next(NULL);
  1912   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
  1913   // Above must occur before BOT is updated below.
  1914   // adjust block offset table
  1915   OrderAccess::storestore();
  1916   assert(chunk->is_free() && ffc->is_free(), "Error");
  1917   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
  1918   if (rem_size < SmallForDictionary) {
  1919     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  1920     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
  1921     assert(!is_par ||
  1922            (SharedHeap::heap()->n_par_threads() ==
  1923             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  1924     returnChunkToFreeList(ffc);
  1925     split(size, rem_size);
  1926     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
  1927   } else {
  1928     returnChunkToDictionary(ffc);
  1929     split(size ,rem_size);
  1931   chunk->set_size(new_size);
  1932   return chunk;
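// Worked example for splitChunkAndReturnRemainder() above (hypothetical
// sizes): splitting a 100-word chunk to new_size = 60 produces a 40-word
// remainder starting 60 words into the original chunk.  The remainder is
// marked free, the BOT is updated via split_block(), the remainder is
// returned to the indexed lists if 40 < SmallForDictionary (otherwise to the
// dictionary), and the original chunk, now resized to 60 words, is returned
// to the caller.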
  1935 void
  1936 CompactibleFreeListSpace::sweep_completed() {
  1937   // Now that space is probably plentiful, refill linear
  1938   // allocation blocks as needed.
  1939   refillLinearAllocBlocksIfNeeded();
  1942 void
  1943 CompactibleFreeListSpace::gc_prologue() {
  1944   assert_locked();
  1945   if (PrintFLSStatistics != 0) {
  1946     gclog_or_tty->print("Before GC:\n");
  1947     reportFreeListStatistics();
  1949   refillLinearAllocBlocksIfNeeded();
  1952 void
  1953 CompactibleFreeListSpace::gc_epilogue() {
  1954   assert_locked();
  1955   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
  1956     if (_smallLinearAllocBlock._word_size == 0)
  1957       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
  1959   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1960   _promoInfo.stopTrackingPromotions();
  1961   repairLinearAllocationBlocks();
  1962   // Print Space's stats
  1963   if (PrintFLSStatistics != 0) {
  1964     gclog_or_tty->print("After GC:\n");
  1965     reportFreeListStatistics();
  1969 // Iteration support, mostly delegated from a CMS generation
  1971 void CompactibleFreeListSpace::save_marks() {
  1972   assert(Thread::current()->is_VM_thread(),
  1973          "Global variable should only be set when single-threaded");
  1974   // Mark the "end" of the used space at the time of this call;
  1975   // note, however, that promoted objects from this point
  1976   // on are tracked in the _promoInfo below.
  1977   set_saved_mark_word(unallocated_block());
  1978 #ifdef ASSERT
  1979   // Check the sanity of save_marks() etc.
  1980   MemRegion ur    = used_region();
  1981   MemRegion urasm = used_region_at_save_marks();
  1982   assert(ur.contains(urasm),
  1983          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
  1984                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
  1985                  ur.start(), ur.end(), urasm.start(), urasm.end()));
  1986 #endif
  1987   // inform allocator that promotions should be tracked.
  1988   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
  1989   _promoInfo.startTrackingPromotions();
  1992 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
  1993   assert(_promoInfo.tracking(), "No preceding save_marks?");
  1994   assert(SharedHeap::heap()->n_par_threads() == 0,
  1995          "Shouldn't be called if using parallel gc.");
  1996   return _promoInfo.noPromotions();
  1999 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
  2001 void CompactibleFreeListSpace::                                             \
  2002 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  2003   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
  2004          "Shouldn't be called (yet) during parallel part of gc.");          \
  2005   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
  2006   /*                                                                        \
  2007    * This also restores any displaced headers and removes the elements from \
  2008    * the iteration set as they are processed, so that we have a clean slate \
  2009    * at the end of the iteration. Note, thus, that if new objects are       \
  2010    * promoted as a result of the iteration they are iterated over as well.  \
  2011    */                                                                       \
  2012   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
  2015 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
  2018 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
  2019   // ugghh... how would one do this efficiently for a non-contiguous space?
  2020   guarantee(false, "NYI");
  2023 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
  2024   return _smallLinearAllocBlock._word_size == 0;
  2027 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
  2028   // Fix up linear allocation blocks to look like free blocks
  2029   repairLinearAllocBlock(&_smallLinearAllocBlock);
  2032 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
  2033   assert_locked();
  2034   if (blk->_ptr != NULL) {
  2035     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
  2036            "Minimum block size requirement");
  2037     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
  2038     fc->set_size(blk->_word_size);
  2039     fc->link_prev(NULL);   // mark as free
  2040     fc->dontCoalesce();
  2041     assert(fc->is_free(), "just marked it free");
  2042     assert(fc->cantCoalesce(), "just marked it uncoalescable");
  2046 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
  2047   assert_locked();
  2048   if (_smallLinearAllocBlock._ptr == NULL) {
  2049     assert(_smallLinearAllocBlock._word_size == 0,
  2050       "Size of linAB should be zero if the ptr is NULL");
  2051     // Reset the linAB refill and allocation size limit.
  2052     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
  2054   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
  2057 void
  2058 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
  2059   assert_locked();
  2060   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
  2061          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
  2062          "blk invariant");
  2063   if (blk->_ptr == NULL) {
  2064     refillLinearAllocBlock(blk);
  2066   if (PrintMiscellaneous && Verbose) {
  2067     if (blk->_word_size == 0) {
  2068       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
  2073 void
  2074 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
  2075   assert_locked();
  2076   assert(blk->_word_size == 0 && blk->_ptr == NULL,
  2077          "linear allocation block should be empty");
  2078   FreeChunk* fc;
  2079   if (blk->_refillSize < SmallForDictionary &&
  2080       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
  2081     // A linAB's strategy might be to use small sizes to reduce
  2082     // fragmentation but still get the benefits of allocation from a
  2083     // linAB.
  2084   } else {
  2085     fc = getChunkFromDictionary(blk->_refillSize);
  2087   if (fc != NULL) {
  2088     blk->_ptr  = (HeapWord*)fc;
  2089     blk->_word_size = fc->size();
  2090     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
  2094 // Support for concurrent collection policy decisions.
  2095 bool CompactibleFreeListSpace::should_concurrent_collect() const {
  2096   // In the future we might want to add in fragmentation stats --
  2097   // including erosion of the "mountain" -- into this decision as well.
  2098   return !adaptive_freelists() && linearAllocationWouldFail();
  2101 // Support for compaction
  2103 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
  2104   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
  2105   // prepare_for_compaction() uses the space between live objects
  2106   // so that later phases can skip dead space quickly.  As a result,
  2107   // verification of the free lists doesn't work after this point.
  2110 #define obj_size(q) adjustObjectSize(oop(q)->size())
  2111 #define adjust_obj_size(s) adjustObjectSize(s)
  2113 void CompactibleFreeListSpace::adjust_pointers() {
  2114   // In other versions of adjust_pointers(), a bail out
  2115   // based on the amount of live data in the generation
  2116   // (i.e., if 0, bail out) may be used.
  2117   // Cannot test used() == 0 here because the free lists have already
  2118   // been mangled by the compaction.
  2120   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
  2121   // See note about verification in prepare_for_compaction().
  2124 void CompactibleFreeListSpace::compact() {
  2125   SCAN_AND_COMPACT(obj_size);
  2128 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
  2129 // where fbs is free block sizes
  2130 double CompactibleFreeListSpace::flsFrag() const {
  2131   size_t itabFree = totalSizeInIndexedFreeLists();
  2132   double frag = 0.0;
  2133   size_t i;
  2135   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2136     double sz  = i;
  2137     frag      += _indexedFreeList[i].count() * (sz * sz);
  2140   double totFree = itabFree +
  2141                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
  2142   if (totFree > 0) {
  2143     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
  2144             (totFree * totFree));
  2145     frag = (double)1.0  - frag;
  2146   } else {
  2147     assert(frag == 0.0, "Follows from totFree == 0");
  2149   return frag;
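// Worked example of the fragmentation metric computed by flsFrag() above
// (hypothetical free block sizes): for free blocks of 2, 2 and 4 words,
//   sum of fbs   = 2 + 2 + 4         = 8
//   sum of fbs^2 = 4 + 4 + 16        = 24
//   frag         = 1 - 24 / (8 * 8)  = 0.625
// A single free block of 8 words would instead give 1 - 64/64 = 0.0, i.e.
// no fragmentation; many small blocks push the metric toward 1.0.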
  2152 void CompactibleFreeListSpace::beginSweepFLCensus(
  2153   float inter_sweep_current,
  2154   float inter_sweep_estimate,
  2155   float intra_sweep_estimate) {
  2156   assert_locked();
  2157   size_t i;
  2158   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2159     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
  2160     if (PrintFLSStatistics > 1) {
  2161       gclog_or_tty->print("size[%d] : ", i);
  2163     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
  2164     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
  2165     fl->set_before_sweep(fl->count());
  2166     fl->set_bfr_surp(fl->surplus());
  2168   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
  2169                                     inter_sweep_current,
  2170                                     inter_sweep_estimate,
  2171                                     intra_sweep_estimate);
  2174 void CompactibleFreeListSpace::setFLSurplus() {
  2175   assert_locked();
  2176   size_t i;
  2177   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2178     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2179     fl->set_surplus(fl->count() -
  2180                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
  2184 void CompactibleFreeListSpace::setFLHints() {
  2185   assert_locked();
  2186   size_t i;
  2187   size_t h = IndexSetSize;
  2188   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
  2189     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2190     fl->set_hint(h);
  2191     if (fl->surplus() > 0) {
  2192       h = i;
  2197 void CompactibleFreeListSpace::clearFLCensus() {
  2198   assert_locked();
  2199   size_t i;
  2200   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  2201     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
  2202     fl->set_prev_sweep(fl->count());
  2203     fl->set_coal_births(0);
  2204     fl->set_coal_deaths(0);
  2205     fl->set_split_births(0);
  2206     fl->set_split_deaths(0);
  2210 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
  2211   if (PrintFLSStatistics > 0) {
  2212     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
  2213     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
  2214                            largestAddr);
  2216   setFLSurplus();
  2217   setFLHints();
  2218   if (PrintGC && PrintFLSCensus > 0) {
  2219     printFLCensus(sweep_count);
  2221   clearFLCensus();
  2222   assert_locked();
  2223   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
  2226 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
  2227   if (size < SmallForDictionary) {
  2228     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2229     return (fl->coal_desired() < 0) ||
  2230            ((int)fl->count() > fl->coal_desired());
  2231   } else {
  2232     return dictionary()->coal_dict_over_populated(size);
  2236 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
  2237   assert(size < SmallForDictionary, "Size too large for indexed list");
  2238   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2239   fl->increment_coal_births();
  2240   fl->increment_surplus();
  2243 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
  2244   assert(size < SmallForDictionary, "Size too large for indexed list");
  2245   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2246   fl->increment_coal_deaths();
  2247   fl->decrement_surplus();
  2250 void CompactibleFreeListSpace::coalBirth(size_t size) {
  2251   if (size  < SmallForDictionary) {
  2252     smallCoalBirth(size);
  2253   } else {
  2254     dictionary()->dict_census_update(size,
  2255                                    false /* split */,
  2256                                    true /* birth */);
  2260 void CompactibleFreeListSpace::coalDeath(size_t size) {
  2261   if(size  < SmallForDictionary) {
  2262     smallCoalDeath(size);
  2263   } else {
  2264     dictionary()->dict_census_update(size,
  2265                                    false /* split */,
  2266                                    false /* birth */);
  2270 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
  2271   assert(size < SmallForDictionary, "Size too large for indexed list");
  2272   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2273   fl->increment_split_births();
  2274   fl->increment_surplus();
  2277 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
  2278   assert(size < SmallForDictionary, "Size too large for indexed list");
  2279   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
  2280   fl->increment_split_deaths();
  2281   fl->decrement_surplus();
  2284 void CompactibleFreeListSpace::split_birth(size_t size) {
  2285   if (size  < SmallForDictionary) {
  2286     smallSplitBirth(size);
  2287   } else {
  2288     dictionary()->dict_census_update(size,
  2289                                    true /* split */,
  2290                                    true /* birth */);
  2294 void CompactibleFreeListSpace::splitDeath(size_t size) {
  2295   if (size  < SmallForDictionary) {
  2296     smallSplitDeath(size);
  2297   } else {
  2298     dictionary()->dict_census_update(size,
  2299                                    true /* split */,
  2300                                    false /* birth */);
  2304 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
  2305   size_t to2 = from - to1;
  2306   splitDeath(from);
  2307   split_birth(to1);
  2308   split_birth(to2);
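// Census bookkeeping example for split() above (hypothetical sizes):
// split(20, 12) records a split death for size 20 and split births for
// sizes 12 and 8 (to2 = 20 - 12), updating either the indexed-list stats
// or the dictionary census depending on which side of SmallForDictionary
// each size falls.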
  2311 void CompactibleFreeListSpace::print() const {
  2312   print_on(tty);
  2315 void CompactibleFreeListSpace::prepare_for_verify() {
  2316   assert_locked();
  2317   repairLinearAllocationBlocks();
  2318   // Verify that the SpoolBlocks look like free blocks of
  2319   // appropriate sizes... To be done ...
  2322 class VerifyAllBlksClosure: public BlkClosure {
  2323  private:
  2324   const CompactibleFreeListSpace* _sp;
  2325   const MemRegion                 _span;
  2326   HeapWord*                       _last_addr;
  2327   size_t                          _last_size;
  2328   bool                            _last_was_obj;
  2329   bool                            _last_was_live;
  2331  public:
  2332   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
  2333     MemRegion span) :  _sp(sp), _span(span),
  2334                        _last_addr(NULL), _last_size(0),
  2335                        _last_was_obj(false), _last_was_live(false) { }
  2337   virtual size_t do_blk(HeapWord* addr) {
  2338     size_t res;
  2339     bool   was_obj  = false;
  2340     bool   was_live = false;
  2341     if (_sp->block_is_obj(addr)) {
  2342       was_obj = true;
  2343       oop p = oop(addr);
  2344       guarantee(p->is_oop(), "Should be an oop");
  2345       res = _sp->adjustObjectSize(p->size());
  2346       if (_sp->obj_is_alive(addr)) {
  2347         was_live = true;
  2348         p->verify();
  2350     } else {
  2351       FreeChunk* fc = (FreeChunk*)addr;
  2352       res = fc->size();
  2353       if (FLSVerifyLists && !fc->cantCoalesce()) {
  2354         guarantee(_sp->verify_chunk_in_free_list(fc),
  2355                   "Chunk should be on a free list");
  2358     if (res == 0) {
  2359       gclog_or_tty->print_cr("Livelock: no rank reduction!");
  2360       gclog_or_tty->print_cr(
  2361         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
  2362         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
  2363         addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
  2364         _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
  2365       _sp->print_on(gclog_or_tty);
  2366       guarantee(false, "Seppuku!");
  2368     _last_addr = addr;
  2369     _last_size = res;
  2370     _last_was_obj  = was_obj;
  2371     _last_was_live = was_live;
  2372     return res;
  2374 };
  2376 class VerifyAllOopsClosure: public OopClosure {
  2377  private:
  2378   const CMSCollector*             _collector;
  2379   const CompactibleFreeListSpace* _sp;
  2380   const MemRegion                 _span;
  2381   const bool                      _past_remark;
  2382   const CMSBitMap*                _bit_map;
  2384  protected:
  2385   void do_oop(void* p, oop obj) {
  2386     if (_span.contains(obj)) { // the interior oop points into CMS heap
  2387       if (!_span.contains(p)) { // reference from outside CMS heap
  2388         // Should be a valid object; the first disjunct below allows
  2389         // us to sidestep an assertion in block_is_obj() that insists
  2390         // that p be in _sp. Note that several generations (and spaces)
  2391         // are spanned by _span (CMS heap) above.
  2392         guarantee(!_sp->is_in_reserved(obj) ||
  2393                   _sp->block_is_obj((HeapWord*)obj),
  2394                   "Should be an object");
  2395         guarantee(obj->is_oop(), "Should be an oop");
  2396         obj->verify();
  2397         if (_past_remark) {
  2398           // Remark has been completed, the object should be marked
  2399           _bit_map->isMarked((HeapWord*)obj);
  2401       } else { // reference within CMS heap
  2402         if (_past_remark) {
  2403           // Remark has been completed -- so the referent should have
  2404           // been marked, if referring object is.
  2405           if (_bit_map->isMarked(_collector->block_start(p))) {
  2406             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
  2410     } else if (_sp->is_in_reserved(p)) {
  2411       // the reference is from FLS, and points out of FLS
  2412       guarantee(obj->is_oop(), "Should be an oop");
  2413       obj->verify();
  2417   template <class T> void do_oop_work(T* p) {
  2418     T heap_oop = oopDesc::load_heap_oop(p);
  2419     if (!oopDesc::is_null(heap_oop)) {
  2420       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  2421       do_oop(p, obj);
  2425  public:
  2426   VerifyAllOopsClosure(const CMSCollector* collector,
  2427     const CompactibleFreeListSpace* sp, MemRegion span,
  2428     bool past_remark, CMSBitMap* bit_map) :
  2429     _collector(collector), _sp(sp), _span(span),
  2430     _past_remark(past_remark), _bit_map(bit_map) { }
  2432   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
  2433   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
  2434 };
  2436 void CompactibleFreeListSpace::verify() const {
  2437   assert_lock_strong(&_freelistLock);
  2438   verify_objects_initialized();
  2439   MemRegion span = _collector->_span;
  2440   bool past_remark = (_collector->abstract_state() ==
  2441                       CMSCollector::Sweeping);
  2443   ResourceMark rm;
  2444   HandleMark  hm;
  2446   // Check integrity of CFL data structures
  2447   _promoInfo.verify();
  2448   _dictionary->verify();
  2449   if (FLSVerifyIndexTable) {
  2450     verifyIndexedFreeLists();
  2452   // Check integrity of all objects and free blocks in space
  2454     VerifyAllBlksClosure cl(this, span);
  2455     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
  2457   // Check that all references in the heap to FLS
  2458   // are to valid objects in FLS or that references in
  2459   // FLS are to valid objects elsewhere in the heap
  2460   if (FLSVerifyAllHeapReferences)
  2462     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
  2463       _collector->markBitMap());
  2464     CollectedHeap* ch = Universe::heap();
  2466     // Iterate over all oops in the heap. Uses the _no_header version
  2467     // since we are not interested in following the klass pointers.
  2468     ch->oop_iterate_no_header(&cl);
  2471   if (VerifyObjectStartArray) {
  2472     // Verify the block offset table
  2473     _bt.verify();
  2477 #ifndef PRODUCT
  2478 void CompactibleFreeListSpace::verifyFreeLists() const {
  2479   if (FLSVerifyLists) {
  2480     _dictionary->verify();
  2481     verifyIndexedFreeLists();
  2482   } else {
  2483     if (FLSVerifyDictionary) {
  2484       _dictionary->verify();
  2486     if (FLSVerifyIndexTable) {
  2487       verifyIndexedFreeLists();
  2491 #endif
  2493 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
  2494   size_t i = 0;
  2495   for (; i < IndexSetStart; i++) {
  2496     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
  2498   for (; i < IndexSetSize; i++) {
  2499     verifyIndexedFreeList(i);
  2503 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
  2504   FreeChunk* fc   =  _indexedFreeList[size].head();
  2505   FreeChunk* tail =  _indexedFreeList[size].tail();
  2506   size_t    num = _indexedFreeList[size].count();
  2507   size_t      n = 0;
  2508   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
  2509             "Slot should have been empty");
  2510   for (; fc != NULL; fc = fc->next(), n++) {
  2511     guarantee(fc->size() == size, "Size inconsistency");
  2512     guarantee(fc->is_free(), "!free?");
  2513     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
  2514     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
  2516   guarantee(n == num, "Incorrect count");
  2519 #ifndef PRODUCT
  2520 void CompactibleFreeListSpace::check_free_list_consistency() const {
  2521   assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size() <= IndexSetSize),
  2522     "Some sizes can't be allocated without recourse to"
  2523     " linear allocation buffers");
  2524   assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList>)),
  2525     "else MIN_TREE_CHUNK_SIZE is wrong");
  2526   assert(IndexSetStart != 0, "IndexSetStart not initialized");
  2527   assert(IndexSetStride != 0, "IndexSetStride not initialized");
  2529 #endif
void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
  assert_lock_strong(&_freelistLock);
  AdaptiveFreeList<FreeChunk> total;
  gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
  AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
  size_t total_free = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
    total_free += fl->count() * fl->size();
    if (i % (40*IndexSetStride) == 0) {
      AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
    }
    fl->print_on(gclog_or_tty);
    total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
    total.set_surplus(    total.surplus()     + fl->surplus()    );
    total.set_desired(    total.desired()     + fl->desired()    );
    total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
    total.set_before_sweep(total.before_sweep() + fl->before_sweep());
    total.set_count(      total.count()       + fl->count()      );
    total.set_coal_births( total.coal_births()  + fl->coal_births() );
    total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
    total.set_split_births(total.split_births() + fl->split_births());
    total.set_split_deaths(total.split_deaths() + fl->split_deaths());
  }
  total.print_on(gclog_or_tty, "TOTAL");
  gclog_or_tty->print_cr("Total free in indexed lists "
                         SIZE_FORMAT " words", total_free);
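  // The "growth" figure below is the net change in block population across
  // all indexed lists since the previous sweep, i.e.
  //   (split_births + coal_births - split_deaths - coal_deaths) / prev_sweep
  // and "deficit" is (desired - count) / desired, the fraction of the
  // desired population currently missing.  With illustrative totals of
  // 300 births, 100 deaths and a previous-sweep population of 1000, growth
  // is 0.2; a desired population of 500 with 400 blocks on hand gives a
  // deficit of 0.2.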
  gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
    (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
            (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
    (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
  _dictionary->print_dict_census();
}
///////////////////////////////////////////////////////////////////////////
// CFLS_LAB
///////////////////////////////////////////////////////////////////////////

#define VECTOR_257(x)                                                                                  \
  /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
  {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
     x }

// Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
// OldPLABSize, whose static default is different; if overridden at the
// command-line, this will get reinitialized via a call to
// modify_initialization() below.
AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
  VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
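// Note: the single permitted call to modify_initialization() is made from
// the CMS generation setup code (outside this file) when the old-gen PLAB
// flags are supplied on the command line.  A hypothetical sketch of such a
// call site, for illustration only:
//
//   if (/* OldPLABSize or OldPLABWeight was given on the command line */) {
//     CFLS_LAB::modify_initialization(OldPLABSize, OldPLABWeight);
//   }
//
// This re-seeds every per-size average in _blocks_to_claim[] from the
// supplied size and weight, replacing the CMSParPromoteBlocksToClaim-based
// defaults set up above.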
CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
  _cfls(cfls)
{
  assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    _indexedFreeList[i].set_size(i);
    _num_blocks[i] = 0;
  }
}

static bool _CFLS_LAB_modified = false;

void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
  assert(!_CFLS_LAB_modified, "Call only once");
  _CFLS_LAB_modified = true;
  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    _blocks_to_claim[i].modify(n, wt, true /* force */);
  }
}
HeapWord* CFLS_LAB::alloc(size_t word_sz) {
  FreeChunk* res;
  assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
  if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
    // This locking manages sync with other large object allocations.
    MutexLockerEx x(_cfls->parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    res = _cfls->getChunkFromDictionaryExact(word_sz);
    if (res == NULL) return NULL;
  } else {
    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
    if (fl->count() == 0) {
      // Attempt to refill this local free list.
      get_from_global_pool(word_sz, fl);
      // If it didn't work, give up.
      if (fl->count() == 0) return NULL;
    }
    res = fl->get_chunk_at_head();
    assert(res != NULL, "Why was count non-zero?");
  }
  res->markNotFree();
  assert(!res->is_free(), "shouldn't be marked free");
  assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  // mangle a just allocated object with a distinct pattern.
  debug_only(res->mangleAllocated(word_sz));
  return (HeapWord*)res;
}
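// A note on the contract of CFLS_LAB::alloc() above: word_sz must already
// be rounded via CompactibleFreeListSpace::adjustObjectSize() (the first
// assert checks this), and a NULL result only means this LAB could not
// satisfy the request, so the caller is expected to fall back to a slower
// allocation/promotion path.  Hypothetical caller-side sketch, for
// illustration only:
//
//   size_t sz = cfls->adjustObjectSize(obj_size);
//   HeapWord* p = lab.alloc(sz);
//   if (p == NULL) {
//     // e.g. try expanding the old generation, or report promotion failure
//   }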
// Get a chunk of blocks of the right size and update related
// book-keeping stats
void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
  // Get the #blocks we want to claim
  size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
  assert(n_blks > 0, "Error");
  assert(ResizePLAB || n_blks == OldPLABSize, "Error");
  // In some cases, when the application has a phase change,
  // there may be a sudden and sharp shift in the object survival
  // profile, and updating the counts at the end of a scavenge
  // may not be quick enough, giving rise to large scavenge pauses
  // during these phase changes. It is beneficial to detect such
  // changes on-the-fly during a scavenge and avoid such a phase-change
  // pothole. The following code is a heuristic attempt to do that.
  // It is protected by a product flag until we have gained
  // enough experience with this heuristic and fine-tuned its behaviour.
  // WARNING: This might increase fragmentation if we overreact to
  // small spikes, so some kind of historical smoothing based on
  // previous experience with the greater reactivity might be useful.
  // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
  // default.
  if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
    size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
    n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
    n_blks = MIN2(n_blks, CMSOldPLABMax);
  }
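  // Worked example with purely illustrative values: suppose the smoothed
  // average gives n_blks = 50 and this scavenge has already used
  // _num_blocks[word_sz] = 8000 blocks of this size.  With
  // CMSOldPLABToleranceFactor = 4, CMSOldPLABNumRefills = 4 and
  // CMSOldPLABReactivityFactor = 2:
  //   multiple = 8000 / (4 * 4 * 50) = 10
  //   n_blks   = 50 + 2 * 10 * 50   = 1050, then clamped to CMSOldPLABMax.
  // When demand is modest (multiple == 0), the refill size is unchanged.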
  assert(n_blks > 0, "Error");
  _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
  // Update stats table entry for this block size
  _num_blocks[word_sz] += fl->count();
}
void CFLS_LAB::compute_desired_plab_size() {
  for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
           "Counter inconsistency");
    if (_global_num_workers[i] > 0) {
      // Need to smooth wrt historical average
      if (ResizeOldPLAB) {
        _blocks_to_claim[i].sample(
          MAX2((size_t)CMSOldPLABMin,
          MIN2((size_t)CMSOldPLABMax,
               _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
      }
      // Reset counters for next round
      _global_num_workers[i] = 0;
      _global_num_blocks[i] = 0;
      if (PrintOldPLAB) {
        gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average());
      }
    }
  }
}
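// In compute_desired_plab_size() above, the per-size sample fed into the
// weighted average is
//   global_num_blocks[i] / (global_num_workers[i] * CMSOldPLABNumRefills)
// clamped to [CMSOldPLABMin, CMSOldPLABMax].  Purely as an illustration:
// if 4 workers together consumed 3200 blocks of a given size during the
// scavenge and CMSOldPLABNumRefills is 4, the sample is 3200 / (4 * 4) =
// 200 blocks per refill, which the AdaptiveWeightedAverage then blends
// with the existing estimate rather than adopting it outright.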
// If this is changed in the future to allow parallel
// access, one would need to take the FL locks and,
// depending on how it is used, stagger access from
// parallel threads to reduce contention.
void CFLS_LAB::retire(int tid) {
  // We run this single threaded with the world stopped;
  // so no need for locks and such.
  NOT_PRODUCT(Thread* t = Thread::current();)
  assert(Thread::current()->is_VM_thread(), "Error");
  for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
       i < CompactibleFreeListSpace::IndexSetSize;
       i += CompactibleFreeListSpace::IndexSetStride) {
    assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
           "Can't retire more than what we obtained");
    if (_num_blocks[i] > 0) {
      size_t num_retire =  _indexedFreeList[i].count();
      assert(_num_blocks[i] > num_retire, "Should have used at least one");
      {
        // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
        //                Mutex::_no_safepoint_check_flag);

        // Update globals stats for num_blocks used
        _global_num_blocks[i] += (_num_blocks[i] - num_retire);
        _global_num_workers[i]++;
        assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
        if (num_retire > 0) {
          _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
          // Reset this list.
          _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
          _indexedFreeList[i].set_size(i);
        }
      }
      if (PrintOldPLAB) {
        gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
                               tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
      }
      // Reset stats for next round
      _num_blocks[i]         = 0;
    }
  }
}
void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
  assert(fl->count() == 0, "Precondition.");
  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
         "Precondition");

  // We'll try all multiples of word_sz in the indexed set, starting with
  // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
  // then try getting a big chunk and splitting it.
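  // Purely as an illustration: a request for n = 50 blocks of word_sz = 8
  // first looks at the 8-word indexed list; if that list is empty and
  // CMSSplitIndexedFreeListBlocks is enabled, it considers the 16-word
  // list (splitting each chunk 2 ways), then the 24-word list (3 ways),
  // and so on while k * word_sz stays below IndexSetSize.  Only if no
  // indexed list can supply blocks do we fall through to the
  // dictionary-based path further below.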
  {
    bool found;
    int  k;
    size_t cur_sz;
    for (k = 1, cur_sz = k * word_sz, found = false;
         (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
         (CMSSplitIndexedFreeListBlocks || k <= 1);
         k++, cur_sz = k * word_sz) {
      AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
      fl_for_cur_sz.set_size(cur_sz);
      {
        MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
                        Mutex::_no_safepoint_check_flag);
        AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
        if (gfl->count() != 0) {
          // nn is the number of chunks of size cur_sz that
          // we'd need to split k-ways each, in order to create
          // "n" chunks of size word_sz each.
          const size_t nn = MAX2(n/k, (size_t)1);
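          // E.g. (illustrative): with n = 50 and k = 3, nn = MAX2(50/3, 1) = 16,
          // so up to 16 chunks of size 3 * word_sz are taken, each split below
          // into 3 word_sz-sized blocks, yielding about 48 of the 50 requested.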
          gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
          found = true;
          if (k > 1) {
            // Update split death stats for the cur_sz-size blocks list:
            // we increment the split death count by the number of blocks
            // we just took from the cur_sz-size blocks list and which
            // we will be splitting below.
            ssize_t deaths = gfl->split_deaths() +
                             fl_for_cur_sz.count();
            gfl->set_split_deaths(deaths);
          }
        }
      }
      // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
      if (found) {
        if (k == 1) {
          fl->prepend(&fl_for_cur_sz);
        } else {
          // Divide each block on fl_for_cur_sz up k ways.
          FreeChunk* fc;
          while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
            // Must do this in reverse order, so that anybody attempting to
            // access the main chunk sees it as a single free block until we
            // change it.
            size_t fc_size = fc->size();
            assert(fc->is_free(), "Error");
            for (int i = k-1; i >= 0; i--) {
              FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
              assert((i != 0) ||
                        ((fc == ffc) && ffc->is_free() &&
                         (ffc->size() == k*word_sz) && (fc_size == word_sz)),
                        "Counting error");
              ffc->set_size(word_sz);
              ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
              ffc->link_next(NULL);
              // Above must occur before BOT is updated below.
              OrderAccess::storestore();
              // splitting from the right, fc_size == i * word_sz
              _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
              fc_size -= word_sz;
              assert(fc_size == i*word_sz, "Error");
              _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
              _bt.verify_single_block((HeapWord*)fc, fc_size);
              _bt.verify_single_block((HeapWord*)ffc, word_sz);
              // Push this on "fl".
              fl->return_chunk_at_head(ffc);
            }
            // TRAP
            assert(fl->tail()->next() == NULL, "List invariant.");
          }
        }
        // Update birth stats for this block size.
        size_t num = fl->count();
        MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                        Mutex::_no_safepoint_check_flag);
        ssize_t births = _indexedFreeList[word_sz].split_births() + num;
        _indexedFreeList[word_sz].set_split_births(births);
        return;
      }
    }
  }
  // Otherwise, we'll split a block from the dictionary.
  FreeChunk* fc = NULL;
  FreeChunk* rem_fc = NULL;
  size_t rem;
  {
    MutexLockerEx x(parDictionaryAllocLock(),
                    Mutex::_no_safepoint_check_flag);
    while (n > 0) {
      fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
                                   FreeBlockDictionary<FreeChunk>::atLeast);
      if (fc != NULL) {
        _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
        dictionary()->dict_census_update(fc->size(),
                                         true /*split*/,
                                         false /*birth*/);
        break;
      } else {
        n--;
      }
    }
    if (fc == NULL) return;
    // Otherwise, split up that block.
    assert((ssize_t)n >= 1, "Control point invariant");
    assert(fc->is_free(), "Error: should be a free block");
    _bt.verify_single_block((HeapWord*)fc, fc->size());
    const size_t nn = fc->size() / word_sz;
    n = MIN2(nn, n);
    assert((ssize_t)n >= 1, "Control point invariant");
    rem = fc->size() - n * word_sz;
    // If there is a remainder, and it's too small, allocate one fewer.
    if (rem > 0 && rem < MinChunkSize) {
      n--; rem += word_sz;
    }
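    // Illustrative arithmetic: for word_sz = 5 and a dictionary chunk of
    // 103 words with n = 20 requested, rem = 103 - 20 * 5 = 3.  If
    // MinChunkSize were 4 (an illustrative value only), that 3-word tail
    // could not stand alone as a free block, so one block is given back:
    // n becomes 19 and rem becomes 8.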
    // Note that at this point we may have n == 0.
    assert((ssize_t)n >= 0, "Control point invariant");

    // If n is 0, the chunk fc that was found is not large
    // enough to leave a viable remainder.  We are unable to
    // allocate even one block.  Return fc to the
    // dictionary and return, leaving "fl" empty.
    if (n == 0) {
      returnChunkToDictionary(fc);
      assert(fl->count() == 0, "We never allocated any blocks");
      return;
    }

    // First return the remainder, if any.
    // Note that we hold the lock until we decide if we're going to give
    // back the remainder to the dictionary, since a concurrent allocation
    // may otherwise see the heap as empty.  (We're willing to take that
    // hit if the block is a small block.)
    if (rem > 0) {
      size_t prefix_size = n * word_sz;
      rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
      rem_fc->set_size(rem);
      rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
      rem_fc->link_next(NULL);
      // Above must occur before BOT is updated below.
      assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
      OrderAccess::storestore();
      _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
      assert(fc->is_free(), "Error");
      fc->set_size(prefix_size);
      if (rem >= IndexSetSize) {
        returnChunkToDictionary(rem_fc);
        dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
        rem_fc = NULL;
      }
      // Otherwise, return it to the small list below.
    }
  }
  if (rem_fc != NULL) {
    MutexLockerEx x(_indexedFreeListParLocks[rem],
                    Mutex::_no_safepoint_check_flag);
    _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
    _indexedFreeList[rem].return_chunk_at_head(rem_fc);
    smallSplitBirth(rem);
  }
  assert((ssize_t)n > 0 && fc != NULL, "Consistency");
  // Now do the splitting up.
  // Must do this in reverse order, so that anybody attempting to
  // access the main chunk sees it as a single free block until we
  // change it.
  size_t fc_size = n * word_sz;
  // All but first chunk in this loop
  for (ssize_t i = n-1; i > 0; i--) {
    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
    ffc->set_size(word_sz);
    ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
    ffc->link_next(NULL);
    // Above must occur before BOT is updated below.
    OrderAccess::storestore();
    // splitting from the right, fc_size == (n - i + 1) * wordsize
    _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
    fc_size -= word_sz;
    _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)ffc, ffc->size());
    _bt.verify_single_block((HeapWord*)fc, fc_size);
    // Push this on "fl".
    fl->return_chunk_at_head(ffc);
  }
  // First chunk
  assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
  // The blocks above should show their new sizes before the first block below
  fc->set_size(word_sz);
  fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
  fc->link_next(NULL);
  _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
  _bt.verify_single_block((HeapWord*)fc, fc->size());
  fl->return_chunk_at_head(fc);

  assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
  {
    // Update the stats for this block size.
    MutexLockerEx x(_indexedFreeListParLocks[word_sz],
                    Mutex::_no_safepoint_check_flag);
    const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
    _indexedFreeList[word_sz].set_split_births(births);
    // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
    // _indexedFreeList[word_sz].set_surplus(new_surplus);
  }

  // TRAP
  assert(fl->tail()->next() == NULL, "List invariant.");
}
// Set up the space's par_seq_tasks structure for work claiming
// for parallel rescan. See CMSParRemarkTask where this is currently used.
// XXX Need to suitably abstract and generalize this and the next
// method into one.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_rescan(int n_threads) {
  // The "size" of each task is fixed according to rescan_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = rescan_task_size();
  size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
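  // This is a ceiling division: e.g. a used region of 1,000,000 words and a
  // task size of 4096 words (illustrative values) give
  // (1000000 + 4095) / 4096 = 245 tasks, the last one covering the partial
  // tail of the region.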
  assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
  assert(n_tasks == 0 ||
         ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
          (used_region().start() + n_tasks*task_size >= used_region().end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}
// Set up the space's par_seq_tasks structure for work claiming
// for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
void
CompactibleFreeListSpace::
initialize_sequential_subtasks_for_marking(int n_threads,
                                           HeapWord* low) {
  // The "size" of each task is fixed according to marking_task_size.
  assert(n_threads > 0, "Unexpected n_threads argument");
  const size_t task_size = marking_task_size();
  assert(task_size > CardTableModRefBS::card_size_in_words &&
         (task_size % CardTableModRefBS::card_size_in_words == 0),
         "Otherwise arithmetic below would be incorrect");
  MemRegion span = _gen->reserved();
  if (low != NULL) {
    if (span.contains(low)) {
      // Align low down to a card boundary so that
      // we can use block_offset_careful() on span boundaries.
      HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
                                 CardTableModRefBS::card_size);
      // Clip span prefix at aligned_low
      span = span.intersection(MemRegion(aligned_low, span.end()));
    } else if (low > span.end()) {
      span = MemRegion(low, low);  // Null region
    } // else use entire span
  }
  assert(span.is_empty() ||
         ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
         "span should start at a card boundary");
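  // For example, with a 512-byte card size a low bound of 0x10001234 is
  // aligned down to 0x10001200 before the span is clipped, so the clipped
  // span still begins on a card boundary (illustrative addresses).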
  size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
  assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
  assert(n_tasks == 0 ||
         ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
          (span.start() + n_tasks*task_size >= span.end())),
         "n_tasks calculation incorrect");
  SequentialSubTasksDone* pst = conc_par_seq_tasks();
  assert(!pst->valid(), "Clobbering existing data?");
  // Sets the condition for completion of the subtask (how many threads
  // need to finish in order to be done).
  pst->set_n_threads(n_threads);
  pst->set_n_tasks((int)n_tasks);
}
